From 85819f468f6e95d838c4575cfb395204390fe9e0 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Tue, 26 Nov 2024 15:56:26 +0100
Subject: [PATCH 01/23] Update module go.uber.org/automaxprocs to v1.6.0
(#36549)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
This PR contains the following updates:
| Package | Change | Age | Adoption | Passing | Confidence |
|---|---|---|---|---|---|
|
[go.uber.org/automaxprocs](https://redirect.github.com/uber-go/automaxprocs)
| `v1.5.3` -> `v1.6.0` |
[![age](https://developer.mend.io/api/mc/badges/age/go/go.uber.org%2fautomaxprocs/v1.6.0?slim=true)](https://docs.renovatebot.com/merge-confidence/)
|
[![adoption](https://developer.mend.io/api/mc/badges/adoption/go/go.uber.org%2fautomaxprocs/v1.6.0?slim=true)](https://docs.renovatebot.com/merge-confidence/)
|
[![passing](https://developer.mend.io/api/mc/badges/compatibility/go/go.uber.org%2fautomaxprocs/v1.5.3/v1.6.0?slim=true)](https://docs.renovatebot.com/merge-confidence/)
|
[![confidence](https://developer.mend.io/api/mc/badges/confidence/go/go.uber.org%2fautomaxprocs/v1.5.3/v1.6.0?slim=true)](https://docs.renovatebot.com/merge-confidence/)
|
---
> [!WARNING]
> Some dependencies could not be looked up. Check the Dependency
Dashboard for more information.
---
### Release Notes
uber-go/automaxprocs (go.uber.org/automaxprocs)
###
[`v1.6.0`](https://redirect.github.com/uber-go/automaxprocs/blob/HEAD/CHANGELOG.md#v160-2024-07-24)
[Compare
Source](https://redirect.github.com/uber-go/automaxprocs/compare/v1.5.3...v1.6.0)
- Add RoundQuotaFunc option that allows configuration of rounding
behavior for floating point CPU quota.
---
### Configuration
📅 **Schedule**: Branch creation - "on tuesday" (UTC), Automerge - At any
time (no schedule defined).
🚦 **Automerge**: Disabled by config. Please merge this manually once you
are satisfied.
♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the
rebase/retry checkbox.
🔕 **Ignore**: Close this PR and you won't be reminded about this update
again.
---
- [ ] If you want to rebase/retry this PR, check
this box
---
This PR was generated by [Mend Renovate](https://mend.io/renovate/).
View the [repository job
log](https://developer.mend.io/github/open-telemetry/opentelemetry-collector-contrib).
---------
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: opentelemetrybot <107717825+opentelemetrybot@users.noreply.github.com>
---
extension/cgroupruntimeextension/go.mod | 2 +-
extension/cgroupruntimeextension/go.sum | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/extension/cgroupruntimeextension/go.mod b/extension/cgroupruntimeextension/go.mod
index 485a2e48dbe7..a5ea495bae50 100644
--- a/extension/cgroupruntimeextension/go.mod
+++ b/extension/cgroupruntimeextension/go.mod
@@ -10,7 +10,7 @@ require (
go.opentelemetry.io/collector/confmap v1.20.0
go.opentelemetry.io/collector/extension v0.114.0
go.opentelemetry.io/collector/extension/extensiontest v0.114.0
- go.uber.org/automaxprocs v1.5.3
+ go.uber.org/automaxprocs v1.6.0
go.uber.org/goleak v1.3.0
go.uber.org/zap v1.27.0
)
diff --git a/extension/cgroupruntimeextension/go.sum b/extension/cgroupruntimeextension/go.sum
index 7ed11ad2687e..9c0e407bc105 100644
--- a/extension/cgroupruntimeextension/go.sum
+++ b/extension/cgroupruntimeextension/go.sum
@@ -84,8 +84,8 @@ go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiy
go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ=
go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM=
go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8=
-go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8=
-go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
+go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
+go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
From ad84ba67bc7e68835427736d8dae3f94a85dfc98 Mon Sep 17 00:00:00 2001
From: Moritz Wiesinger
Date: Tue, 26 Nov 2024 17:07:03 +0100
Subject: [PATCH 02/23] [chore] track markdown-link-check with renovate
(#36552)
#### Description
This PR adds markdown-link-check to the tracked dependencies of
renovate. It achieves that by using the
[`customManagers:githubActionsVersions`](https://docs.renovatebot.com/presets-customManagers/#custommanagersgithubactionsversions)
renovate preset that enables user to update arbitrary versions in github
actions files without having to maintain their own regex for them.
#### Link to tracking issue
Fixes #36259
#### Testing
This setup is already in use in the dynatrace collector distro and works
great: [latest update
PR](https://github.com/Dynatrace/dynatrace-otel-collector/pull/382),
[renovate
config](https://github.com/Dynatrace/dynatrace-otel-collector/blob/07e4662f92b0cadfb311eca74c9269c1c1598634/renovate.json#L6)
---------
Signed-off-by: Moritz Wiesinger
---
.github/workflows/changelog.yml | 4 +++-
.github/workflows/check-links.yaml | 6 +++++-
renovate.json | 5 ++++-
3 files changed, 12 insertions(+), 3 deletions(-)
diff --git a/.github/workflows/changelog.yml b/.github/workflows/changelog.yml
index fe0c50398f0f..97a9d78c2b3d 100644
--- a/.github/workflows/changelog.yml
+++ b/.github/workflows/changelog.yml
@@ -15,6 +15,8 @@ env:
# Make sure to exit early if cache segment download times out after 2 minutes.
# We limit cache download as a whole to 5 minutes.
SEGMENT_DOWNLOAD_TIMEOUT_MINS: 2
+ # renovate: datasource=github-releases depName=tcort/markdown-link-check
+ MD_LINK_CHECK_VERSION: "3.12.2"
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref }}
@@ -84,7 +86,7 @@ jobs:
run: make chlog-preview > changelog_preview.md
- name: Install markdown-link-check
if: ${{ !contains(github.event.pull_request.labels.*.name, 'dependencies') && !contains(github.event.pull_request.labels.*.name, 'Skip Changelog') && !contains(github.event.pull_request.title, '[chore]')}}
- run: npm install -g markdown-link-check@3.12.2
+ run: npm install -g markdown-link-check@${{ env.MD_LINK_CHECK_VERSION }}
- name: Run markdown-link-check
if: ${{ !contains(github.event.pull_request.labels.*.name, 'dependencies') && !contains(github.event.pull_request.labels.*.name, 'Skip Changelog') && !contains(github.event.pull_request.title, '[chore]')}}
run: |
diff --git a/.github/workflows/check-links.yaml b/.github/workflows/check-links.yaml
index 224549ba628a..bbfd0bb2ed49 100644
--- a/.github/workflows/check-links.yaml
+++ b/.github/workflows/check-links.yaml
@@ -9,6 +9,10 @@ concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
+env:
+ # renovate: datasource=github-releases depName=tcort/markdown-link-check
+ MD_LINK_CHECK_VERSION: "3.12.2"
+
jobs:
changedfiles:
name: changed files
@@ -36,7 +40,7 @@ jobs:
fetch-depth: 0
- name: Install markdown-link-check
- run: npm install -g markdown-link-check@3.12.2
+ run: npm install -g markdown-link-check@${{ env.MD_LINK_CHECK_VERSION }}
- name: Run markdown-link-check
run: |
diff --git a/renovate.json b/renovate.json
index 74d57112f1c1..3e65271caca1 100644
--- a/renovate.json
+++ b/renovate.json
@@ -8,7 +8,10 @@
"go": "1.22.0"
},
"schedule": ["every tuesday"],
- "extends": ["config:recommended"],
+ "extends": [
+ "config:recommended",
+ "customManagers:githubActionsVersions"
+ ],
"ignorePaths": [
"**/receiver/apachesparkreceiver/testdata/integration/Dockerfile.apache-spark",
"**/receiver/elasticsearchreceiver/testdata/integration/Dockerfile.elasticsearch.7_16_3",
From 9aac2a41232dbf5d6f9444ef9efe3e70257a2d55 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Tue, 26 Nov 2024 18:04:02 +0100
Subject: [PATCH 03/23] Update module github.com/stretchr/testify to v1.10.0
(#36554)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
This PR contains the following updates:
| Package | Change | Age | Adoption | Passing | Confidence |
|---|---|---|---|---|---|
|
[github.com/stretchr/testify](https://redirect.github.com/stretchr/testify)
| `v1.9.0` -> `v1.10.0` |
[![age](https://developer.mend.io/api/mc/badges/age/go/github.com%2fstretchr%2ftestify/v1.10.0?slim=true)](https://docs.renovatebot.com/merge-confidence/)
|
[![adoption](https://developer.mend.io/api/mc/badges/adoption/go/github.com%2fstretchr%2ftestify/v1.10.0?slim=true)](https://docs.renovatebot.com/merge-confidence/)
|
[![passing](https://developer.mend.io/api/mc/badges/compatibility/go/github.com%2fstretchr%2ftestify/v1.9.0/v1.10.0?slim=true)](https://docs.renovatebot.com/merge-confidence/)
|
[![confidence](https://developer.mend.io/api/mc/badges/confidence/go/github.com%2fstretchr%2ftestify/v1.9.0/v1.10.0?slim=true)](https://docs.renovatebot.com/merge-confidence/)
|
---
> [!WARNING]
> Some dependencies could not be looked up. Check the Dependency
Dashboard for more information.
---
### Release Notes
stretchr/testify (github.com/stretchr/testify)
###
[`v1.10.0`](https://redirect.github.com/stretchr/testify/releases/tag/v1.10.0)
[Compare
Source](https://redirect.github.com/stretchr/testify/compare/v1.9.0...v1.10.0)
#### What's Changed
##### Functional Changes
- Add PanicAssertionFunc by
[@fahimbagar](https://redirect.github.com/fahimbagar) in
[https://github.com/stretchr/testify/pull/1337](https://redirect.github.com/stretchr/testify/pull/1337)
- assert: deprecate CompareType by
[@dolmen](https://redirect.github.com/dolmen) in
[https://github.com/stretchr/testify/pull/1566](https://redirect.github.com/stretchr/testify/pull/1566)
- assert: make YAML dependency pluggable via build tags by
[@dolmen](https://redirect.github.com/dolmen) in
[https://github.com/stretchr/testify/pull/1579](https://redirect.github.com/stretchr/testify/pull/1579)
- assert: new assertion NotElementsMatch by
[@hendrywiranto](https://redirect.github.com/hendrywiranto) in
[https://github.com/stretchr/testify/pull/1600](https://redirect.github.com/stretchr/testify/pull/1600)
- mock: in order mock calls by
[@ReyOrtiz](https://redirect.github.com/ReyOrtiz) in
[https://github.com/stretchr/testify/pull/1637](https://redirect.github.com/stretchr/testify/pull/1637)
- Add assertion for NotErrorAs by
[@palsivertsen](https://redirect.github.com/palsivertsen) in
[https://github.com/stretchr/testify/pull/1129](https://redirect.github.com/stretchr/testify/pull/1129)
- Record Return Arguments of a Call by
[@jayd3e](https://redirect.github.com/jayd3e) in
[https://github.com/stretchr/testify/pull/1636](https://redirect.github.com/stretchr/testify/pull/1636)
- assert.EqualExportedValues: accepts everything by
[@redachl](https://redirect.github.com/redachl) in
[https://github.com/stretchr/testify/pull/1586](https://redirect.github.com/stretchr/testify/pull/1586)
##### Fixes
- assert: make tHelper a type alias by
[@dolmen](https://redirect.github.com/dolmen) in
[https://github.com/stretchr/testify/pull/1562](https://redirect.github.com/stretchr/testify/pull/1562)
- Do not get argument again unnecessarily in Arguments.Error() by
[@TomWright](https://redirect.github.com/TomWright) in
[https://github.com/stretchr/testify/pull/820](https://redirect.github.com/stretchr/testify/pull/820)
- Fix time.Time compare by
[@myxo](https://redirect.github.com/myxo) in
[https://github.com/stretchr/testify/pull/1582](https://redirect.github.com/stretchr/testify/pull/1582)
- assert.Regexp: handle \[]byte array properly by
[@kevinburkesegment](https://redirect.github.com/kevinburkesegment)
in
[https://github.com/stretchr/testify/pull/1587](https://redirect.github.com/stretchr/testify/pull/1587)
- assert: collect.FailNow() should not panic by
[@marshall-lee](https://redirect.github.com/marshall-lee) in
[https://github.com/stretchr/testify/pull/1481](https://redirect.github.com/stretchr/testify/pull/1481)
- mock: simplify implementation of FunctionalOptions by
[@dolmen](https://redirect.github.com/dolmen) in
[https://github.com/stretchr/testify/pull/1571](https://redirect.github.com/stretchr/testify/pull/1571)
- mock: caller information for unexpected method call by
[@spirin](https://redirect.github.com/spirin) in
[https://github.com/stretchr/testify/pull/1644](https://redirect.github.com/stretchr/testify/pull/1644)
- suite: fix test failures by
[@stevenh](https://redirect.github.com/stevenh) in
[https://github.com/stretchr/testify/pull/1421](https://redirect.github.com/stretchr/testify/pull/1421)
- Fix issue
[#1662](https://redirect.github.com/stretchr/testify/issues/1662)
(comparing infs should fail) by
[@ybrustin](https://redirect.github.com/ybrustin) in
[https://github.com/stretchr/testify/pull/1663](https://redirect.github.com/stretchr/testify/pull/1663)
- NotSame should fail if args are not pointers
[#1661](https://redirect.github.com/stretchr/testify/issues/1661)
by [@sikehish](https://redirect.github.com/sikehish) in
[https://github.com/stretchr/testify/pull/1664](https://redirect.github.com/stretchr/testify/pull/1664)
- Increase timeouts in Test_Mock_Called_blocks to reduce flakiness in CI
by [@sikehish](https://redirect.github.com/sikehish) in
[https://github.com/stretchr/testify/pull/1667](https://redirect.github.com/stretchr/testify/pull/1667)
- fix: compare functional option names for indirect calls by
[@arjun-1](https://redirect.github.com/arjun-1) in
[https://github.com/stretchr/testify/pull/1626](https://redirect.github.com/stretchr/testify/pull/1626)
##### Documentation, Build & CI
- .gitignore: ignore "go test -c" binaries by
[@dolmen](https://redirect.github.com/dolmen) in
[https://github.com/stretchr/testify/pull/1565](https://redirect.github.com/stretchr/testify/pull/1565)
- mock: improve doc by
[@dolmen](https://redirect.github.com/dolmen) in
[https://github.com/stretchr/testify/pull/1570](https://redirect.github.com/stretchr/testify/pull/1570)
- mock: fix FunctionalOptions docs by
[@snirye](https://redirect.github.com/snirye) in
[https://github.com/stretchr/testify/pull/1433](https://redirect.github.com/stretchr/testify/pull/1433)
- README: link out to the excellent testifylint by
[@brackendawson](https://redirect.github.com/brackendawson) in
[https://github.com/stretchr/testify/pull/1568](https://redirect.github.com/stretchr/testify/pull/1568)
- assert: fix typo in comment by
[@JohnEndson](https://redirect.github.com/JohnEndson) in
[https://github.com/stretchr/testify/pull/1580](https://redirect.github.com/stretchr/testify/pull/1580)
- Correct the EventuallyWithT and EventuallyWithTf example by
[@JonCrowther](https://redirect.github.com/JonCrowther) in
[https://github.com/stretchr/testify/pull/1588](https://redirect.github.com/stretchr/testify/pull/1588)
- CI: bump softprops/action-gh-release from 1 to 2 by
[@dependabot](https://redirect.github.com/dependabot) in
[https://github.com/stretchr/testify/pull/1575](https://redirect.github.com/stretchr/testify/pull/1575)
- mock: document more alternatives to deprecated AnythingOfTypeArgument
by [@dolmen](https://redirect.github.com/dolmen) in
[https://github.com/stretchr/testify/pull/1569](https://redirect.github.com/stretchr/testify/pull/1569)
- assert: Correctly document EqualValues behavior by
[@brackendawson](https://redirect.github.com/brackendawson) in
[https://github.com/stretchr/testify/pull/1593](https://redirect.github.com/stretchr/testify/pull/1593)
- fix: grammar in godoc by
[@miparnisari](https://redirect.github.com/miparnisari) in
[https://github.com/stretchr/testify/pull/1607](https://redirect.github.com/stretchr/testify/pull/1607)
- .github/workflows: Run tests for Go 1.22 by
[@HaraldNordgren](https://redirect.github.com/HaraldNordgren) in
[https://github.com/stretchr/testify/pull/1629](https://redirect.github.com/stretchr/testify/pull/1629)
- Document suite's lack of support for t.Parallel by
[@brackendawson](https://redirect.github.com/brackendawson) in
[https://github.com/stretchr/testify/pull/1645](https://redirect.github.com/stretchr/testify/pull/1645)
- assert: fix typos in comments by
[@alexandear](https://redirect.github.com/alexandear) in
[https://github.com/stretchr/testify/pull/1650](https://redirect.github.com/stretchr/testify/pull/1650)
- mock: fix doc comment for NotBefore by
[@alexandear](https://redirect.github.com/alexandear) in
[https://github.com/stretchr/testify/pull/1651](https://redirect.github.com/stretchr/testify/pull/1651)
- Generate better comments for require package by
[@Neokil](https://redirect.github.com/Neokil) in
[https://github.com/stretchr/testify/pull/1610](https://redirect.github.com/stretchr/testify/pull/1610)
- README: replace Testify V2 notice with
[@dolmen](https://redirect.github.com/dolmen)'s V2 manifesto by
[@hendrywiranto](https://redirect.github.com/hendrywiranto) in
[https://github.com/stretchr/testify/pull/1518](https://redirect.github.com/stretchr/testify/pull/1518)
#### New Contributors
- [@fahimbagar](https://redirect.github.com/fahimbagar) made
their first contribution in
[https://github.com/stretchr/testify/pull/1337](https://redirect.github.com/stretchr/testify/pull/1337)
- [@TomWright](https://redirect.github.com/TomWright) made their
first contribution in
[https://github.com/stretchr/testify/pull/820](https://redirect.github.com/stretchr/testify/pull/820)
- [@snirye](https://redirect.github.com/snirye) made their first
contribution in
[https://github.com/stretchr/testify/pull/1433](https://redirect.github.com/stretchr/testify/pull/1433)
- [@myxo](https://redirect.github.com/myxo) made their first
contribution in
[https://github.com/stretchr/testify/pull/1582](https://redirect.github.com/stretchr/testify/pull/1582)
- [@JohnEndson](https://redirect.github.com/JohnEndson) made
their first contribution in
[https://github.com/stretchr/testify/pull/1580](https://redirect.github.com/stretchr/testify/pull/1580)
- [@JonCrowther](https://redirect.github.com/JonCrowther) made
their first contribution in
[https://github.com/stretchr/testify/pull/1588](https://redirect.github.com/stretchr/testify/pull/1588)
- [@miparnisari](https://redirect.github.com/miparnisari) made
their first contribution in
[https://github.com/stretchr/testify/pull/1607](https://redirect.github.com/stretchr/testify/pull/1607)
- [@marshall-lee](https://redirect.github.com/marshall-lee) made
their first contribution in
[https://github.com/stretchr/testify/pull/1481](https://redirect.github.com/stretchr/testify/pull/1481)
- [@spirin](https://redirect.github.com/spirin) made their first
contribution in
[https://github.com/stretchr/testify/pull/1644](https://redirect.github.com/stretchr/testify/pull/1644)
- [@ReyOrtiz](https://redirect.github.com/ReyOrtiz) made their
first contribution in
[https://github.com/stretchr/testify/pull/1637](https://redirect.github.com/stretchr/testify/pull/1637)
- [@stevenh](https://redirect.github.com/stevenh) made their
first contribution in
[https://github.com/stretchr/testify/pull/1421](https://redirect.github.com/stretchr/testify/pull/1421)
- [@jayd3e](https://redirect.github.com/jayd3e) made their first
contribution in
[https://github.com/stretchr/testify/pull/1636](https://redirect.github.com/stretchr/testify/pull/1636)
- [@Neokil](https://redirect.github.com/Neokil) made their first
contribution in
[https://github.com/stretchr/testify/pull/1610](https://redirect.github.com/stretchr/testify/pull/1610)
- [@redachl](https://redirect.github.com/redachl) made their
first contribution in
[https://github.com/stretchr/testify/pull/1586](https://redirect.github.com/stretchr/testify/pull/1586)
- [@ybrustin](https://redirect.github.com/ybrustin) made their
first contribution in
[https://github.com/stretchr/testify/pull/1663](https://redirect.github.com/stretchr/testify/pull/1663)
- [@sikehish](https://redirect.github.com/sikehish) made their
first contribution in
[https://github.com/stretchr/testify/pull/1664](https://redirect.github.com/stretchr/testify/pull/1664)
- [@arjun-1](https://redirect.github.com/arjun-1) made their
first contribution in
[https://github.com/stretchr/testify/pull/1626](https://redirect.github.com/stretchr/testify/pull/1626)
**Full Changelog**:
https://github.com/stretchr/testify/compare/v1.9.0...v1.10.0
---
### Configuration
📅 **Schedule**: Branch creation - "on tuesday" (UTC), Automerge - At any
time (no schedule defined).
🚦 **Automerge**: Disabled by config. Please merge this manually once you
are satisfied.
♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the
rebase/retry checkbox.
🔕 **Ignore**: Close this PR and you won't be reminded about this update
again.
---
- [ ] If you want to rebase/retry this PR, check
this box
---
This PR was generated by [Mend Renovate](https://mend.io/renovate/).
View the [repository job
log](https://developer.mend.io/github/open-telemetry/opentelemetry-collector-contrib).
---------
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: opentelemetrybot <107717825+opentelemetrybot@users.noreply.github.com>
---
extension/cgroupruntimeextension/go.mod | 2 +-
extension/cgroupruntimeextension/go.sum | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/extension/cgroupruntimeextension/go.mod b/extension/cgroupruntimeextension/go.mod
index a5ea495bae50..1346bd6c0e27 100644
--- a/extension/cgroupruntimeextension/go.mod
+++ b/extension/cgroupruntimeextension/go.mod
@@ -4,7 +4,7 @@ go 1.22.0
require (
github.com/KimMachineGun/automemlimit v0.6.1
- github.com/stretchr/testify v1.9.0
+ github.com/stretchr/testify v1.10.0
go.opentelemetry.io/collector/component v0.114.0
go.opentelemetry.io/collector/component/componenttest v0.114.0
go.opentelemetry.io/collector/confmap v1.20.0
diff --git a/extension/cgroupruntimeextension/go.sum b/extension/cgroupruntimeextension/go.sum
index 9c0e407bc105..a3d6cb387828 100644
--- a/extension/cgroupruntimeextension/go.sum
+++ b/extension/cgroupruntimeextension/go.sum
@@ -56,8 +56,8 @@ github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncj
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
-github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.opentelemetry.io/collector/component v0.114.0 h1:SVGbm5LvHGSTEDv7p92oPuBgK5tuiWR82I9+LL4TtBE=
From a5068da5a0cbb68a91871a7d886aaa130c4bbe1d Mon Sep 17 00:00:00 2001
From: Christos Markou
Date: Tue, 26 Nov 2024 19:11:09 +0200
Subject: [PATCH 04/23] [receiver/receiver_creator] Add support for enabling
receivers/scrapers from K8s hints (#35617)
---
.chloggen/hints.yaml | 27 ++
extension/observer/endpoints.go | 6 +-
receiver/receivercreator/README.md | 188 +++++++++
receiver/receivercreator/config.go | 14 +
receiver/receivercreator/config_test.go | 2 +
receiver/receivercreator/discovery.go | 204 +++++++++
receiver/receivercreator/discovery_test.go | 398 ++++++++++++++++++
receiver/receivercreator/fixtures_test.go | 22 +
receiver/receivercreator/go.mod | 4 +-
receiver/receivercreator/observerhandler.go | 170 ++++----
.../receivercreator/observerhandler_test.go | 66 +++
11 files changed, 1025 insertions(+), 76 deletions(-)
create mode 100644 .chloggen/hints.yaml
create mode 100644 receiver/receivercreator/discovery.go
create mode 100644 receiver/receivercreator/discovery_test.go
diff --git a/.chloggen/hints.yaml b/.chloggen/hints.yaml
new file mode 100644
index 000000000000..764dc23441f2
--- /dev/null
+++ b/.chloggen/hints.yaml
@@ -0,0 +1,27 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: enhancement
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: receivercreator
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Add support for starting receivers/scrapers based on provided annotations' hints for metrics' collection
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [34427]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: [user]
diff --git a/extension/observer/endpoints.go b/extension/observer/endpoints.go
index cd51a35e036d..2f58c6932c26 100644
--- a/extension/observer/endpoints.go
+++ b/extension/observer/endpoints.go
@@ -223,11 +223,11 @@ func (p *Pod) Type() EndpointType {
// PodContainer is a discovered k8s pod's container
type PodContainer struct {
// Name of the container
- Name string
+ Name string `mapstructure:"container_name"`
// Image of the container
- Image string
+ Image string `mapstructure:"container_image"`
// ContainerID is the id of the container exposing the Endpoint
- ContainerID string
+ ContainerID string `mapstructure:"container_id"`
// Pod is the k8s pod in which the container is running
Pod Pod
}
diff --git a/receiver/receivercreator/README.md b/receiver/receivercreator/README.md
index 70ed6e5cb808..ac836eb98e02 100644
--- a/receiver/receivercreator/README.md
+++ b/receiver/receivercreator/README.md
@@ -439,3 +439,191 @@ service:
The full list of settings exposed for this receiver are documented [here](./config.go)
with detailed sample configurations [here](./testdata/config.yaml).
+
+
+## Generate receiver configurations from provided Hints
+
+Note: When the hints feature is enabled and hints are present for an endpoint, no receiver templates will be evaluated for that endpoint.
+
+Currently this feature is only supported for K8s environments and the `k8sobserver`.
+
+The discovery feature for K8s is enabled with the following setting:
+
+```yaml
+receiver_creator/metrics:
+ watch_observers: [ k8s_observer ]
+ discovery:
+ enabled: true
+ # Define which receivers should be ignored when provided through annotations
+ # ignore_receivers: []
+```
+
+Find below the supported annotations that users can define to automatically enable receivers to start collecting metrics signals from the target Pods/containers.
+
+### Supported metrics annotations
+
+#### Enable/disable discovery
+
+`io.opentelemetry.discovery.metrics/enabled` (Required. `"true"` or `"false"`)
+
+#### Define scraper
+
+`io.opentelemetry.discovery.metrics/scraper` (example: `"nginx"`)
+
+
+#### Define configuration
+
+`io.opentelemetry.discovery.metrics/config`
+
+For `"endpoint"` setting specifically, it sticks to urls that include
+```"`endpoint`"``` as it comes from the Port endpoint which is
+in form of `pod_ip:container_port`. This is to ensure that each Pod can only
+generate configuration that targets itself and not others.
+If no endpoint is provided the Pod's endpoint will be used (in form of `pod_ip:container_port`).
+
+**Example:**
+
+```yaml
+io.opentelemetry.discovery.metrics/config: |
+ endpoint: "http://`endpoint`/nginx_status"
+ collection_interval: "20s"
+ initial_delay: "20s"
+ read_buffer_size: "10"
+ xyz: "abc"
+```
+
+
+#### Support multiple target containers
+
+Users can target the annotation to a specific container by suffixing it with the name of the port that container exposes:
+`io.opentelemetry.discovery.metrics.<port>/config`.
+For example:
+```yaml
+io.opentelemetry.discovery.metrics.80/config: |
+ endpoint: "http://`endpoint`/nginx_status"
+```
+where `80` is the port that the target container exposes.
+
+If a Pod is annotated with both container level hints and pod level hints the container level hints have priority and
+the Pod level hints are used as a fallback (see detailed example below).
+
+The current implementation relies on the implementation of `k8sobserver` extension and specifically
+the [pod_endpoint](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/v0.111.0/extension/observer/k8sobserver/pod_endpoint.go).
+The hints are evaluated per container by extracting the annotations from each [`Port` endpoint](#Port) that is emitted.
+
+
+
+### Examples
+
+#### Metrics example
+
+Collector's configuration:
+```yaml
+receivers:
+ receiver_creator/metrics:
+ watch_observers: [ k8s_observer ]
+ discovery:
+ enabled: true
+ receivers:
+
+service:
+ extensions: [ k8s_observer]
+ pipelines:
+ metrics:
+ receivers: [ receiver_creator ]
+ processors: []
+ exporters: [ debug ]
+```
+
+Target Pod annotated with hints:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: nginx-conf
+data:
+ nginx.conf: |
+ user nginx;
+ worker_processes 1;
+ error_log /dev/stderr warn;
+ pid /var/run/nginx.pid;
+ events {
+ worker_connections 1024;
+ }
+ http {
+ include /etc/nginx/mime.types;
+ default_type application/octet-stream;
+
+ log_format main '$remote_addr - $remote_user [$time_local] "$request" '
+ '$status $body_bytes_sent "$http_referer" '
+ '"$http_user_agent" "$http_x_forwarded_for"';
+ access_log /dev/stdout main;
+ server {
+ listen 80;
+ server_name localhost;
+
+ location /nginx_status {
+ stub_status on;
+ }
+ }
+ include /etc/nginx/conf.d/*;
+ }
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: redis-deployment
+ labels:
+ app: redis
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: redis
+ template:
+ metadata:
+ labels:
+ app: redis
+ annotations:
+ # redis container port metrics hints
+ io.opentelemetry.discovery.metrics.6379/enabled: "true"
+ io.opentelemetry.discovery.metrics.6379/scraper: redis
+ io.opentelemetry.discovery.metrics.6379/config: |
+ collection_interval: "20s"
+ timeout: "10s"
+
+ # nginx container port metrics hints
+ io.opentelemetry.discovery.metrics.80/enabled: "true"
+ io.opentelemetry.discovery.metrics.80/scraper: nginx
+ io.opentelemetry.discovery.metrics.80/config: |
+ endpoint: "http://`endpoint`/nginx_status"
+ collection_interval: "30s"
+ timeout: "20s"
+ spec:
+ volumes:
+ - name: nginx-conf
+ configMap:
+ name: nginx-conf
+ items:
+ - key: nginx.conf
+ path: nginx.conf
+ containers:
+ - name: webserver
+ image: nginx:latest
+ ports:
+ - containerPort: 80
+ name: webserver
+ volumeMounts:
+ - mountPath: /etc/nginx/nginx.conf
+ readOnly: true
+ subPath: nginx.conf
+ name: nginx-conf
+ - image: redis
+ imagePullPolicy: IfNotPresent
+ name: redis
+ ports:
+ - name: redis
+ containerPort: 6379
+ protocol: TCP
+```
\ No newline at end of file
diff --git a/receiver/receivercreator/config.go b/receiver/receivercreator/config.go
index bb5ebfaa4f6f..e531ccf8c913 100644
--- a/receiver/receivercreator/config.go
+++ b/receiver/receivercreator/config.go
@@ -35,6 +35,12 @@ type receiverConfig struct {
// userConfigMap is an arbitrary map of string keys to arbitrary values as specified by the user
type userConfigMap map[string]any
+type receiverSignals struct {
+ metrics bool
+ logs bool
+ traces bool
+}
+
// receiverTemplate is the configuration of a single subreceiver.
type receiverTemplate struct {
receiverConfig
@@ -46,6 +52,7 @@ type receiverTemplate struct {
// It can contain expr expressions for endpoint env value expansion
ResourceAttributes map[string]any `mapstructure:"resource_attributes"`
rule rule
+ signals receiverSignals
}
// resourceAttributes holds a map of default resource attributes for each Endpoint type.
@@ -60,6 +67,7 @@ func newReceiverTemplate(name string, cfg userConfigMap) (receiverTemplate, erro
}
return receiverTemplate{
+ signals: receiverSignals{metrics: true, logs: true, traces: true},
receiverConfig: receiverConfig{
id: id,
config: cfg,
@@ -78,6 +86,12 @@ type Config struct {
// ResourceAttributes is a map of default resource attributes to add to each resource
// object received by this receiver from dynamically created receivers.
ResourceAttributes resourceAttributes `mapstructure:"resource_attributes"`
+ Discovery DiscoveryConfig `mapstructure:"discovery"`
+}
+
+type DiscoveryConfig struct {
+ Enabled bool `mapstructure:"enabled"`
+ IgnoreReceivers []string `mapstructure:"ignore_receivers"`
}
func (cfg *Config) Unmarshal(componentParser *confmap.Conf) error {
diff --git a/receiver/receivercreator/config_test.go b/receiver/receivercreator/config_test.go
index ee8779de712e..618add4e174d 100644
--- a/receiver/receivercreator/config_test.go
+++ b/receiver/receivercreator/config_test.go
@@ -90,6 +90,7 @@ func TestLoadConfig(t *testing.T) {
Rule: `type == "port"`,
ResourceAttributes: map[string]any{"one": "two"},
rule: portRule,
+ signals: receiverSignals{true, true, true},
},
"nop/1": {
receiverConfig: receiverConfig{
@@ -102,6 +103,7 @@ func TestLoadConfig(t *testing.T) {
Rule: `type == "port"`,
ResourceAttributes: map[string]any{"two": "three"},
rule: portRule,
+ signals: receiverSignals{true, true, true},
},
},
WatchObservers: []component.ID{
diff --git a/receiver/receivercreator/discovery.go b/receiver/receivercreator/discovery.go
new file mode 100644
index 000000000000..f8a694912751
--- /dev/null
+++ b/receiver/receivercreator/discovery.go
@@ -0,0 +1,204 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package receivercreator // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator"
+
+import (
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/go-viper/mapstructure/v2"
+ "go.uber.org/zap"
+ "gopkg.in/yaml.v3"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer"
+)
+
+const (
+ // hints prefix
+ otelHints = "io.opentelemetry.discovery"
+
+ // hint suffix for metrics
+ otelMetricsHints = otelHints + ".metrics"
+
+ // hints definitions
+ discoveryEnabledHint = "enabled"
+ scraperHint = "scraper"
+ configHint = "config"
+)
+
+// k8sHintsBuilder creates configurations from hints provided as Pod's annotations.
+type k8sHintsBuilder struct {
+ logger *zap.Logger
+ ignoreReceivers map[string]bool
+}
+
+func createK8sHintsBuilder(config DiscoveryConfig, logger *zap.Logger) k8sHintsBuilder {
+ ignoreReceivers := make(map[string]bool, len(config.IgnoreReceivers))
+ for _, r := range config.IgnoreReceivers {
+ ignoreReceivers[r] = true
+ }
+ return k8sHintsBuilder{
+ logger: logger,
+ ignoreReceivers: ignoreReceivers,
+ }
+}
+
+// createReceiverTemplateFromHints creates a receiver configuration based on the provided hints.
+// Hints are extracted from Pod's annotations.
+// Scraper configurations are only created for Port Endpoints.
+// TODO: Log receiver configurations are only created for Pod Container Endpoints.
+func (builder *k8sHintsBuilder) createReceiverTemplateFromHints(env observer.EndpointEnv) (*receiverTemplate, error) {
+	var pod observer.Pod
+
+	endpointType := getStringEnv(env, "type")
+	if endpointType == "" {
+		// Pass the raw value to %v; a zap.Field inside fmt.Errorf renders as an opaque struct.
+		return nil, fmt.Errorf("could not get endpoint type: %v", env)
+	}
+
+	if endpointType != string(observer.PortType) {
+		return nil, nil
+	}
+
+	builder.logger.Debug("handling hints for added endpoint", zap.Any("env", env))
+
+	if endpointPod, ok := env["pod"]; ok {
+		err := mapstructure.Decode(endpointPod, &pod)
+		if err != nil {
+			return nil, fmt.Errorf("could not extract endpoint's pod: %v", endpointPod)
+		}
+	} else {
+		return nil, nil
+	}
+
+	return builder.createScraper(pod.Annotations, env)
+}
+
+func (builder *k8sHintsBuilder) createScraper(
+	annotations map[string]string,
+	env observer.EndpointEnv,
+) (*receiverTemplate, error) {
+	var port uint16
+	var p observer.Port
+	err := mapstructure.Decode(env, &p)
+	if err != nil {
+		return nil, fmt.Errorf("could not extract port event: %v", env)
+	}
+	if p.Port == 0 {
+		return nil, fmt.Errorf("could not extract port: %v", env)
+	}
+	port = p.Port
+	pod := p.Pod
+
+	if !discoveryMetricsEnabled(annotations, otelMetricsHints, fmt.Sprint(port)) {
+		return nil, nil
+	}
+
+	subreceiverKey, found := getHintAnnotation(annotations, otelMetricsHints, scraperHint, fmt.Sprint(port))
+	if !found || subreceiverKey == "" {
+		// no scraper hint detected
+		return nil, nil
+	}
+	if _, ok := builder.ignoreReceivers[subreceiverKey]; ok {
+		// scraper is ignored
+		return nil, nil
+	}
+	builder.logger.Debug("handling added hinted receiver", zap.Any("subreceiverKey", subreceiverKey))
+
+	defaultEndpoint := getStringEnv(env, endpointConfigKey)
+	userConfMap, err := getScraperConfFromAnnotations(annotations, defaultEndpoint, fmt.Sprint(port), builder.logger)
+	if err != nil {
+		// %w keeps the underlying error inspectable via errors.Is/As.
+		return nil, fmt.Errorf("could not create receiver configuration: %w", err)
+	}
+
+	recTemplate, err := newReceiverTemplate(fmt.Sprintf("%v/%v_%v", subreceiverKey, pod.UID, port), userConfMap)
+	recTemplate.signals = receiverSignals{true, false, false}
+
+	return &recTemplate, err
+}
+
+func getScraperConfFromAnnotations(
+	annotations map[string]string,
+	defaultEndpoint, scopeSuffix string,
+	logger *zap.Logger,
+) (userConfigMap, error) {
+	conf := userConfigMap{}
+	conf[endpointConfigKey] = defaultEndpoint
+
+	configStr, found := getHintAnnotation(annotations, otelMetricsHints, configHint, scopeSuffix)
+	if !found || configStr == "" {
+		return conf, nil
+	}
+	if err := yaml.Unmarshal([]byte(configStr), &conf); err != nil {
+		return userConfigMap{}, fmt.Errorf("could not unmarshal configuration from hint: %w", err)
+	}
+
+	val := conf[endpointConfigKey]
+	confEndpoint, ok := val.(string)
+	if !ok {
+		logger.Debug("could not extract configured endpoint")
+		return userConfigMap{}, fmt.Errorf("could not extract configured endpoint")
+	}
+
+	err := validateEndpoint(confEndpoint, defaultEndpoint)
+	if err != nil {
+		logger.Debug("configured endpoint is not valid", zap.Error(err))
+		return userConfigMap{}, fmt.Errorf("configured endpoint is not valid: %w", err)
+	}
+	return conf, nil
+}
+
+func getHintAnnotation(annotations map[string]string, hintBase string, hintKey string, suffix string) (string, bool) {
+	// First look for a container-scoped hint, where the annotation key is suffixed
+	// with the port number (Port events) — TODO: with the container name for Pod Container events.
+	containerLevelHint, ok := annotations[fmt.Sprintf("%s.%s/%s", hintBase, suffix, hintKey)]
+	if ok {
+		return containerLevelHint, ok
+	}
+
+	// if there is no container level hint defined try to use the Pod level hint
+	podLevelHint, ok := annotations[fmt.Sprintf("%s/%s", hintBase, hintKey)]
+	return podLevelHint, ok
+}
+
+func discoveryMetricsEnabled(annotations map[string]string, hintBase string, scopeSuffix string) bool {
+ enabledHint, found := getHintAnnotation(annotations, hintBase, discoveryEnabledHint, scopeSuffix)
+ if !found {
+ return false
+ }
+ return enabledHint == "true"
+}
+
+func getStringEnv(env observer.EndpointEnv, key string) string {
+ var valString string
+ if val, ok := env[key]; ok {
+ valString, ok = val.(string)
+ if !ok {
+ return ""
+ }
+ }
+ return valString
+}
+
+func validateEndpoint(endpoint, defaultEndpoint string) error {
+	// replace temporarily the dynamic reference to ease the url parsing
+	endpoint = strings.ReplaceAll(endpoint, "`endpoint`", defaultEndpoint)
+
+	uri, _ := url.Parse(endpoint)
+	// target endpoint can come in form ip:port. In that case we fix the uri
+	// temporarily with adding http scheme
+	if uri == nil {
+		u, err := url.Parse("http://" + endpoint)
+		if err != nil {
+			return fmt.Errorf("could not parse endpoint")
+		}
+		uri = u
+	}
+
+	// configured endpoint should include the target Pod's endpoint
+	if uri.Host != defaultEndpoint {
+		return fmt.Errorf("configured endpoint should include target Pod's endpoint")
+	}
+	return nil
+}
diff --git a/receiver/receivercreator/discovery_test.go b/receiver/receivercreator/discovery_test.go
new file mode 100644
index 000000000000..982f78de8396
--- /dev/null
+++ b/receiver/receivercreator/discovery_test.go
@@ -0,0 +1,398 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package receivercreator
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "go.opentelemetry.io/collector/component"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zaptest"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer"
+)
+
+func TestK8sHintsBuilderMetrics(t *testing.T) {
+ logger := zaptest.NewLogger(t, zaptest.Level(zap.InfoLevel))
+
+ id := component.ID{}
+ err := id.UnmarshalText([]byte("redis/pod-2-UID_6379"))
+ assert.NoError(t, err)
+
+ config := `
+collection_interval: "20s"
+timeout: "30s"
+username: "username"
+password: "changeme"`
+ configRedis := `
+collection_interval: "20s"
+timeout: "130s"
+username: "username"
+password: "changeme"`
+
+ tests := map[string]struct {
+ inputEndpoint observer.Endpoint
+ expectedReceiver receiverTemplate
+ ignoreReceivers []string
+ wantError bool
+ }{
+ `metrics_pod_level_hints_only`: {
+ inputEndpoint: observer.Endpoint{
+ ID: "namespace/pod-2-UID/redis(6379)",
+ Target: "1.2.3.4:6379",
+ Details: &observer.Port{
+ Name: "redis", Pod: observer.Pod{
+ Name: "pod-2",
+ Namespace: "default",
+ UID: "pod-2-UID",
+ Labels: map[string]string{"env": "prod"},
+ Annotations: map[string]string{
+ otelMetricsHints + "/enabled": "true",
+ otelMetricsHints + "/scraper": "redis",
+ otelMetricsHints + "/config": config,
+ },
+ },
+ Port: 6379,
+ },
+ },
+ expectedReceiver: receiverTemplate{
+ receiverConfig: receiverConfig{
+ id: id,
+ config: userConfigMap{"collection_interval": "20s", "endpoint": "1.2.3.4:6379", "password": "changeme", "timeout": "30s", "username": "username"},
+ }, signals: receiverSignals{metrics: true, logs: false, traces: false},
+ },
+ wantError: false,
+ ignoreReceivers: []string{},
+ }, `metrics_pod_level_ignore`: {
+ inputEndpoint: observer.Endpoint{
+ ID: "namespace/pod-2-UID/redis(6379)",
+ Target: "1.2.3.4:6379",
+ Details: &observer.Port{
+ Name: "redis", Pod: observer.Pod{
+ Name: "pod-2",
+ Namespace: "default",
+ UID: "pod-2-UID",
+ Labels: map[string]string{"env": "prod"},
+ Annotations: map[string]string{
+ otelMetricsHints + "/enabled": "true",
+ otelMetricsHints + "/scraper": "redis",
+ otelMetricsHints + "/config": config,
+ },
+ },
+ Port: 6379,
+ },
+ },
+ expectedReceiver: receiverTemplate{},
+ wantError: false,
+ ignoreReceivers: []string{"redis"},
+ }, `metrics_pod_level_hints_only_defaults`: {
+ inputEndpoint: observer.Endpoint{
+ ID: "namespace/pod-2-UID/redis(6379)",
+ Target: "1.2.3.4:6379",
+ Details: &observer.Port{
+ Name: "redis", Pod: observer.Pod{
+ Name: "pod-2",
+ Namespace: "default",
+ UID: "pod-2-UID",
+ Labels: map[string]string{"env": "prod"},
+ Annotations: map[string]string{
+ otelMetricsHints + "/enabled": "true",
+ otelMetricsHints + "/scraper": "redis",
+ },
+ },
+ Port: 6379,
+ },
+ },
+ expectedReceiver: receiverTemplate{
+ receiverConfig: receiverConfig{
+ id: id,
+ config: userConfigMap{"endpoint": "1.2.3.4:6379"},
+ }, signals: receiverSignals{metrics: true, logs: false, traces: false},
+ },
+ wantError: false,
+ ignoreReceivers: []string{},
+ }, `metrics_container_level_hints`: {
+ inputEndpoint: observer.Endpoint{
+ ID: "namespace/pod-2-UID/redis(6379)",
+ Target: "1.2.3.4:6379",
+ Details: &observer.Port{
+ Name: "redis", Pod: observer.Pod{
+ Name: "pod-2",
+ Namespace: "default",
+ UID: "pod-2-UID",
+ Labels: map[string]string{"env": "prod"},
+ Annotations: map[string]string{
+ otelMetricsHints + ".6379/enabled": "true",
+ otelMetricsHints + ".6379/scraper": "redis",
+ otelMetricsHints + ".6379/config": config,
+ },
+ },
+ Port: 6379,
+ },
+ },
+ expectedReceiver: receiverTemplate{
+ receiverConfig: receiverConfig{
+ id: id,
+ config: userConfigMap{"collection_interval": "20s", "endpoint": "1.2.3.4:6379", "password": "changeme", "timeout": "30s", "username": "username"},
+ }, signals: receiverSignals{metrics: true, logs: false, traces: false},
+ },
+ wantError: false,
+ ignoreReceivers: []string{},
+ }, `metrics_mix_level_hints`: {
+ inputEndpoint: observer.Endpoint{
+ ID: "namespace/pod-2-UID/redis(6379)",
+ Target: "1.2.3.4:6379",
+ Details: &observer.Port{
+ Name: "redis", Pod: observer.Pod{
+ Name: "pod-2",
+ Namespace: "default",
+ UID: "pod-2-UID",
+ Labels: map[string]string{"env": "prod"},
+ Annotations: map[string]string{
+ otelMetricsHints + ".6379/enabled": "true",
+ otelMetricsHints + ".6379/scraper": "redis",
+ otelMetricsHints + "/config": config,
+ otelMetricsHints + ".6379/config": configRedis,
+ },
+ },
+ Port: 6379,
+ },
+ },
+ expectedReceiver: receiverTemplate{
+ receiverConfig: receiverConfig{
+ id: id,
+ config: userConfigMap{"collection_interval": "20s", "endpoint": "1.2.3.4:6379", "password": "changeme", "timeout": "130s", "username": "username"},
+ }, signals: receiverSignals{metrics: true, logs: false, traces: false},
+ },
+ wantError: false,
+ ignoreReceivers: []string{},
+ }, `metrics_no_port_error`: {
+ inputEndpoint: observer.Endpoint{
+ ID: "namespace/pod-2-UID/redis(6379)",
+ Target: "1.2.3.4",
+ Details: &observer.Port{
+ Name: "redis", Pod: observer.Pod{
+ Name: "pod-2",
+ Namespace: "default",
+ UID: "pod-2-UID",
+ Labels: map[string]string{"env": "prod"},
+ Annotations: map[string]string{
+ otelMetricsHints + "/enabled": "true",
+ otelMetricsHints + "/scraper": "redis",
+ otelMetricsHints + "/config": config,
+ },
+ },
+ },
+ },
+ expectedReceiver: receiverTemplate{},
+ wantError: true,
+ ignoreReceivers: []string{},
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ builder := createK8sHintsBuilder(DiscoveryConfig{Enabled: true, IgnoreReceivers: test.ignoreReceivers}, logger)
+ env, err := test.inputEndpoint.Env()
+ require.NoError(t, err)
+ subreceiverTemplate, err := builder.createReceiverTemplateFromHints(env)
+ if subreceiverTemplate == nil {
+ require.Equal(t, receiverTemplate{}, test.expectedReceiver)
+ return
+ }
+ if !test.wantError {
+ require.NoError(t, err)
+ require.Equal(t, subreceiverTemplate.receiverConfig.config, test.expectedReceiver.receiverConfig.config)
+ require.Equal(t, subreceiverTemplate.signals, test.expectedReceiver.signals)
+ require.Equal(t, subreceiverTemplate.id, test.expectedReceiver.id)
+ } else {
+ require.Error(t, err)
+ }
+ })
+ }
+}
+
+func TestGetConfFromAnnotations(t *testing.T) {
+ config := `
+endpoint: "0.0.0.0:8080"
+collection_interval: "20s"
+initial_delay: "20s"
+read_buffer_size: "10"
+nested_example:
+ foo: bar`
+ configNoEndpoint := `
+collection_interval: "20s"
+initial_delay: "20s"
+read_buffer_size: "10"
+nested_example:
+ foo: bar`
+ tests := map[string]struct {
+ hintsAnn map[string]string
+ expectedConf userConfigMap
+ defaultEndpoint string
+ scopeSuffix string
+ expectError bool
+ }{
+ "simple_annotation_case": {
+ hintsAnn: map[string]string{
+ "io.opentelemetry.discovery.metrics/enabled": "true",
+ "io.opentelemetry.discovery.metrics/config": config,
+ }, expectedConf: userConfigMap{
+ "collection_interval": "20s",
+ "endpoint": "0.0.0.0:8080",
+ "initial_delay": "20s",
+ "read_buffer_size": "10",
+ "nested_example": userConfigMap{"foo": "bar"},
+ }, defaultEndpoint: "0.0.0.0:8080",
+ scopeSuffix: "",
+ }, "simple_annotation_case_default_endpoint": {
+ hintsAnn: map[string]string{
+ "io.opentelemetry.discovery.metrics/enabled": "true",
+ "io.opentelemetry.discovery.metrics/config": configNoEndpoint,
+ }, expectedConf: userConfigMap{
+ "collection_interval": "20s",
+ "endpoint": "1.1.1.1:8080",
+ "initial_delay": "20s",
+ "read_buffer_size": "10",
+ "nested_example": userConfigMap{"foo": "bar"},
+ }, defaultEndpoint: "1.1.1.1:8080",
+ scopeSuffix: "",
+ }, "simple_annotation_case_scoped": {
+ hintsAnn: map[string]string{
+ "io.opentelemetry.discovery.metrics.8080/enabled": "true",
+ "io.opentelemetry.discovery.metrics.8080/config": config,
+ }, expectedConf: userConfigMap{
+ "collection_interval": "20s",
+ "endpoint": "0.0.0.0:8080",
+ "initial_delay": "20s",
+ "read_buffer_size": "10",
+ "nested_example": userConfigMap{"foo": "bar"},
+ }, defaultEndpoint: "0.0.0.0:8080",
+ scopeSuffix: "8080",
+ }, "simple_annotation_case_with_invalid_endpoint": {
+ hintsAnn: map[string]string{
+ "io.opentelemetry.discovery.metrics/enabled": "true",
+ "io.opentelemetry.discovery.metrics/config": config,
+ }, expectedConf: userConfigMap{},
+ defaultEndpoint: "1.2.3.4:8080",
+ scopeSuffix: "",
+ expectError: true,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ conf, err := getScraperConfFromAnnotations(test.hintsAnn, test.defaultEndpoint, test.scopeSuffix, zaptest.NewLogger(t, zaptest.Level(zap.InfoLevel)))
+ if test.expectError {
+ assert.Error(t, err)
+ } else {
+ assert.NoError(t, err)
+ assert.Equal(
+ t,
+ test.expectedConf,
+ conf)
+ }
+ })
+ }
+}
+
+func TestDiscoveryMetricsEnabled(t *testing.T) {
+ config := `
+endpoint: "0.0.0.0:8080"`
+ tests := map[string]struct {
+ hintsAnn map[string]string
+ expected bool
+ scopeSuffix string
+ }{
+ "test_enabled": {
+ hintsAnn: map[string]string{
+ "io.opentelemetry.discovery.metrics/config": config,
+ "io.opentelemetry.discovery.metrics/enabled": "true",
+ },
+ expected: true,
+ scopeSuffix: "",
+ }, "test_disabled": {
+ hintsAnn: map[string]string{
+ "io.opentelemetry.discovery.metrics/config": config,
+ "io.opentelemetry.discovery.metrics/enabled": "false",
+ },
+ expected: false,
+ scopeSuffix: "",
+ }, "test_enabled_scope": {
+ hintsAnn: map[string]string{
+ "io.opentelemetry.discovery.metrics/config": config,
+ "io.opentelemetry.discovery.metrics.8080/enabled": "true",
+ },
+ expected: true,
+ scopeSuffix: "8080",
+ }, "test_disabled_scoped": {
+ hintsAnn: map[string]string{
+ "io.opentelemetry.discovery.metrics/config": config,
+ "io.opentelemetry.discovery.metrics.8080/enabled": "false",
+ },
+ expected: false,
+ scopeSuffix: "8080",
+ },
+ }
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ assert.Equal(
+ t,
+ test.expected,
+ discoveryMetricsEnabled(test.hintsAnn, otelMetricsHints, test.scopeSuffix),
+ )
+ })
+ }
+}
+
+func TestValidateEndpoint(t *testing.T) {
+ tests := map[string]struct {
+ endpoint string
+ defaultEndpoint string
+ expectError bool
+ }{
+ "test_valid": {
+ endpoint: "http://1.2.3.4:8080/stats",
+ defaultEndpoint: "1.2.3.4:8080",
+ expectError: false,
+ },
+ "test_invalid": {
+ endpoint: "http://0.0.0.0:8080/some?foo=1.2.3.4:8080",
+ defaultEndpoint: "1.2.3.4:8080",
+ expectError: true,
+ },
+ "test_valid_no_scheme": {
+ endpoint: "1.2.3.4:8080/stats",
+ defaultEndpoint: "1.2.3.4:8080",
+ expectError: false,
+ },
+ "test_valid_no_scheme_no_path": {
+ endpoint: "1.2.3.4:8080",
+ defaultEndpoint: "1.2.3.4:8080",
+ expectError: false,
+ },
+ "test_valid_no_scheme_dynamic": {
+ endpoint: "`endpoint`/stats",
+ defaultEndpoint: "1.2.3.4:8080",
+ expectError: false,
+ },
+ "test_valid_dynamic": {
+ endpoint: "http://`endpoint`/stats",
+ defaultEndpoint: "1.2.3.4:8080",
+ expectError: false,
+ },
+ }
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ err := validateEndpoint(test.endpoint, test.defaultEndpoint)
+ if test.expectError {
+ assert.Error(t, err)
+ } else {
+ assert.NoError(t, err)
+ }
+ })
+ }
+}
diff --git a/receiver/receivercreator/fixtures_test.go b/receiver/receivercreator/fixtures_test.go
index 069604d70344..7abffe5d986e 100644
--- a/receiver/receivercreator/fixtures_test.go
+++ b/receiver/receivercreator/fixtures_test.go
@@ -56,6 +56,28 @@ var portEndpoint = observer.Endpoint{
},
}
+var config = `
+int_field: 20`
+
+var portEndpointWithHints = observer.Endpoint{
+ ID: "namespace/pod-2-UID/redis(6379)",
+ Target: "1.2.3.4:6379",
+ Details: &observer.Port{
+ Name: "redis", Pod: observer.Pod{
+ Name: "pod-2",
+ Namespace: "default",
+ UID: "pod-2-UID",
+ Labels: map[string]string{"env": "prod"},
+ Annotations: map[string]string{
+ otelMetricsHints + "/enabled": "true",
+ otelMetricsHints + "/scraper": "with_endpoint",
+ otelMetricsHints + "/config": config,
+ },
+ },
+ Port: 6379,
+ },
+}
+
var hostportEndpoint = observer.Endpoint{
ID: "port-1",
Target: "localhost:1234",
diff --git a/receiver/receivercreator/go.mod b/receiver/receivercreator/go.mod
index ffde47ed455d..724006841de4 100644
--- a/receiver/receivercreator/go.mod
+++ b/receiver/receivercreator/go.mod
@@ -4,6 +4,7 @@ go 1.22.0
require (
github.com/expr-lang/expr v1.16.9
+ github.com/go-viper/mapstructure/v2 v2.2.1
github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer v0.114.0
github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.114.0
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.114.0
@@ -26,6 +27,7 @@ require (
go.uber.org/goleak v1.3.0
go.uber.org/multierr v1.11.0
go.uber.org/zap v1.27.0
+ gopkg.in/yaml.v3 v3.0.1
)
require (
@@ -37,7 +39,6 @@ require (
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
- github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect
@@ -122,7 +123,6 @@ require (
google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect
google.golang.org/grpc v1.67.1 // indirect
google.golang.org/protobuf v1.35.1 // indirect
- gopkg.in/yaml.v3 v3.0.1 // indirect
)
replace github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer => ../../extension/observer
diff --git a/receiver/receivercreator/observerhandler.go b/receiver/receivercreator/observerhandler.go
index cdf5ed82b626..40244c8e7bfe 100644
--- a/receiver/receivercreator/observerhandler.go
+++ b/receiver/receivercreator/observerhandler.go
@@ -81,85 +81,28 @@ func (obs *observerHandler) OnAdd(added []observer.Endpoint) {
continue
}
- obs.params.TelemetrySettings.Logger.Debug("handling added endpoint", zap.Any("env", env))
-
- for _, template := range obs.config.receiverTemplates {
- if matches, e := template.rule.eval(env); e != nil {
- obs.params.TelemetrySettings.Logger.Error("failed matching rule", zap.String("rule", template.Rule), zap.Error(e))
- continue
- } else if !matches {
- continue
- }
-
- obs.params.TelemetrySettings.Logger.Info("starting receiver",
- zap.String("name", template.id.String()),
- zap.String("endpoint", e.Target),
- zap.String("endpoint_id", string(e.ID)))
-
- resolvedConfig, err := expandConfig(template.config, env)
+ if obs.config.Discovery.Enabled {
+ builder := createK8sHintsBuilder(obs.config.Discovery, obs.params.TelemetrySettings.Logger)
+ subreceiverTemplate, err := builder.createReceiverTemplateFromHints(env)
if err != nil {
- obs.params.TelemetrySettings.Logger.Error("unable to resolve template config", zap.String("receiver", template.id.String()), zap.Error(err))
- continue
- }
- obs.params.TelemetrySettings.Logger.Debug("resolved config", zap.String("receiver", template.id.String()), zap.Any("config", resolvedConfig))
-
- discoveredCfg := userConfigMap{}
- // If user didn't set endpoint set to default value as well as
- // flag indicating we've done this for later validation.
- if _, ok := resolvedConfig[endpointConfigKey]; !ok {
- discoveredCfg[endpointConfigKey] = e.Target
- discoveredCfg[tmpSetEndpointConfigKey] = struct{}{}
+ obs.params.TelemetrySettings.Logger.Error("could not extract configurations from K8s hints' annotations", zap.Any("err", err))
+ break
}
-
- // Though not necessary with contrib provided observers, nothing is stopping custom
- // ones from using expr in their Target values.
- discoveredConfig, err := expandConfig(discoveredCfg, env)
- if err != nil {
- obs.params.TelemetrySettings.Logger.Error("unable to resolve discovered config", zap.String("receiver", template.id.String()), zap.Error(err))
+ if subreceiverTemplate != nil {
+ obs.params.TelemetrySettings.Logger.Debug("adding K8s hinted receiver", zap.Any("subreceiver", subreceiverTemplate))
+ obs.startReceiver(*subreceiverTemplate, env, e)
continue
}
+ }
- resAttrs := map[string]string{}
- for k, v := range template.ResourceAttributes {
- strVal, ok := v.(string)
- if !ok {
- obs.params.TelemetrySettings.Logger.Info(fmt.Sprintf("ignoring unsupported `resource_attributes` %q value %v", k, v))
- continue
- }
- resAttrs[k] = strVal
- }
-
- // Adds default and/or configured resource attributes (e.g. k8s.pod.uid) to resources
- // as telemetry is emitted.
- var consumer *enhancingConsumer
- if consumer, err = newEnhancingConsumer(
- obs.config.ResourceAttributes,
- resAttrs,
- env,
- e,
- obs.nextLogsConsumer,
- obs.nextMetricsConsumer,
- obs.nextTracesConsumer,
- ); err != nil {
- obs.params.TelemetrySettings.Logger.Error("failed creating resource enhancer", zap.String("receiver", template.id.String()), zap.Error(err))
+ for _, template := range obs.config.receiverTemplates {
+ if matches, err := template.rule.eval(env); err != nil {
+ obs.params.TelemetrySettings.Logger.Error("failed matching rule", zap.String("rule", template.Rule), zap.Error(err))
continue
- }
-
- var receiver component.Component
- if receiver, err = obs.runner.start(
- receiverConfig{
- id: template.id,
- config: resolvedConfig,
- endpointID: e.ID,
- },
- discoveredConfig,
- consumer,
- ); err != nil {
- obs.params.TelemetrySettings.Logger.Error("failed to start receiver", zap.String("receiver", template.id.String()), zap.Error(err))
+ } else if !matches {
continue
}
-
- obs.receiversByEndpointID.Put(e.ID, receiver)
+ obs.startReceiver(template, env, e)
}
}
}
@@ -198,3 +141,88 @@ func (obs *observerHandler) OnChange(changed []observer.Endpoint) {
obs.OnRemove(changed)
obs.OnAdd(changed)
}
+
+func (obs *observerHandler) startReceiver(template receiverTemplate, env observer.EndpointEnv, e observer.Endpoint) {
+ obs.params.TelemetrySettings.Logger.Info("starting receiver",
+ zap.String("name", template.id.String()),
+ zap.String("endpoint", e.Target),
+ zap.String("endpoint_id", string(e.ID)),
+ zap.Any("config", template.config))
+
+ resolvedConfig, err := expandConfig(template.config, env)
+ if err != nil {
+ obs.params.TelemetrySettings.Logger.Error("unable to resolve template config", zap.String("receiver", template.id.String()), zap.Error(err))
+ return
+ }
+
+ discoveredCfg := userConfigMap{}
+ // If user didn't set endpoint set to default value as well as
+ // flag indicating we've done this for later validation.
+ if _, ok := resolvedConfig[endpointConfigKey]; !ok {
+ discoveredCfg[endpointConfigKey] = e.Target
+ discoveredCfg[tmpSetEndpointConfigKey] = struct{}{}
+ }
+
+ // Though not necessary with contrib provided observers, nothing is stopping custom
+ // ones from using expr in their Target values.
+ discoveredConfig, err := expandConfig(discoveredCfg, env)
+ if err != nil {
+ obs.params.TelemetrySettings.Logger.Error("unable to resolve discovered config", zap.String("receiver", template.id.String()), zap.Error(err))
+ return
+ }
+
+ resAttrs := map[string]string{}
+ for k, v := range template.ResourceAttributes {
+ strVal, ok := v.(string)
+ if !ok {
+ obs.params.TelemetrySettings.Logger.Info(fmt.Sprintf("ignoring unsupported `resource_attributes` %q value %v", k, v))
+ continue
+ }
+ resAttrs[k] = strVal
+ }
+
+ // Adds default and/or configured resource attributes (e.g. k8s.pod.uid) to resources
+ // as telemetry is emitted.
+ var consumer *enhancingConsumer
+ if consumer, err = newEnhancingConsumer(
+ obs.config.ResourceAttributes,
+ resAttrs,
+ env,
+ e,
+ obs.nextLogsConsumer,
+ obs.nextMetricsConsumer,
+ obs.nextTracesConsumer,
+ ); err != nil {
+ obs.params.TelemetrySettings.Logger.Error("failed creating resource enhancer", zap.String("receiver", template.id.String()), zap.Error(err))
+ return
+ }
+
+ filterConsumerSignals(consumer, template.signals)
+
+ var receiver component.Component
+ if receiver, err = obs.runner.start(
+ receiverConfig{
+ id: template.id,
+ config: resolvedConfig,
+ endpointID: e.ID,
+ },
+ discoveredConfig,
+ consumer,
+ ); err != nil {
+ obs.params.TelemetrySettings.Logger.Error("failed to start receiver", zap.String("receiver", template.id.String()), zap.Error(err))
+ return
+ }
+ obs.receiversByEndpointID.Put(e.ID, receiver)
+}
+
+func filterConsumerSignals(consumer *enhancingConsumer, signals receiverSignals) {
+	if !signals.metrics {
+		consumer.metrics = nil
+	}
+	if !signals.logs {
+		consumer.logs = nil
+	}
+	if !signals.traces {
+		consumer.traces = nil
+	}
+}
diff --git a/receiver/receivercreator/observerhandler_test.go b/receiver/receivercreator/observerhandler_test.go
index 14cd5e7a7c97..8b91da5064a2 100644
--- a/receiver/receivercreator/observerhandler_test.go
+++ b/receiver/receivercreator/observerhandler_test.go
@@ -78,6 +78,7 @@ func TestOnAddForMetrics(t *testing.T) {
rule: portRule,
Rule: `type == "port"`,
ResourceAttributes: map[string]any{},
+ signals: receiverSignals{true, true, true},
},
}
@@ -121,6 +122,66 @@ func TestOnAddForMetrics(t *testing.T) {
}
}
+func TestOnAddForMetricsWithHints(t *testing.T) {
+ for _, test := range []struct {
+ name string
+ expectedReceiverType component.Component
+ expectedReceiverConfig component.Config
+ expectedError string
+ }{
+ {
+ name: "dynamically set with supported endpoint",
+ expectedReceiverType: &nopWithEndpointReceiver{},
+ expectedReceiverConfig: &nopWithEndpointConfig{
+ IntField: 20,
+ Endpoint: "1.2.3.4:6379",
+ },
+ },
+ } {
+ t.Run(test.name, func(t *testing.T) {
+ cfg := createDefaultConfig().(*Config)
+ cfg.Discovery.Enabled = true
+
+ handler, mr := newObserverHandler(t, cfg, nil, consumertest.NewNop(), nil)
+ handler.OnAdd([]observer.Endpoint{
+ portEndpointWithHints,
+ unsupportedEndpoint,
+ })
+
+ if test.expectedError != "" {
+ assert.Equal(t, 0, handler.receiversByEndpointID.Size())
+ require.Error(t, mr.lastError)
+ require.ErrorContains(t, mr.lastError, test.expectedError)
+ require.Nil(t, mr.startedComponent)
+ return
+ }
+
+ assert.Equal(t, 1, handler.receiversByEndpointID.Size())
+ require.NoError(t, mr.lastError)
+ require.NotNil(t, mr.startedComponent)
+
+ wr, ok := mr.startedComponent.(*wrappedReceiver)
+ require.True(t, ok)
+
+ require.Nil(t, wr.logs)
+ require.Nil(t, wr.traces)
+
+ var actualConfig component.Config
+ switch v := wr.metrics.(type) {
+ case *nopWithEndpointReceiver:
+ require.NotNil(t, v)
+ actualConfig = v.cfg
+ case *nopWithoutEndpointReceiver:
+ require.NotNil(t, v)
+ actualConfig = v.cfg
+ default:
+ t.Fatalf("unexpected startedComponent: %T", v)
+ }
+ require.Equal(t, test.expectedReceiverConfig, actualConfig)
+ })
+ }
+}
+
func TestOnAddForLogs(t *testing.T) {
for _, test := range []struct {
name string
@@ -180,6 +241,7 @@ func TestOnAddForLogs(t *testing.T) {
rule: portRule,
Rule: `type == "port"`,
ResourceAttributes: map[string]any{},
+ signals: receiverSignals{metrics: true, logs: true, traces: true},
},
}
@@ -282,6 +344,7 @@ func TestOnAddForTraces(t *testing.T) {
rule: portRule,
Rule: `type == "port"`,
ResourceAttributes: map[string]any{},
+ signals: receiverSignals{metrics: true, logs: true, traces: true},
},
}
@@ -338,6 +401,7 @@ func TestOnRemoveForMetrics(t *testing.T) {
rule: portRule,
Rule: `type == "port"`,
ResourceAttributes: map[string]any{},
+ signals: receiverSignals{metrics: true, logs: true, traces: true},
},
}
handler, r := newObserverHandler(t, cfg, nil, consumertest.NewNop(), nil)
@@ -367,6 +431,7 @@ func TestOnRemoveForLogs(t *testing.T) {
rule: portRule,
Rule: `type == "port"`,
ResourceAttributes: map[string]any{},
+ signals: receiverSignals{metrics: true, logs: true, traces: true},
},
}
handler, r := newObserverHandler(t, cfg, consumertest.NewNop(), nil, nil)
@@ -396,6 +461,7 @@ func TestOnChange(t *testing.T) {
rule: portRule,
Rule: `type == "port"`,
ResourceAttributes: map[string]any{},
+ signals: receiverSignals{metrics: true, logs: true, traces: true},
},
}
handler, r := newObserverHandler(t, cfg, nil, consumertest.NewNop(), nil)
From 4f5671a5bf43ca163f9f54237fe39c59662d1fe6 Mon Sep 17 00:00:00 2001
From: Mackenzie <63265430+mackjmr@users.noreply.github.com>
Date: Wed, 27 Nov 2024 10:00:14 +0100
Subject: [PATCH 05/23] [chore][exporter/influxdb] Use NewDefaultClientConfig
instead of manually creating struct (#35521)
**Description:**
This PR makes usage of `NewDefaultClientConfig` instead of manually
creating the confighttp.ClientConfig struct.
**Link to tracking Issue:** #35457
---------
Co-authored-by: Ziqi Zhao
---
exporter/influxdbexporter/config_test.go | 10 +++++-----
exporter/influxdbexporter/factory.go | 13 +++++++------
exporter/influxdbexporter/writer_test.go | 6 +++---
3 files changed, 15 insertions(+), 14 deletions(-)
diff --git a/exporter/influxdbexporter/config_test.go b/exporter/influxdbexporter/config_test.go
index 7bcb99dff6f6..6459d4cad16a 100644
--- a/exporter/influxdbexporter/config_test.go
+++ b/exporter/influxdbexporter/config_test.go
@@ -22,6 +22,10 @@ import (
)
func TestLoadConfig(t *testing.T) {
+ clientConfig := confighttp.NewDefaultClientConfig()
+ clientConfig.Endpoint = "http://localhost:8080"
+ clientConfig.Timeout = 500 * time.Millisecond
+ clientConfig.Headers = map[string]configopaque.String{"User-Agent": "OpenTelemetry -> Influx"}
t.Parallel()
cm, err := confmaptest.LoadConf(filepath.Join("testdata", "config.yaml"))
@@ -38,11 +42,7 @@ func TestLoadConfig(t *testing.T) {
{
id: component.NewIDWithName(metadata.Type, "override-config"),
expected: &Config{
- ClientConfig: confighttp.ClientConfig{
- Endpoint: "http://localhost:8080",
- Timeout: 500 * time.Millisecond,
- Headers: map[string]configopaque.String{"User-Agent": "OpenTelemetry -> Influx"},
- },
+ ClientConfig: clientConfig,
QueueSettings: exporterhelper.QueueConfig{
Enabled: true,
NumConsumers: 3,
diff --git a/exporter/influxdbexporter/factory.go b/exporter/influxdbexporter/factory.go
index fabf1a5787c1..d9cfa42ae311 100644
--- a/exporter/influxdbexporter/factory.go
+++ b/exporter/influxdbexporter/factory.go
@@ -34,13 +34,14 @@ func NewFactory() exporter.Factory {
}
func createDefaultConfig() component.Config {
+ clientConfig := confighttp.NewDefaultClientConfig()
+ clientConfig.Timeout = 5 * time.Second
+ clientConfig.Headers = map[string]configopaque.String{
+ "User-Agent": "OpenTelemetry -> Influx",
+ }
+
return &Config{
- ClientConfig: confighttp.ClientConfig{
- Timeout: 5 * time.Second,
- Headers: map[string]configopaque.String{
- "User-Agent": "OpenTelemetry -> Influx",
- },
- },
+ ClientConfig: clientConfig,
QueueSettings: exporterhelper.NewDefaultQueueConfig(),
BackOffConfig: configretry.NewDefaultBackOffConfig(),
MetricsSchema: common.MetricsSchemaTelegrafPrometheusV1.String(),
diff --git a/exporter/influxdbexporter/writer_test.go b/exporter/influxdbexporter/writer_test.go
index 36fdffa3bbd4..8970bf6b29dc 100644
--- a/exporter/influxdbexporter/writer_test.go
+++ b/exporter/influxdbexporter/writer_test.go
@@ -158,13 +158,13 @@ func Test_influxHTTPWriterBatch_EnqueuePoint_emptyTagValue(t *testing.T) {
t.Cleanup(noopHTTPServer.Close)
nowTime := time.Unix(1000, 2000)
+ clientConfig := confighttp.NewDefaultClientConfig()
+ clientConfig.Endpoint = noopHTTPServer.URL
influxWriter, err := newInfluxHTTPWriter(
new(common.NoopLogger),
&Config{
- ClientConfig: confighttp.ClientConfig{
- Endpoint: noopHTTPServer.URL,
- },
+ ClientConfig: clientConfig,
},
componenttest.NewNopTelemetrySettings())
require.NoError(t, err)
From 6c43968fdcbe833c724da359efb91018d81ddafe Mon Sep 17 00:00:00 2001
From: Alok Kumar Singh <62210712+akstron@users.noreply.github.com>
Date: Wed, 27 Nov 2024 01:05:00 -0800
Subject: [PATCH 06/23] [cmd/opampsupervisor] Enable Strict Unmarshal for
Supervisor Configuration (#36148)
#### Description
The changes include providing a custom `DecoderConfig` with
`ErrorUnused` enabled instead of using the default `DecoderConfig`
provided by `koanf`
#### Link to tracking issue
Fixes: #35838
---------
Signed-off-by: Alok Kumar Singh
Co-authored-by: Evan Bradley <11745660+evan-bradley@users.noreply.github.com>
---
.chloggen/strict-unmarshal.yaml | 27 +++++++++++++++++
cmd/opampsupervisor/go.mod | 2 +-
.../supervisor/config/config.go | 11 ++++++-
.../supervisor/supervisor_test.go | 29 +++++++++++++++++++
4 files changed, 67 insertions(+), 2 deletions(-)
create mode 100644 .chloggen/strict-unmarshal.yaml
diff --git a/.chloggen/strict-unmarshal.yaml b/.chloggen/strict-unmarshal.yaml
new file mode 100644
index 000000000000..eef42e520340
--- /dev/null
+++ b/.chloggen/strict-unmarshal.yaml
@@ -0,0 +1,27 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: 'breaking'
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: opampsupervisor
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: "Enable strict unmarshalling of the OpAMP Supervisor config file. An error will now be returned if an invalid config key is set."
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [35838]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: [user]
diff --git a/cmd/opampsupervisor/go.mod b/cmd/opampsupervisor/go.mod
index 9e990ce8661a..733a8867454f 100644
--- a/cmd/opampsupervisor/go.mod
+++ b/cmd/opampsupervisor/go.mod
@@ -4,6 +4,7 @@ go 1.22.0
require (
github.com/cenkalti/backoff/v4 v4.3.0
+ github.com/go-viper/mapstructure/v2 v2.2.1
github.com/google/uuid v1.6.0
github.com/knadh/koanf/maps v0.1.1
github.com/knadh/koanf/parsers/yaml v0.1.0
@@ -25,7 +26,6 @@ require (
require (
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/fsnotify/fsnotify v1.8.0 // indirect
- github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/gorilla/websocket v1.5.3 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
diff --git a/cmd/opampsupervisor/supervisor/config/config.go b/cmd/opampsupervisor/supervisor/config/config.go
index 68c99fa4c755..4bc88b63fff1 100644
--- a/cmd/opampsupervisor/supervisor/config/config.go
+++ b/cmd/opampsupervisor/supervisor/config/config.go
@@ -13,6 +13,7 @@ import (
"runtime"
"time"
+ "github.com/go-viper/mapstructure/v2"
"github.com/knadh/koanf/parsers/yaml"
"github.com/knadh/koanf/providers/file"
"github.com/knadh/koanf/v2"
@@ -41,11 +42,19 @@ func Load(configFile string) (Supervisor, error) {
return Supervisor{}, err
}
+ cfg := DefaultSupervisor()
+
decodeConf := koanf.UnmarshalConf{
Tag: "mapstructure",
+ DecoderConfig: &mapstructure.DecoderConfig{
+ DecodeHook: mapstructure.ComposeDecodeHookFunc(
+ mapstructure.StringToTimeDurationHookFunc()),
+ Result: &cfg,
+ WeaklyTypedInput: true,
+ ErrorUnused: true,
+ },
}
- cfg := DefaultSupervisor()
if err := k.UnmarshalWithConf("", &cfg, decodeConf); err != nil {
return Supervisor{}, fmt.Errorf("cannot parse %s: %w", configFile, err)
}
diff --git a/cmd/opampsupervisor/supervisor/supervisor_test.go b/cmd/opampsupervisor/supervisor/supervisor_test.go
index 3a034bad4113..24301fc19626 100644
--- a/cmd/opampsupervisor/supervisor/supervisor_test.go
+++ b/cmd/opampsupervisor/supervisor/supervisor_test.go
@@ -1366,3 +1366,32 @@ service:
require.NoError(t, err)
require.Equal(t, expectedConfig, noopConfig)
}
+
+func TestSupervisor_configStrictUnmarshal(t *testing.T) {
+ tmpDir, err := os.MkdirTemp(os.TempDir(), "*")
+ require.NoError(t, err)
+
+ configuration := `
+server:
+ endpoint: ws://localhost/v1/opamp
+ tls:
+ insecure: true
+
+capabilities:
+ reports_effective_config: true
+ invalid_key: invalid_value
+`
+
+ cfgPath := filepath.Join(tmpDir, "config.yaml")
+ err = os.WriteFile(cfgPath, []byte(configuration), 0o600)
+ require.NoError(t, err)
+
+ _, err = config.Load(cfgPath)
+ require.Error(t, err)
+ require.ErrorContains(t, err, "cannot parse")
+
+ t.Cleanup(func() {
+ require.NoError(t, os.Chmod(tmpDir, 0o700))
+ require.NoError(t, os.RemoveAll(tmpDir))
+ })
+}
From af5200a28a249c90bdbf06dbc4954b2ed2d23bbc Mon Sep 17 00:00:00 2001
From: Sam DeHaan
Date: Wed, 27 Nov 2024 06:03:05 -0500
Subject: [PATCH 07/23] [exporter/loadbalancing] Update k8sresolver handler to
properly manage update events (#36505)
#### Description
The load balancing exporter's k8sresolver was not handling update events
properly. The `callback` function was being executed after cleanup of
old endpoints and also after adding new endpoints. This causes exporter
churn in the case of an event in which the lists contain shared
elements. See the
[documentation](https://pkg.go.dev/k8s.io/client-go/tools/cache#ResourceEventHandler)
for examples where the state might change but the IP Addresses would
not, including the regular re-list events that might have zero changes.
#### Link to tracking issue
Fixes
https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/35658
May be related to
https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/35810
as well.
#### Testing
Added tests for no-change onChange call.
---
.chloggen/single-callback-k8sresolver.yaml | 27 +++++++++++++
.../resolver_k8s_handler.go | 40 +++++++++++--------
.../resolver_k8s_test.go | 40 +++++++++++++++++++
3 files changed, 91 insertions(+), 16 deletions(-)
create mode 100644 .chloggen/single-callback-k8sresolver.yaml
diff --git a/.chloggen/single-callback-k8sresolver.yaml b/.chloggen/single-callback-k8sresolver.yaml
new file mode 100644
index 000000000000..b0eb561b1d46
--- /dev/null
+++ b/.chloggen/single-callback-k8sresolver.yaml
@@ -0,0 +1,27 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: bug_fix
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: loadbalancingexporter
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: The k8sresolver in loadbalancingexporter was triggering exporter churn due to the way the change event was handled.
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [35658]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: []
diff --git a/exporter/loadbalancingexporter/resolver_k8s_handler.go b/exporter/loadbalancingexporter/resolver_k8s_handler.go
index 0eac62ea40d2..186111eba4d5 100644
--- a/exporter/loadbalancingexporter/resolver_k8s_handler.go
+++ b/exporter/loadbalancingexporter/resolver_k8s_handler.go
@@ -25,7 +25,7 @@ type handler struct {
}
func (h handler) OnAdd(obj any, _ bool) {
- var endpoints []string
+ var endpoints map[string]bool
switch object := obj.(type) {
case *corev1.Endpoints:
@@ -36,7 +36,7 @@ func (h handler) OnAdd(obj any, _ bool) {
return
}
changed := false
- for _, ep := range endpoints {
+ for ep := range endpoints {
if _, loaded := h.endpoints.LoadOrStore(ep, true); !loaded {
changed = true
}
@@ -49,28 +49,36 @@ func (h handler) OnAdd(obj any, _ bool) {
func (h handler) OnUpdate(oldObj, newObj any) {
switch oldEps := oldObj.(type) {
case *corev1.Endpoints:
- epRemove := convertToEndpoints(oldEps)
- for _, ep := range epRemove {
- h.endpoints.Delete(ep)
- }
- if len(epRemove) > 0 {
- _, _ = h.callback(context.Background())
- }
-
newEps, ok := newObj.(*corev1.Endpoints)
if !ok {
h.logger.Warn("Got an unexpected Kubernetes data type during the update of the pods for a service", zap.Any("obj", newObj))
h.telemetry.LoadbalancerNumResolutions.Add(context.Background(), 1, metric.WithAttributeSet(k8sResolverFailureAttrSet))
return
}
+
+ oldEndpoints := convertToEndpoints(oldEps)
+ newEndpoints := convertToEndpoints(newEps)
changed := false
- for _, ep := range convertToEndpoints(newEps) {
+
+ // Iterate through old endpoints and remove those that are not in the new list.
+ for ep := range oldEndpoints {
+ if _, ok := newEndpoints[ep]; !ok {
+ h.endpoints.Delete(ep)
+ changed = true
+ }
+ }
+
+ // Iterate through new endpoints and add those that are not in the endpoints map already.
+ for ep := range newEndpoints {
if _, loaded := h.endpoints.LoadOrStore(ep, true); !loaded {
changed = true
}
}
+
if changed {
_, _ = h.callback(context.Background())
+ } else {
+ h.logger.Debug("No changes detected in the endpoints for the service", zap.Any("old", oldEps), zap.Any("new", newEps))
}
default: // unsupported
h.logger.Warn("Got an unexpected Kubernetes data type during the update of the pods for a service", zap.Any("obj", oldObj))
@@ -80,7 +88,7 @@ func (h handler) OnUpdate(oldObj, newObj any) {
}
func (h handler) OnDelete(obj any) {
- var endpoints []string
+ var endpoints map[string]bool
switch object := obj.(type) {
case *cache.DeletedFinalStateUnknown:
h.OnDelete(object.Obj)
@@ -95,19 +103,19 @@ func (h handler) OnDelete(obj any) {
return
}
if len(endpoints) != 0 {
- for _, endpoint := range endpoints {
+ for endpoint := range endpoints {
h.endpoints.Delete(endpoint)
}
_, _ = h.callback(context.Background())
}
}
-func convertToEndpoints(eps ...*corev1.Endpoints) []string {
- var ipAddress []string
+func convertToEndpoints(eps ...*corev1.Endpoints) map[string]bool {
+ ipAddress := map[string]bool{}
for _, ep := range eps {
for _, subsets := range ep.Subsets {
for _, addr := range subsets.Addresses {
- ipAddress = append(ipAddress, addr.IP)
+ ipAddress[addr.IP] = true
}
}
}
diff --git a/exporter/loadbalancingexporter/resolver_k8s_test.go b/exporter/loadbalancingexporter/resolver_k8s_test.go
index 71cec20f9bfd..5a4e77dd593b 100644
--- a/exporter/loadbalancingexporter/resolver_k8s_test.go
+++ b/exporter/loadbalancingexporter/resolver_k8s_test.go
@@ -77,6 +77,7 @@ func TestK8sResolve(t *testing.T) {
name string
args args
simulateFn func(*suiteContext, args) error
+ onChangeFn func([]string)
verifyFn func(*suiteContext, args) error
}{
{
@@ -116,6 +117,41 @@ func TestK8sResolve(t *testing.T) {
return nil
},
},
+ {
+ name: "simulate re-list that does not change endpoints",
+ args: args{
+ logger: zap.NewNop(),
+ service: "lb",
+ namespace: "default",
+ ports: []int32{8080, 9090},
+ },
+ simulateFn: func(suiteCtx *suiteContext, args args) error {
+ exist := suiteCtx.endpoint.DeepCopy()
+ patch := client.MergeFrom(exist)
+ data, err := patch.Data(exist)
+ if err != nil {
+ return err
+ }
+ _, err = suiteCtx.clientset.CoreV1().Endpoints(args.namespace).
+ Patch(context.TODO(), args.service, types.MergePatchType, data, metav1.PatchOptions{})
+ return err
+ },
+ onChangeFn: func([]string) {
+ assert.Fail(t, "should not call onChange")
+ },
+ verifyFn: func(ctx *suiteContext, _ args) error {
+ if _, err := ctx.resolver.resolve(context.Background()); err != nil {
+ return err
+ }
+
+ assert.Equal(t, []string{
+ "192.168.10.100:8080",
+ "192.168.10.100:9090",
+ }, ctx.resolver.Endpoints(), "resolver failed, endpoints not equal")
+
+ return nil
+ },
+ },
{
name: "simulate change the backend ip address",
args: args{
@@ -177,6 +213,10 @@ func TestK8sResolve(t *testing.T) {
suiteCtx, teardownSuite := setupSuite(t, tt.args)
defer teardownSuite(t)
+ if tt.onChangeFn != nil {
+ suiteCtx.resolver.onChange(tt.onChangeFn)
+ }
+
err := tt.simulateFn(suiteCtx, tt.args)
assert.NoError(t, err)
From 8e0ea012fe93a272b78e27a94e690084538b0963 Mon Sep 17 00:00:00 2001
From: Vyacheslav Stepanov
Date: Wed, 27 Nov 2024 15:30:33 +0200
Subject: [PATCH 08/23] [exporter/loadbalancing] Add top level sending_queue,
retry_on_failure and timeout settings (#36094)
#### Description
##### Problem statement
`loadbalancing` exporter is actually a wrapper that creates and
manages a set of actual `otlp` exporters
Those `otlp` exporters technically share the same configuration parameters
that are defined on the `loadbalancing` exporter level, including the
`sending_queue` configuration. The only difference is the `endpoint`
parameter, which is substituted by the `loadbalancing` exporter itself
This means, that `sending_queue`, `retry_on_failure` and `timeout`
settings can be defined only on `otlp` sub-exporters, while top-level
`loadbalancing` exporter is missing all those settings
This configuration approach produces several issues that have already been
reported by users:
* Impossibility to use Persistent Queue in `loadbalancing` exporter (see
#16826). That happens because the `otlp` sub-exporters share the
same configurations, including configuration of the queue, i.e. they all
are using the same `storage` instance at the same time which is not
possible at the moment
* Data loss even using `sending_queue` configuration (see #35378).
That happens because the queue is defined at the level of the `otlp`
sub-exporters, and if such an exporter cannot flush data from its queue
(for example, the endpoint is not available anymore) there is no other
option than to discard data from the queue, i.e. there is no higher-level
queue and persistent storage to which data can be returned in case of
permanent failure
There might be some other potential issues that were already tracked and
are related to the current configuration approach
##### Proposed solution
The easiest way to solve issues above - is to use standard approach for
queue, retry and timeout configuration using `exporterhelper`
This will bring queue, retry and timeout functionality to the top-level
of `loadbalancing` exporter, instead of `otlp` sub-exporters
Related to mentioned issues it will bring:
* Single Persistent Queue, that is used by all `otlp` sub-exporters (not
directly of course)
* Queue will not be discarded/destroyed if any (or all) of endpoint that
are unreachable anymore, top-level queue will keep data until new
endpoints will be available
* Scale-up and scale-down event for next layer of OpenTelemetry
Collectors in K8s environments will be more predictable, and will not
include data loss anymore (potential fix for #33959). There is still a
big chance of inconsistency when some data will be sent to an incorrect
endpoint, but it's already a better state than we have right now
##### Noticeable changes
* `loadbalancing` exporter on top-level now uses `exporterhelper` with
all supported functionality by it
* `sending_queue` will be automatically disabled on `otlp` exporters
when it already present on top-level `loadbalancing` exporter. This
change is done to prevent data loss on `otlp` exporters because queue
there doesn't provide expected result. Also it will prevent potential
misconfiguration from user side and as result - irrelevant reported
issues
* `exporter` attribute for metrics generated from `otlp` sub-exporters
now includes endpoint for better visibility and to segregate them from
top-level `loadbalancing` exporter - was `"exporter": "loadbalancing"`,
now `"exporter": "loadbalancing/127.0.0.1:4317"`
* logs, generated by `otlp` sub-exporters now includes additional
attribute `endpoint` with endpoint value with the same reasons as for
metrics
#### Link to tracking issue
Fixes #35378
Fixes #16826
#### Testing
Proposed changes was heavily tested on large K8s environment with set of
different scale-up/scale-down event using persistent queue configuration
- no data loss were detected, everything works as expected
#### Documentation
`README.md` was updated to reflect new configuration parameters
available. Sample `config.yaml` was updated as well
---
.../feat_loadbalancing-exporter-queue.yaml | 29 +++++
exporter/loadbalancingexporter/README.md | 103 ++++++++++++++++-
exporter/loadbalancingexporter/config.go | 6 +
exporter/loadbalancingexporter/factory.go | 109 +++++++++++++++++-
.../loadbalancingexporter/factory_test.go | 99 ++++++++++++++++
exporter/loadbalancingexporter/go.mod | 2 +-
.../loadbalancingexporter/log_exporter.go | 4 +-
.../loadbalancingexporter/metrics_exporter.go | 4 +-
.../metrics_exporter_test.go | 32 -----
.../testdata/config.yaml | 11 +-
.../loadbalancingexporter/trace_exporter.go | 10 +-
.../trace_exporter_test.go | 34 ------
12 files changed, 354 insertions(+), 89 deletions(-)
create mode 100644 .chloggen/feat_loadbalancing-exporter-queue.yaml
diff --git a/.chloggen/feat_loadbalancing-exporter-queue.yaml b/.chloggen/feat_loadbalancing-exporter-queue.yaml
new file mode 100644
index 000000000000..d65a0e1d8d32
--- /dev/null
+++ b/.chloggen/feat_loadbalancing-exporter-queue.yaml
@@ -0,0 +1,29 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: enhancement
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: loadbalancingexporter
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Adding sending_queue, retry_on_failure and timeout settings to loadbalancing exporter configuration
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [35378,16826]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext: |
+ When switching to top-level sending_queue configuration - users should carefully review queue size
+ In some rare cases setting top-level queue size to n*queueSize might not be enough to prevent data loss
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: [user]
diff --git a/exporter/loadbalancingexporter/README.md b/exporter/loadbalancingexporter/README.md
index 2b6745341129..eecaa3e389fd 100644
--- a/exporter/loadbalancingexporter/README.md
+++ b/exporter/loadbalancingexporter/README.md
@@ -48,14 +48,39 @@ This also supports service name based exporting for traces. If you have two or m
## Resilience and scaling considerations
-The `loadbalancingexporter` will, irrespective of the chosen resolver (`static`, `dns`, `k8s`), create one exporter per endpoint. The exporter conforms to its published configuration regarding sending queue and retry mechanisms. Importantly, the `loadbalancingexporter` will not attempt to re-route data to a healthy endpoint on delivery failure, and data loss is therefore possible if the exporter's target remains unavailable once redelivery is exhausted. Due consideration needs to be given to the exporter queue and retry configuration when running in a highly elastic environment.
+The `loadbalancingexporter` will, irrespective of the chosen resolver (`static`, `dns`, `k8s`), create one `otlp` exporter per endpoint. Each level of exporters, the `loadbalancingexporter` itself and all sub-exporters (one per endpoint), has its own queue, timeout and retry mechanisms. Importantly, the `loadbalancingexporter`, by default, will NOT attempt to re-route data to a healthy endpoint on delivery failure, because the in-memory queue, retry and timeout settings are disabled by default ([more details on queuing, retry and timeout default settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md)).
-* When using the `static` resolver and a target is unavailable, all the target's load-balanced telemetry will fail to be delivered until either the target is restored or removed from the static list. The same principle applies to the `dns` resolver.
+```
+ +------------------+ +---------------+
+ resiliency options 1 | | | |
+ -- otlp exporter 1 ------------ backend 1 |
+ | ---/ | | | |
+ | ---/ +----|-------------+ +---------------+
+ | ---/ |
+ +-----------------+ ---/ |
+ | --/ |
+ | loadbalancing | resiliency options 2
+ | exporter | |
+ | --\ |
+ +-----------------+ ----\ |
+ ----\ +----|-------------+ +---------------+
+ ----\ | | | |
+ --- otlp exporter N ------------ backend N |
+ | | | |
+ +------------------+ +---------------+
+```
+
+* For all types of resolvers (`static`, `dns`, `k8s`) - if one of the endpoints is unavailable, the queue, retry and timeout settings defined for the sub-exporters (under the `otlp` property) apply first. Once redelivery is exhausted at the sub-exporter level, and resiliency options 1 are enabled - telemetry data returns to the `loadbalancingexporter` itself and data redelivery happens according to the exporter-level queue, retry and timeout settings.
+* When using the `static` resolver and all targets are unavailable, all load-balanced telemetry will fail to be delivered until either one or all targets are restored or a valid target is added to the static list. The same principle applies to the `dns` and `k8s` resolvers, except that the endpoints list is updated automatically.
* When using `k8s`, `dns`, and likely future resolvers, topology changes are eventually reflected in the `loadbalancingexporter`. The `k8s` resolver will update more quickly than `dns`, but a window of time in which the true topology doesn't match the view of the `loadbalancingexporter` remains.
+* Resiliency options 1 (`timeout`, `retry_on_failure` and `sending_queue` settings in the `loadbalancing` section) - are useful for a highly elastic environment (like k8s), where the list of resolved endpoints changes frequently due to deployments, scale-up or scale-down events. In case of a permanent change of the list of resolved endpoints, these options provide the capability to re-route data to a new set of healthy backends. Disabled by default.
+* Resiliency options 2 (`timeout`, `retry_on_failure` and `sending_queue` settings in the `otlp` section) - are useful for temporary problems with a specific backend, like network flukes. Persistent Queue is NOT supported here as all sub-exporters share the same `sending_queue` configuration, including `storage`. Enabled by default.
+
+Unfortunately, data loss is still possible if all of the exporter's targets remains unavailable once redelivery is exhausted. Due consideration needs to be given to the exporter queue and retry configuration when running in a highly elastic environment.
## Configuration
-Refer to [config.yaml](./testdata/config.yaml) for detailed examples on using the processor.
+Refer to [config.yaml](./testdata/config.yaml) for detailed examples on using the exporter.
* The `otlp` property configures the template used for building the OTLP exporter. Refer to the OTLP Exporter documentation for information on which options are available. Note that the `endpoint` property should not be set and will be overridden by this exporter with the backend endpoint.
* The `resolver` accepts a `static` node, a `dns`, a `k8s` service or `aws_cloud_map`. If all four are specified, an `errMultipleResolversProvided` error will be thrown.
@@ -90,6 +115,7 @@ Refer to [config.yaml](./testdata/config.yaml) for detailed examples on using th
* `traceID`: Routes spans based on their `traceID`. Invalid for metrics.
* `metric`: Routes metrics based on their metric name. Invalid for spans.
* `streamID`: Routes metrics based on their datapoint streamID. That's the unique hash of all it's attributes, plus the attributes and identifying information of its resource, scope, and metric data
+* loadbalancing exporter supports a set of standard [queuing, retry and timeout settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md), but they are disabled by default to maintain backwards compatibility
Simple example
@@ -117,11 +143,76 @@ exporters:
- backend-2:4317
- backend-3:4317
- backend-4:4317
- # Notice to config a headless service DNS in Kubernetes
+ # Notice to config a headless service DNS in Kubernetes
+ # dns:
+ # hostname: otelcol-headless.observability.svc.cluster.local
+
+service:
+ pipelines:
+ traces:
+ receivers:
+ - otlp
+ processors: []
+ exporters:
+ - loadbalancing
+ logs:
+ receivers:
+ - otlp
+ processors: []
+ exporters:
+ - loadbalancing
+```
+
+Persistent queue, retry and timeout usage example:
+
+```yaml
+receivers:
+ otlp:
+ protocols:
+ grpc:
+ endpoint: localhost:4317
+
+processors:
+
+exporters:
+ loadbalancing:
+ timeout: 10s
+ retry_on_failure:
+ enabled: true
+ initial_interval: 5s
+ max_interval: 30s
+ max_elapsed_time: 300s
+ sending_queue:
+ enabled: true
+ num_consumers: 2
+ queue_size: 1000
+ storage: file_storage/otc
+ routing_key: "service"
+ protocol:
+ otlp:
+ # all options from the OTLP exporter are supported
+ # except the endpoint
+ timeout: 1s
+ sending_queue:
+ enabled: true
+ resolver:
+ static:
+ hostnames:
+ - backend-1:4317
+ - backend-2:4317
+ - backend-3:4317
+ - backend-4:4317
+ # Notice to config a headless service DNS in Kubernetes
# dns:
- # hostname: otelcol-headless.observability.svc.cluster.local
+ # hostname: otelcol-headless.observability.svc.cluster.local
+
+extensions:
+ file_storage/otc:
+ directory: /var/lib/storage/otc
+ timeout: 10s
service:
+ extensions: [file_storage/otc]
pipelines:
traces:
receivers:
@@ -334,7 +425,7 @@ service:
## Metrics
-The following metrics are recorded by this processor:
+The following metrics are recorded by this exporter:
* `otelcol_loadbalancer_num_resolutions` represents the total number of resolutions performed by the resolver specified in the tag `resolver`, split by their outcome (`success=true|false`). For the static resolver, this should always be `1` with the tag `success=true`.
* `otelcol_loadbalancer_num_backends` informs how many backends are currently in use. It should always match the number of items specified in the configuration file in case the `static` resolver is used, and should eventually (seconds) catch up with the DNS changes. Note that DNS caches that might exist between the load balancer and the record authority will influence how long it takes for the load balancer to see the change.
diff --git a/exporter/loadbalancingexporter/config.go b/exporter/loadbalancingexporter/config.go
index 8496268de7ed..b9682df16892 100644
--- a/exporter/loadbalancingexporter/config.go
+++ b/exporter/loadbalancingexporter/config.go
@@ -7,6 +7,8 @@ import (
"time"
"github.com/aws/aws-sdk-go-v2/service/servicediscovery/types"
+ "go.opentelemetry.io/collector/config/configretry"
+ "go.opentelemetry.io/collector/exporter/exporterhelper"
"go.opentelemetry.io/collector/exporter/otlpexporter"
)
@@ -30,6 +32,10 @@ const (
// Config defines configuration for the exporter.
type Config struct {
+ TimeoutSettings exporterhelper.TimeoutConfig `mapstructure:",squash"`
+ configretry.BackOffConfig `mapstructure:"retry_on_failure"`
+ QueueSettings exporterhelper.QueueConfig `mapstructure:"sending_queue"`
+
Protocol Protocol `mapstructure:"protocol"`
Resolver ResolverSettings `mapstructure:"resolver"`
RoutingKey string `mapstructure:"routing_key"`
diff --git a/exporter/loadbalancingexporter/factory.go b/exporter/loadbalancingexporter/factory.go
index f1c37e151757..1e10395162c4 100644
--- a/exporter/loadbalancingexporter/factory.go
+++ b/exporter/loadbalancingexporter/factory.go
@@ -7,14 +7,21 @@ package loadbalancingexporter // import "github.com/open-telemetry/opentelemetry
import (
"context"
+ "fmt"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/exporter"
+ "go.opentelemetry.io/collector/exporter/exporterhelper"
"go.opentelemetry.io/collector/exporter/otlpexporter"
+ "go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/internal/metadata"
)
+const (
+ zapEndpointKey = "endpoint"
+)
+
// NewFactory creates a factory for the exporter.
func NewFactory() exporter.Factory {
return exporter.NewFactory(
@@ -32,20 +39,110 @@ func createDefaultConfig() component.Config {
otlpDefaultCfg.Endpoint = "placeholder:4317"
return &Config{
+ // By default, resilience options are disabled at the loadbalancing exporter level
+ // to maintain compatibility with the behavior of previous versions
Protocol: Protocol{
OTLP: *otlpDefaultCfg,
},
}
}
-func createTracesExporter(_ context.Context, params exporter.Settings, cfg component.Config) (exporter.Traces, error) {
- return newTracesExporter(params, cfg)
+func buildExporterConfig(cfg *Config, endpoint string) otlpexporter.Config {
+ oCfg := cfg.Protocol.OTLP
+ oCfg.Endpoint = endpoint
+
+ return oCfg
+}
+
+func buildExporterSettings(params exporter.Settings, endpoint string) exporter.Settings {
+ // Override child exporter ID to segregate metrics from loadbalancing top level
+ childName := endpoint
+ if params.ID.Name() != "" {
+ childName = fmt.Sprintf("%s_%s", params.ID.Name(), childName)
+ }
+ params.ID = component.NewIDWithName(params.ID.Type(), childName)
+ // Add "endpoint" attribute to child exporter logger to segregate logs from loadbalancing top level
+ params.Logger = params.Logger.With(zap.String(zapEndpointKey, endpoint))
+
+ return params
+}
+
+func buildExporterResilienceOptions(options []exporterhelper.Option, cfg *Config) []exporterhelper.Option {
+ if cfg.TimeoutSettings.Timeout > 0 {
+ options = append(options, exporterhelper.WithTimeout(cfg.TimeoutSettings))
+ }
+ if cfg.QueueSettings.Enabled {
+ options = append(options, exporterhelper.WithQueue(cfg.QueueSettings))
+ }
+ if cfg.BackOffConfig.Enabled {
+ options = append(options, exporterhelper.WithRetry(cfg.BackOffConfig))
+ }
+
+ return options
+}
+
+func createTracesExporter(ctx context.Context, params exporter.Settings, cfg component.Config) (exporter.Traces, error) {
+ c := cfg.(*Config)
+ exporter, err := newTracesExporter(params, cfg)
+ if err != nil {
+ return nil, fmt.Errorf("cannot configure loadbalancing traces exporter: %w", err)
+ }
+
+ options := []exporterhelper.Option{
+ exporterhelper.WithStart(exporter.Start),
+ exporterhelper.WithShutdown(exporter.Shutdown),
+ exporterhelper.WithCapabilities(exporter.Capabilities()),
+ }
+
+ return exporterhelper.NewTraces(
+ ctx,
+ params,
+ cfg,
+ exporter.ConsumeTraces,
+ buildExporterResilienceOptions(options, c)...,
+ )
}
-func createLogsExporter(_ context.Context, params exporter.Settings, cfg component.Config) (exporter.Logs, error) {
- return newLogsExporter(params, cfg)
+func createLogsExporter(ctx context.Context, params exporter.Settings, cfg component.Config) (exporter.Logs, error) {
+ c := cfg.(*Config)
+ exporter, err := newLogsExporter(params, cfg)
+ if err != nil {
+ return nil, fmt.Errorf("cannot configure loadbalancing logs exporter: %w", err)
+ }
+
+ options := []exporterhelper.Option{
+ exporterhelper.WithStart(exporter.Start),
+ exporterhelper.WithShutdown(exporter.Shutdown),
+ exporterhelper.WithCapabilities(exporter.Capabilities()),
+ }
+
+ return exporterhelper.NewLogs(
+ ctx,
+ params,
+ cfg,
+ exporter.ConsumeLogs,
+ buildExporterResilienceOptions(options, c)...,
+ )
}
-func createMetricsExporter(_ context.Context, params exporter.Settings, cfg component.Config) (exporter.Metrics, error) {
- return newMetricsExporter(params, cfg)
+func createMetricsExporter(ctx context.Context, params exporter.Settings, cfg component.Config) (exporter.Metrics, error) {
+ c := cfg.(*Config)
+ exporter, err := newMetricsExporter(params, cfg)
+ if err != nil {
+ return nil, fmt.Errorf("cannot configure loadbalancing metrics exporter: %w", err)
+ }
+
+ options := []exporterhelper.Option{
+ exporterhelper.WithStart(exporter.Start),
+ exporterhelper.WithShutdown(exporter.Shutdown),
+ exporterhelper.WithCapabilities(exporter.Capabilities()),
+ }
+
+ return exporterhelper.NewMetrics(
+ ctx,
+ params,
+ cfg,
+ exporter.ConsumeMetrics,
+ buildExporterResilienceOptions(options, c)...,
+ )
}
diff --git a/exporter/loadbalancingexporter/factory_test.go b/exporter/loadbalancingexporter/factory_test.go
index 974b13d04bdb..b4d3ff103e5a 100644
--- a/exporter/loadbalancingexporter/factory_test.go
+++ b/exporter/loadbalancingexporter/factory_test.go
@@ -5,10 +5,22 @@ package loadbalancingexporter
import (
"context"
+ "fmt"
+ "path/filepath"
"testing"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/collector/config/configretry"
+ "go.opentelemetry.io/collector/exporter/exporterhelper"
"go.opentelemetry.io/collector/exporter/exportertest"
+ "go.opentelemetry.io/collector/exporter/otlpexporter"
+ "go.opentelemetry.io/collector/otelcol/otelcoltest"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zaptest/observer"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/internal/metadata"
)
func TestTracesExporterGetsCreatedWithValidConfiguration(t *testing.T) {
@@ -58,3 +70,90 @@ func TestOTLPConfigIsValid(t *testing.T) {
// verify
assert.NoError(t, otlpCfg.Validate())
}
+
+func TestBuildExporterConfig(t *testing.T) {
+ // prepare
+ factories, err := otelcoltest.NopFactories()
+ require.NoError(t, err)
+
+ factories.Exporters[metadata.Type] = NewFactory()
+ // https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/33594
+ // nolint:staticcheck
+ cfg, err := otelcoltest.LoadConfigAndValidate(filepath.Join("testdata", "test-build-exporter-config.yaml"), factories)
+ require.NoError(t, err)
+ require.NotNil(t, cfg)
+
+ c := cfg.Exporters[component.NewID(metadata.Type)]
+ require.NotNil(t, c)
+
+ // test
+ defaultCfg := otlpexporter.NewFactory().CreateDefaultConfig().(*otlpexporter.Config)
+ exporterCfg := buildExporterConfig(c.(*Config), "the-endpoint")
+
+ // verify
+ grpcSettings := defaultCfg.ClientConfig
+ grpcSettings.Endpoint = "the-endpoint"
+ assert.Equal(t, grpcSettings, exporterCfg.ClientConfig)
+
+ assert.Equal(t, defaultCfg.TimeoutConfig, exporterCfg.TimeoutConfig)
+ assert.Equal(t, defaultCfg.QueueConfig, exporterCfg.QueueConfig)
+ assert.Equal(t, defaultCfg.RetryConfig, exporterCfg.RetryConfig)
+}
+
+func TestBuildExporterSettings(t *testing.T) {
+ // prepare
+ creationParams := exportertest.NewNopSettings()
+ testEndpoint := "the-endpoint"
+ observedZapCore, observedLogs := observer.New(zap.InfoLevel)
+ creationParams.Logger = zap.New(observedZapCore)
+
+ // test
+ exporterParams := buildExporterSettings(creationParams, testEndpoint)
+ exporterParams.Logger.Info("test")
+
+ // verify
+ expectedID := component.NewIDWithName(
+ creationParams.ID.Type(),
+ fmt.Sprintf("%s_%s", creationParams.ID.Name(), testEndpoint),
+ )
+ assert.Equal(t, expectedID, exporterParams.ID)
+
+ allLogs := observedLogs.All()
+ require.Equal(t, 1, observedLogs.Len())
+ assert.Contains(t,
+ allLogs[0].Context,
+ zap.String(zapEndpointKey, testEndpoint),
+ )
+}
+
+func TestBuildExporterResilienceOptions(t *testing.T) {
+ t.Run("Shouldn't have resilience options by default", func(t *testing.T) {
+ o := []exporterhelper.Option{}
+ cfg := createDefaultConfig().(*Config)
+ assert.Empty(t, buildExporterResilienceOptions(o, cfg))
+ })
+ t.Run("Should have timeout option if defined", func(t *testing.T) {
+ o := []exporterhelper.Option{}
+ cfg := createDefaultConfig().(*Config)
+ cfg.TimeoutSettings = exporterhelper.NewDefaultTimeoutConfig()
+
+ assert.Len(t, buildExporterResilienceOptions(o, cfg), 1)
+ })
+ t.Run("Should have timeout and queue options if defined", func(t *testing.T) {
+ o := []exporterhelper.Option{}
+ cfg := createDefaultConfig().(*Config)
+ cfg.TimeoutSettings = exporterhelper.NewDefaultTimeoutConfig()
+ cfg.QueueSettings = exporterhelper.NewDefaultQueueConfig()
+
+ assert.Len(t, buildExporterResilienceOptions(o, cfg), 2)
+ })
+ t.Run("Should have all resilience options if defined", func(t *testing.T) {
+ o := []exporterhelper.Option{}
+ cfg := createDefaultConfig().(*Config)
+ cfg.TimeoutSettings = exporterhelper.NewDefaultTimeoutConfig()
+ cfg.QueueSettings = exporterhelper.NewDefaultQueueConfig()
+ cfg.BackOffConfig = configretry.NewDefaultBackOffConfig()
+
+ assert.Len(t, buildExporterResilienceOptions(o, cfg), 3)
+ })
+}
diff --git a/exporter/loadbalancingexporter/go.mod b/exporter/loadbalancingexporter/go.mod
index ae60ad3b3895..b0c0269f9d96 100644
--- a/exporter/loadbalancingexporter/go.mod
+++ b/exporter/loadbalancingexporter/go.mod
@@ -14,6 +14,7 @@ require (
github.com/stretchr/testify v1.10.0
go.opentelemetry.io/collector/component v0.114.0
go.opentelemetry.io/collector/component/componenttest v0.114.0
+ go.opentelemetry.io/collector/config/configretry v1.20.0
go.opentelemetry.io/collector/config/configtelemetry v0.114.0
go.opentelemetry.io/collector/confmap v1.20.0
go.opentelemetry.io/collector/consumer v0.114.0
@@ -113,7 +114,6 @@ require (
go.opentelemetry.io/collector/config/configgrpc v0.114.0 // indirect
go.opentelemetry.io/collector/config/confignet v1.20.0 // indirect
go.opentelemetry.io/collector/config/configopaque v1.20.0 // indirect
- go.opentelemetry.io/collector/config/configretry v1.20.0 // indirect
go.opentelemetry.io/collector/config/configtls v1.20.0 // indirect
go.opentelemetry.io/collector/config/internal v0.114.0 // indirect
go.opentelemetry.io/collector/confmap/provider/envprovider v1.20.0 // indirect
diff --git a/exporter/loadbalancingexporter/log_exporter.go b/exporter/loadbalancingexporter/log_exporter.go
index 8d4e3cf56b37..2d1385b76e34 100644
--- a/exporter/loadbalancingexporter/log_exporter.go
+++ b/exporter/loadbalancingexporter/log_exporter.go
@@ -41,7 +41,9 @@ func newLogsExporter(params exporter.Settings, cfg component.Config) (*logExport
exporterFactory := otlpexporter.NewFactory()
cfFunc := func(ctx context.Context, endpoint string) (component.Component, error) {
oCfg := buildExporterConfig(cfg.(*Config), endpoint)
- return exporterFactory.CreateLogs(ctx, params, &oCfg)
+ oParams := buildExporterSettings(params, endpoint)
+
+ return exporterFactory.CreateLogs(ctx, oParams, &oCfg)
}
lb, err := newLoadBalancer(params.Logger, cfg, cfFunc, telemetry)
diff --git a/exporter/loadbalancingexporter/metrics_exporter.go b/exporter/loadbalancingexporter/metrics_exporter.go
index f88b8c7557df..45bef77149e3 100644
--- a/exporter/loadbalancingexporter/metrics_exporter.go
+++ b/exporter/loadbalancingexporter/metrics_exporter.go
@@ -43,7 +43,9 @@ func newMetricsExporter(params exporter.Settings, cfg component.Config) (*metric
exporterFactory := otlpexporter.NewFactory()
cfFunc := func(ctx context.Context, endpoint string) (component.Component, error) {
oCfg := buildExporterConfig(cfg.(*Config), endpoint)
- return exporterFactory.CreateMetrics(ctx, params, &oCfg)
+ oParams := buildExporterSettings(params, endpoint)
+
+ return exporterFactory.CreateMetrics(ctx, oParams, &oCfg)
}
lb, err := newLoadBalancer(params.Logger, cfg, cfFunc, telemetry)
diff --git a/exporter/loadbalancingexporter/metrics_exporter_test.go b/exporter/loadbalancingexporter/metrics_exporter_test.go
index 5faaf284ae7e..1013dcda2a5e 100644
--- a/exporter/loadbalancingexporter/metrics_exporter_test.go
+++ b/exporter/loadbalancingexporter/metrics_exporter_test.go
@@ -24,14 +24,11 @@ import (
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/consumer/consumertest"
"go.opentelemetry.io/collector/exporter"
- "go.opentelemetry.io/collector/exporter/otlpexporter"
- "go.opentelemetry.io/collector/otelcol/otelcoltest"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
conventions "go.opentelemetry.io/collector/semconv/v1.27.0"
"gopkg.in/yaml.v2"
- "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/internal/metadata"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/pmetrictest"
)
@@ -673,35 +670,6 @@ func TestConsumeMetricsUnexpectedExporterType(t *testing.T) {
assert.EqualError(t, res, fmt.Sprintf("unable to export metrics, unexpected exporter type: expected exporter.Metrics but got %T", newNopMockExporter()))
}
-func TestBuildExporterConfigUnknown(t *testing.T) {
- // prepare
- factories, err := otelcoltest.NopFactories()
- require.NoError(t, err)
-
- factories.Exporters[metadata.Type] = NewFactory()
- // https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/33594
- // nolint:staticcheck
- cfg, err := otelcoltest.LoadConfigAndValidate(filepath.Join("testdata", "test-build-exporter-config.yaml"), factories)
- require.NoError(t, err)
- require.NotNil(t, cfg)
-
- c := cfg.Exporters[component.NewID(metadata.Type)]
- require.NotNil(t, c)
-
- // test
- defaultCfg := otlpexporter.NewFactory().CreateDefaultConfig().(*otlpexporter.Config)
- exporterCfg := buildExporterConfig(c.(*Config), "the-endpoint")
-
- // verify
- grpcSettings := defaultCfg.ClientConfig
- grpcSettings.Endpoint = "the-endpoint"
- assert.Equal(t, grpcSettings, exporterCfg.ClientConfig)
-
- assert.Equal(t, defaultCfg.TimeoutConfig, exporterCfg.TimeoutConfig)
- assert.Equal(t, defaultCfg.QueueConfig, exporterCfg.QueueConfig)
- assert.Equal(t, defaultCfg.RetryConfig, exporterCfg.RetryConfig)
-}
-
func TestBatchWithTwoMetrics(t *testing.T) {
ts, tb := getTelemetryAssets(t)
sink := new(consumertest.MetricsSink)
diff --git a/exporter/loadbalancingexporter/testdata/config.yaml b/exporter/loadbalancingexporter/testdata/config.yaml
index da1d51818e59..64a0271338b3 100644
--- a/exporter/loadbalancingexporter/testdata/config.yaml
+++ b/exporter/loadbalancingexporter/testdata/config.yaml
@@ -1,6 +1,6 @@
loadbalancing:
protocol:
- # the OTLP exporter configuration. "endpoint" values will be ignored
+ # the OTLP exporter configuration "endpoint" values will be ignored
otlp:
timeout: 1s
@@ -38,3 +38,12 @@ loadbalancing/4:
namespace: cloudmap-1
service_name: service-1
port: 4319
+
+loadbalancing/5:
+ # the OTLP exporter configuration "sending_queue" values will be ignored
+ sending_queue:
+ enabled: true
+ protocol:
+ otlp:
+ sending_queue:
+ enabled: false
diff --git a/exporter/loadbalancingexporter/trace_exporter.go b/exporter/loadbalancingexporter/trace_exporter.go
index 3e335e8a9e14..e6fb9647d977 100644
--- a/exporter/loadbalancingexporter/trace_exporter.go
+++ b/exporter/loadbalancingexporter/trace_exporter.go
@@ -45,7 +45,9 @@ func newTracesExporter(params exporter.Settings, cfg component.Config) (*traceEx
exporterFactory := otlpexporter.NewFactory()
cfFunc := func(ctx context.Context, endpoint string) (component.Component, error) {
oCfg := buildExporterConfig(cfg.(*Config), endpoint)
- return exporterFactory.CreateTraces(ctx, params, &oCfg)
+ oParams := buildExporterSettings(params, endpoint)
+
+ return exporterFactory.CreateTraces(ctx, oParams, &oCfg)
}
lb, err := newLoadBalancer(params.Logger, cfg, cfFunc, telemetry)
@@ -69,12 +71,6 @@ func newTracesExporter(params exporter.Settings, cfg component.Config) (*traceEx
return &traceExporter, nil
}
-func buildExporterConfig(cfg *Config, endpoint string) otlpexporter.Config {
- oCfg := cfg.Protocol.OTLP
- oCfg.Endpoint = endpoint
- return oCfg
-}
-
func (e *traceExporterImp) Capabilities() consumer.Capabilities {
return consumer.Capabilities{MutatesData: false}
}
diff --git a/exporter/loadbalancingexporter/trace_exporter_test.go b/exporter/loadbalancingexporter/trace_exporter_test.go
index d48aeb462bcf..8751c83e8986 100644
--- a/exporter/loadbalancingexporter/trace_exporter_test.go
+++ b/exporter/loadbalancingexporter/trace_exporter_test.go
@@ -9,7 +9,6 @@ import (
"fmt"
"math/rand"
"net"
- "path/filepath"
"sync"
"sync/atomic"
"testing"
@@ -23,13 +22,9 @@ import (
"go.opentelemetry.io/collector/consumer/consumertest"
"go.opentelemetry.io/collector/exporter"
"go.opentelemetry.io/collector/exporter/exportertest"
- "go.opentelemetry.io/collector/exporter/otlpexporter"
- "go.opentelemetry.io/collector/otelcol/otelcoltest"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/ptrace"
conventions "go.opentelemetry.io/collector/semconv/v1.27.0"
-
- "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/internal/metadata"
)
func TestNewTracesExporter(t *testing.T) {
@@ -349,35 +344,6 @@ func TestConsumeTracesUnexpectedExporterType(t *testing.T) {
assert.EqualError(t, res, fmt.Sprintf("unable to export traces, unexpected exporter type: expected exporter.Traces but got %T", newNopMockExporter()))
}
-func TestBuildExporterConfig(t *testing.T) {
- // prepare
- factories, err := otelcoltest.NopFactories()
- require.NoError(t, err)
-
- factories.Exporters[metadata.Type] = NewFactory()
- // https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/33594
- // nolint:staticcheck
- cfg, err := otelcoltest.LoadConfigAndValidate(filepath.Join("testdata", "test-build-exporter-config.yaml"), factories)
- require.NoError(t, err)
- require.NotNil(t, cfg)
-
- c := cfg.Exporters[component.NewID(metadata.Type)]
- require.NotNil(t, c)
-
- // test
- defaultCfg := otlpexporter.NewFactory().CreateDefaultConfig().(*otlpexporter.Config)
- exporterCfg := buildExporterConfig(c.(*Config), "the-endpoint")
-
- // verify
- grpcSettings := defaultCfg.ClientConfig
- grpcSettings.Endpoint = "the-endpoint"
- assert.Equal(t, grpcSettings, exporterCfg.ClientConfig)
-
- assert.Equal(t, defaultCfg.TimeoutConfig, exporterCfg.TimeoutConfig)
- assert.Equal(t, defaultCfg.QueueConfig, exporterCfg.QueueConfig)
- assert.Equal(t, defaultCfg.RetryConfig, exporterCfg.RetryConfig)
-}
-
func TestBatchWithTwoTraces(t *testing.T) {
ts, tb := getTelemetryAssets(t)
sink := new(consumertest.TracesSink)
From 756d2496b28a91a86601c0871d09edff39a84f43 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Juraci=20Paix=C3=A3o=20Kr=C3=B6hling?=
Date: Wed, 27 Nov 2024 16:41:40 +0100
Subject: [PATCH 09/23] [chore] remove jpkrohling as codeowner from many
modules (#36563)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
This PR removes @jpkrohling as code owner for multiple modules.
Signed-off-by: Juraci Paixão Kröhling
---------
Signed-off-by: Juraci Paixão Kröhling
---
.github/CODEOWNERS | 26 +++++++--------
cmd/githubgen/allowlist.txt | 33 ++++++++++---------
connector/exceptionsconnector/README.md | 2 +-
connector/exceptionsconnector/metadata.yaml | 2 +-
connector/grafanacloudconnector/README.md | 2 +-
connector/grafanacloudconnector/metadata.yaml | 2 +-
connector/servicegraphconnector/README.md | 2 +-
connector/servicegraphconnector/metadata.yaml | 2 +-
exporter/alertmanagerexporter/README.md | 2 +-
exporter/alertmanagerexporter/metadata.yaml | 2 +-
exporter/lokiexporter/README.md | 2 +-
exporter/lokiexporter/metadata.yaml | 2 +-
extension/headerssetterextension/README.md | 2 +-
.../headerssetterextension/metadata.yaml | 3 +-
extension/healthcheckextension/README.md | 2 +-
extension/healthcheckextension/metadata.yaml | 2 +-
pkg/translator/loki/metadata.yaml | 2 +-
.../deltatocumulativeprocessor/README.md | 2 +-
.../deltatocumulativeprocessor/metadata.yaml | 2 +-
.../awscloudwatchmetricsreceiver/README.md | 2 +-
.../metadata.yaml | 2 +-
receiver/datadogreceiver/README.md | 2 +-
receiver/datadogreceiver/metadata.yaml | 2 +-
receiver/lokireceiver/README.md | 2 +-
receiver/lokireceiver/metadata.yaml | 2 +-
25 files changed, 54 insertions(+), 52 deletions(-)
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index b9f0a07b24aa..903af87d29d4 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -27,20 +27,20 @@ confmap/provider/secretsmanagerprovider/ @open-telemetry/collector-cont
connector/countconnector/ @open-telemetry/collector-contrib-approvers @djaglowski @jpkrohling
connector/datadogconnector/ @open-telemetry/collector-contrib-approvers @mx-psi @dineshg13 @ankitpatel96 @jade-guiton-dd
-connector/exceptionsconnector/ @open-telemetry/collector-contrib-approvers @jpkrohling @marctc
+connector/exceptionsconnector/ @open-telemetry/collector-contrib-approvers @marctc
connector/failoverconnector/ @open-telemetry/collector-contrib-approvers @akats7 @djaglowski @fatsheep9146
-connector/grafanacloudconnector/ @open-telemetry/collector-contrib-approvers @jpkrohling @rlankfo @jcreixell
+connector/grafanacloudconnector/ @open-telemetry/collector-contrib-approvers @rlankfo @jcreixell
connector/otlpjsonconnector/ @open-telemetry/collector-contrib-approvers @djaglowski @ChrsMark
connector/roundrobinconnector/ @open-telemetry/collector-contrib-approvers @bogdandrutu
connector/routingconnector/ @open-telemetry/collector-contrib-approvers @jpkrohling @mwear
-connector/servicegraphconnector/ @open-telemetry/collector-contrib-approvers @jpkrohling @mapno @JaredTan95
+connector/servicegraphconnector/ @open-telemetry/collector-contrib-approvers @mapno @JaredTan95
connector/signaltometricsconnector/ @open-telemetry/collector-contrib-approvers @ChrsMark @lahsivjar
connector/spanmetricsconnector/ @open-telemetry/collector-contrib-approvers @portertech @Frapschen
connector/sumconnector/ @open-telemetry/collector-contrib-approvers @greatestusername @shalper2 @crobert-1
examples/demo/ @open-telemetry/collector-contrib-approvers @open-telemetry/collector-approvers
-exporter/alertmanagerexporter/ @open-telemetry/collector-contrib-approvers @jpkrohling @sokoide @mcube8
+exporter/alertmanagerexporter/ @open-telemetry/collector-contrib-approvers @sokoide @mcube8
exporter/alibabacloudlogserviceexporter/ @open-telemetry/collector-contrib-approvers @shabicheng @kongluoxing @qiansheng91
exporter/awscloudwatchlogsexporter/ @open-telemetry/collector-contrib-approvers @boostchicken @bryan-aguilar @rapphil
exporter/awsemfexporter/ @open-telemetry/collector-contrib-approvers @Aneurysm9 @mxiamxia @bryan-aguilar
@@ -68,7 +68,7 @@ exporter/kineticaexporter/ @open-telemetry/collector-cont
exporter/loadbalancingexporter/ @open-telemetry/collector-contrib-approvers @jpkrohling
exporter/logicmonitorexporter/ @open-telemetry/collector-contrib-approvers @bogdandrutu @khyatigandhi6 @avadhut123pisal
exporter/logzioexporter/ @open-telemetry/collector-contrib-approvers @yotamloe
-exporter/lokiexporter/ @open-telemetry/collector-contrib-approvers @gramidt @jpkrohling @mar4uk
+exporter/lokiexporter/ @open-telemetry/collector-contrib-approvers @gramidt @mar4uk
exporter/mezmoexporter/ @open-telemetry/collector-contrib-approvers @dashpole @billmeyer @gjanco
exporter/opencensusexporter/ @open-telemetry/collector-contrib-approvers @open-telemetry/collector-approvers
exporter/otelarrowexporter/ @open-telemetry/collector-contrib-approvers @jmacd @moh-osman3 @lquerel
@@ -99,8 +99,8 @@ extension/encoding/otlpencodingextension/ @open-telemetry/collector-cont
extension/encoding/textencodingextension/ @open-telemetry/collector-contrib-approvers @MovieStoreGuy @atoulme
extension/encoding/zipkinencodingextension/ @open-telemetry/collector-contrib-approvers @MovieStoreGuy @dao-jun
extension/googleclientauthextension/ @open-telemetry/collector-contrib-approvers @dashpole @aabmass @jsuereth @punya @psx95
-extension/headerssetterextension/ @open-telemetry/collector-contrib-approvers @jpkrohling
-extension/healthcheckextension/ @open-telemetry/collector-contrib-approvers @jpkrohling
+extension/headerssetterextension/ @open-telemetry/collector-contrib-approvers
+extension/healthcheckextension/ @open-telemetry/collector-contrib-approvers
extension/healthcheckv2extension/ @open-telemetry/collector-contrib-approvers @jpkrohling @mwear
extension/httpforwarderextension/ @open-telemetry/collector-contrib-approvers @atoulme
extension/jaegerremotesampling/ @open-telemetry/collector-contrib-approvers @yurishkuro @frzifus
@@ -161,7 +161,7 @@ pkg/status/ @open-telemetry/collector-cont
pkg/translator/azure/ @open-telemetry/collector-contrib-approvers @open-telemetry/collector-approvers @atoulme @cparkins
pkg/translator/azurelogs/ @open-telemetry/collector-contrib-approvers @atoulme @cparkins @MikeGoldsmith
pkg/translator/jaeger/ @open-telemetry/collector-contrib-approvers @open-telemetry/collector-approvers @frzifus
-pkg/translator/loki/ @open-telemetry/collector-contrib-approvers @gouthamve @jpkrohling @mar4uk
+pkg/translator/loki/ @open-telemetry/collector-contrib-approvers @gouthamve @mar4uk
pkg/translator/opencensus/ @open-telemetry/collector-contrib-approvers @open-telemetry/collector-approvers
pkg/translator/prometheus/ @open-telemetry/collector-contrib-approvers @dashpole @bertysentry @ArthurSens
pkg/translator/prometheusremotewrite/ @open-telemetry/collector-contrib-approvers @Aneurysm9 @dashpole
@@ -173,7 +173,7 @@ pkg/winperfcounters/ @open-telemetry/collector-cont
processor/attributesprocessor/ @open-telemetry/collector-contrib-approvers @boostchicken
processor/coralogixprocessor/ @open-telemetry/collector-contrib-approvers @crobert-1 @galrose
processor/cumulativetodeltaprocessor/ @open-telemetry/collector-contrib-approvers @TylerHelmuth
-processor/deltatocumulativeprocessor/ @open-telemetry/collector-contrib-approvers @sh0rez @RichieSams @jpkrohling
+processor/deltatocumulativeprocessor/ @open-telemetry/collector-contrib-approvers @sh0rez @RichieSams
processor/deltatorateprocessor/ @open-telemetry/collector-contrib-approvers @Aneurysm9
processor/filterprocessor/ @open-telemetry/collector-contrib-approvers @TylerHelmuth @boostchicken
processor/geoipprocessor/ @open-telemetry/collector-contrib-approvers @andrzej-stencel @michalpristas @rogercoll
@@ -201,7 +201,7 @@ receiver/activedirectorydsreceiver/ @open-telemetry/collector-cont
receiver/aerospikereceiver/ @open-telemetry/collector-contrib-approvers @djaglowski @antonblock
receiver/apachereceiver/ @open-telemetry/collector-contrib-approvers @djaglowski
receiver/apachesparkreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski @Caleb-Hurshman @mrsillydog
-receiver/awscloudwatchmetricsreceiver/ @open-telemetry/collector-contrib-approvers @jpkrohling
+receiver/awscloudwatchmetricsreceiver/ @open-telemetry/collector-contrib-approvers
receiver/awscloudwatchreceiver/ @open-telemetry/collector-contrib-approvers @schmikei
receiver/awscontainerinsightreceiver/ @open-telemetry/collector-contrib-approvers @Aneurysm9 @pxaws
receiver/awsecscontainermetricsreceiver/ @open-telemetry/collector-contrib-approvers @Aneurysm9
@@ -218,7 +218,7 @@ receiver/cloudflarereceiver/ @open-telemetry/collector-cont
receiver/cloudfoundryreceiver/ @open-telemetry/collector-contrib-approvers @crobert-1
receiver/collectdreceiver/ @open-telemetry/collector-contrib-approvers @atoulme
receiver/couchdbreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski
-receiver/datadogreceiver/ @open-telemetry/collector-contrib-approvers @boostchicken @gouthamve @jpkrohling @MovieStoreGuy
+receiver/datadogreceiver/ @open-telemetry/collector-contrib-approvers @boostchicken @gouthamve @MovieStoreGuy
receiver/dockerstatsreceiver/ @open-telemetry/collector-contrib-approvers @jamesmoessis
receiver/elasticsearchreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski
receiver/expvarreceiver/ @open-telemetry/collector-contrib-approvers @jamesmoessis @MovieStoreGuy
@@ -233,7 +233,7 @@ receiver/googlecloudspannerreceiver/ @open-telemetry/collector-cont
receiver/haproxyreceiver/ @open-telemetry/collector-contrib-approvers @atoulme @MovieStoreGuy
receiver/hostmetricsreceiver/ @open-telemetry/collector-contrib-approvers @dmitryax @braydonk
receiver/httpcheckreceiver/ @open-telemetry/collector-contrib-approvers @codeboten
-receiver/huaweicloudcesreceiver/ @open-telemetry/collector-contrib-approvers @narcis96
+receiver/huaweicloudcesreceiver/ @open-telemetry/collector-contrib-approvers @heitorganzeli @narcis96 @mwear
receiver/iisreceiver/ @open-telemetry/collector-contrib-approvers @Mrod1598 @pjanotti
receiver/influxdbreceiver/ @open-telemetry/collector-contrib-approvers @jacobmarble
receiver/jaegerreceiver/ @open-telemetry/collector-contrib-approvers @yurishkuro
@@ -244,7 +244,7 @@ receiver/k8sobjectsreceiver/ @open-telemetry/collector-cont
receiver/kafkametricsreceiver/ @open-telemetry/collector-contrib-approvers @dmitryax
receiver/kafkareceiver/ @open-telemetry/collector-contrib-approvers @pavolloffay @MovieStoreGuy
receiver/kubeletstatsreceiver/ @open-telemetry/collector-contrib-approvers @dmitryax @TylerHelmuth @ChrsMark
-receiver/lokireceiver/ @open-telemetry/collector-contrib-approvers @mar4uk @jpkrohling
+receiver/lokireceiver/ @open-telemetry/collector-contrib-approvers @mar4uk
receiver/memcachedreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski
receiver/mongodbatlasreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski @schmikei
receiver/mongodbreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski @schmikei
diff --git a/cmd/githubgen/allowlist.txt b/cmd/githubgen/allowlist.txt
index 69136f05ea7f..708072cfce0a 100644
--- a/cmd/githubgen/allowlist.txt
+++ b/cmd/githubgen/allowlist.txt
@@ -1,23 +1,24 @@
+abhishek-at-cloudwerx
+adcharre
Caleb-Hurshman
+cemdk
cheempz
-jerrytfleung
+dlopes7
driverpt
-adcharre
-jcreixell
-rlankfo
-swar8080
-zpzhuSplunk
-thmshmm
+dsimil
galrose
-cemdk
-m1rp
-jriguera
-abhishek-at-cloudwerx
+harishbohara11
+heitorganzeli
+Hemansh31
+jcreixell
+jerrytfleung
joker-star-l
+jriguera
+KiranmayiB
+m1rp
michael-burt
-Hemansh31
+rlankfo
shazlehu
-dsimil
-KiranmayiB
-harishbohara11
-dlopes7
\ No newline at end of file
+swar8080
+thmshmm
+zpzhuSplunk
\ No newline at end of file
diff --git a/connector/exceptionsconnector/README.md b/connector/exceptionsconnector/README.md
index c2e7e3492ffb..14ceb097eaf9 100644
--- a/connector/exceptionsconnector/README.md
+++ b/connector/exceptionsconnector/README.md
@@ -5,7 +5,7 @@
| ------------- |-----------|
| Distributions | [contrib], [k8s] |
| Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aconnector%2Fexceptions%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aconnector%2Fexceptions) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aconnector%2Fexceptions%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aconnector%2Fexceptions) |
-| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@jpkrohling](https://www.github.com/jpkrohling), [@marctc](https://www.github.com/marctc) |
+| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@marctc](https://www.github.com/marctc) |
[alpha]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#alpha
[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib
diff --git a/connector/exceptionsconnector/metadata.yaml b/connector/exceptionsconnector/metadata.yaml
index 648ba9341f8d..81e18bb7fd07 100644
--- a/connector/exceptionsconnector/metadata.yaml
+++ b/connector/exceptionsconnector/metadata.yaml
@@ -6,7 +6,7 @@ status:
alpha: [traces_to_metrics, traces_to_logs]
distributions: [contrib, k8s]
codeowners:
- active: [jpkrohling, marctc]
+ active: [marctc]
tests:
config:
diff --git a/connector/grafanacloudconnector/README.md b/connector/grafanacloudconnector/README.md
index e6d7b035e832..6bf11c3f4528 100644
--- a/connector/grafanacloudconnector/README.md
+++ b/connector/grafanacloudconnector/README.md
@@ -5,7 +5,7 @@
| ------------- |-----------|
| Distributions | [contrib] |
| Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aconnector%2Fgrafanacloud%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aconnector%2Fgrafanacloud) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aconnector%2Fgrafanacloud%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aconnector%2Fgrafanacloud) |
-| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@jpkrohling](https://www.github.com/jpkrohling), [@rlankfo](https://www.github.com/rlankfo), [@jcreixell](https://www.github.com/jcreixell) |
+| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@rlankfo](https://www.github.com/rlankfo), [@jcreixell](https://www.github.com/jcreixell) |
[alpha]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#alpha
[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib
diff --git a/connector/grafanacloudconnector/metadata.yaml b/connector/grafanacloudconnector/metadata.yaml
index 873309168138..91a34a2e87a5 100644
--- a/connector/grafanacloudconnector/metadata.yaml
+++ b/connector/grafanacloudconnector/metadata.yaml
@@ -6,7 +6,7 @@ status:
alpha: [traces_to_metrics]
distributions: [contrib]
codeowners:
- active: [jpkrohling, rlankfo, jcreixell]
+ active: [rlankfo, jcreixell]
emeritus: []
seeking_new: false
diff --git a/connector/servicegraphconnector/README.md b/connector/servicegraphconnector/README.md
index 254ac345c4ba..f521cedadbd5 100644
--- a/connector/servicegraphconnector/README.md
+++ b/connector/servicegraphconnector/README.md
@@ -5,7 +5,7 @@
| ------------- |-----------|
| Distributions | [contrib], [k8s] |
| Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aconnector%2Fservicegraph%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aconnector%2Fservicegraph) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aconnector%2Fservicegraph%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aconnector%2Fservicegraph) |
-| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@jpkrohling](https://www.github.com/jpkrohling), [@mapno](https://www.github.com/mapno), [@JaredTan95](https://www.github.com/JaredTan95) |
+| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@mapno](https://www.github.com/mapno), [@JaredTan95](https://www.github.com/JaredTan95) |
[alpha]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#alpha
[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib
diff --git a/connector/servicegraphconnector/metadata.yaml b/connector/servicegraphconnector/metadata.yaml
index 420ec391a716..98a16f3883b3 100644
--- a/connector/servicegraphconnector/metadata.yaml
+++ b/connector/servicegraphconnector/metadata.yaml
@@ -6,7 +6,7 @@ status:
alpha: [traces_to_metrics]
distributions: [contrib, k8s]
codeowners:
- active: [jpkrohling, mapno, JaredTan95]
+ active: [mapno, JaredTan95]
tests:
config:
diff --git a/exporter/alertmanagerexporter/README.md b/exporter/alertmanagerexporter/README.md
index d583ac836fec..d7f5248a95ac 100644
--- a/exporter/alertmanagerexporter/README.md
+++ b/exporter/alertmanagerexporter/README.md
@@ -5,7 +5,7 @@
| Stability | [development]: traces |
| Distributions | [] |
| Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aexporter%2Falertmanager%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aexporter%2Falertmanager) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aexporter%2Falertmanager%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aexporter%2Falertmanager) |
-| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@jpkrohling](https://www.github.com/jpkrohling), [@sokoide](https://www.github.com/sokoide), [@mcube8](https://www.github.com/mcube8) |
+| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@sokoide](https://www.github.com/sokoide), [@mcube8](https://www.github.com/mcube8) |
[development]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#development
diff --git a/exporter/alertmanagerexporter/metadata.yaml b/exporter/alertmanagerexporter/metadata.yaml
index 00796c12e4b4..344808e4bcb9 100644
--- a/exporter/alertmanagerexporter/metadata.yaml
+++ b/exporter/alertmanagerexporter/metadata.yaml
@@ -6,7 +6,7 @@ status:
development: [traces]
distributions: []
codeowners:
- active: [jpkrohling, sokoide, mcube8]
+ active: [sokoide, mcube8]
tests:
config:
diff --git a/exporter/lokiexporter/README.md b/exporter/lokiexporter/README.md
index 98682b538be1..8929d4f32582 100644
--- a/exporter/lokiexporter/README.md
+++ b/exporter/lokiexporter/README.md
@@ -6,7 +6,7 @@
| Stability | [deprecated]: logs |
| Distributions | [contrib] |
| Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aexporter%2Floki%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aexporter%2Floki) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aexporter%2Floki%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aexporter%2Floki) |
-| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@gramidt](https://www.github.com/gramidt), [@jpkrohling](https://www.github.com/jpkrohling), [@mar4uk](https://www.github.com/mar4uk) |
+| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@gramidt](https://www.github.com/gramidt), [@mar4uk](https://www.github.com/mar4uk) |
[deprecated]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#deprecated
[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib
diff --git a/exporter/lokiexporter/metadata.yaml b/exporter/lokiexporter/metadata.yaml
index 9f21bcd5991e..9ad3e2c2d47b 100644
--- a/exporter/lokiexporter/metadata.yaml
+++ b/exporter/lokiexporter/metadata.yaml
@@ -7,7 +7,7 @@ status:
distributions:
- contrib
codeowners:
- active: [gramidt, jpkrohling, mar4uk]
+ active: [gramidt, mar4uk]
tests:
expect_consumer_error: true
diff --git a/extension/headerssetterextension/README.md b/extension/headerssetterextension/README.md
index 57ccb9273b1b..ebad2c471056 100644
--- a/extension/headerssetterextension/README.md
+++ b/extension/headerssetterextension/README.md
@@ -5,7 +5,7 @@
| Stability | [alpha] |
| Distributions | [contrib], [k8s] |
| Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aextension%2Fheaderssetter%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aextension%2Fheaderssetter) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aextension%2Fheaderssetter%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aextension%2Fheaderssetter) |
-| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@jpkrohling](https://www.github.com/jpkrohling) |
+| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | \| Seeking more code owners! |
[alpha]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#alpha
[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib
diff --git a/extension/headerssetterextension/metadata.yaml b/extension/headerssetterextension/metadata.yaml
index ba825b4e3e24..0b8141bfb2a4 100644
--- a/extension/headerssetterextension/metadata.yaml
+++ b/extension/headerssetterextension/metadata.yaml
@@ -6,6 +6,7 @@ status:
alpha: [extension]
distributions: [contrib, k8s]
codeowners:
- active: [jpkrohling]
+ active: []
+ seeking_new: true
tests:
config:
diff --git a/extension/healthcheckextension/README.md b/extension/healthcheckextension/README.md
index a6de43ebfec3..3986147fab09 100644
--- a/extension/healthcheckextension/README.md
+++ b/extension/healthcheckextension/README.md
@@ -15,7 +15,7 @@
| Stability | [beta] |
| Distributions | [core], [contrib], [k8s] |
| Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aextension%2Fhealthcheck%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aextension%2Fhealthcheck) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aextension%2Fhealthcheck%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aextension%2Fhealthcheck) |
-| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@jpkrohling](https://www.github.com/jpkrohling) |
+| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | |
[beta]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#beta
[core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol
diff --git a/extension/healthcheckextension/metadata.yaml b/extension/healthcheckextension/metadata.yaml
index 9f0dd481d733..30e83b2d97a1 100644
--- a/extension/healthcheckextension/metadata.yaml
+++ b/extension/healthcheckextension/metadata.yaml
@@ -6,7 +6,7 @@ status:
beta: [extension]
distributions: [core, contrib, k8s]
codeowners:
- active: [jpkrohling]
+ active: []
tests:
config:
diff --git a/pkg/translator/loki/metadata.yaml b/pkg/translator/loki/metadata.yaml
index 94d94e815591..775ce516da5c 100644
--- a/pkg/translator/loki/metadata.yaml
+++ b/pkg/translator/loki/metadata.yaml
@@ -1,3 +1,3 @@
status:
codeowners:
- active: [gouthamve, jpkrohling, mar4uk]
\ No newline at end of file
+ active: [gouthamve, mar4uk]
\ No newline at end of file
diff --git a/processor/deltatocumulativeprocessor/README.md b/processor/deltatocumulativeprocessor/README.md
index 8672cebf257a..04548d5f7f8e 100644
--- a/processor/deltatocumulativeprocessor/README.md
+++ b/processor/deltatocumulativeprocessor/README.md
@@ -7,7 +7,7 @@
| Distributions | [contrib], [k8s] |
| Warnings | [Statefulness](#warnings) |
| Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aprocessor%2Fdeltatocumulative%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aprocessor%2Fdeltatocumulative) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aprocessor%2Fdeltatocumulative%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aprocessor%2Fdeltatocumulative) |
-| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@sh0rez](https://www.github.com/sh0rez), [@RichieSams](https://www.github.com/RichieSams), [@jpkrohling](https://www.github.com/jpkrohling) |
+| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@sh0rez](https://www.github.com/sh0rez), [@RichieSams](https://www.github.com/RichieSams) |
[alpha]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#alpha
[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib
diff --git a/processor/deltatocumulativeprocessor/metadata.yaml b/processor/deltatocumulativeprocessor/metadata.yaml
index d35ca6b885b4..be925197db82 100644
--- a/processor/deltatocumulativeprocessor/metadata.yaml
+++ b/processor/deltatocumulativeprocessor/metadata.yaml
@@ -7,7 +7,7 @@ status:
distributions: [contrib, k8s]
warnings: [Statefulness]
codeowners:
- active: [sh0rez, RichieSams, jpkrohling]
+ active: [sh0rez, RichieSams]
telemetry:
metrics:
diff --git a/receiver/awscloudwatchmetricsreceiver/README.md b/receiver/awscloudwatchmetricsreceiver/README.md
index e6d645f0d462..5833904fa86f 100644
--- a/receiver/awscloudwatchmetricsreceiver/README.md
+++ b/receiver/awscloudwatchmetricsreceiver/README.md
@@ -6,7 +6,7 @@
| Stability | [development]: metrics |
| Distributions | [] |
| Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Areceiver%2Fawscloudwatchmetrics%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Areceiver%2Fawscloudwatchmetrics) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Areceiver%2Fawscloudwatchmetrics%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Areceiver%2Fawscloudwatchmetrics) |
-| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@jpkrohling](https://www.github.com/jpkrohling) |
+| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | |
[development]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#development
diff --git a/receiver/awscloudwatchmetricsreceiver/metadata.yaml b/receiver/awscloudwatchmetricsreceiver/metadata.yaml
index ed1ebfa61213..e6c1f4056b28 100644
--- a/receiver/awscloudwatchmetricsreceiver/metadata.yaml
+++ b/receiver/awscloudwatchmetricsreceiver/metadata.yaml
@@ -6,7 +6,7 @@ status:
development: [metrics]
distributions: []
codeowners:
- active: [jpkrohling]
+ active: []
tests:
config:
diff --git a/receiver/datadogreceiver/README.md b/receiver/datadogreceiver/README.md
index 62f01792eee3..0eff13dbeb69 100644
--- a/receiver/datadogreceiver/README.md
+++ b/receiver/datadogreceiver/README.md
@@ -6,7 +6,7 @@
| Stability | [alpha]: traces, metrics |
| Distributions | [contrib] |
| Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Areceiver%2Fdatadog%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Areceiver%2Fdatadog) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Areceiver%2Fdatadog%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Areceiver%2Fdatadog) |
-| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@boostchicken](https://www.github.com/boostchicken), [@gouthamve](https://www.github.com/gouthamve), [@jpkrohling](https://www.github.com/jpkrohling), [@MovieStoreGuy](https://www.github.com/MovieStoreGuy) |
+| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@boostchicken](https://www.github.com/boostchicken), [@gouthamve](https://www.github.com/gouthamve), [@MovieStoreGuy](https://www.github.com/MovieStoreGuy) |
[alpha]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#alpha
[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib
diff --git a/receiver/datadogreceiver/metadata.yaml b/receiver/datadogreceiver/metadata.yaml
index c74816384df8..da80f5f5ff39 100644
--- a/receiver/datadogreceiver/metadata.yaml
+++ b/receiver/datadogreceiver/metadata.yaml
@@ -6,7 +6,7 @@ status:
alpha: [traces, metrics]
distributions: [contrib]
codeowners:
- active: [boostchicken, gouthamve, jpkrohling, MovieStoreGuy]
+ active: [boostchicken, gouthamve, MovieStoreGuy]
tests:
skip_lifecycle: true # Skip lifecycle tests since there are multiple receivers that run on the same port
diff --git a/receiver/lokireceiver/README.md b/receiver/lokireceiver/README.md
index a7c8b081b1ae..1def93850cc2 100644
--- a/receiver/lokireceiver/README.md
+++ b/receiver/lokireceiver/README.md
@@ -6,7 +6,7 @@
| Stability | [alpha]: logs |
| Distributions | [contrib] |
| Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Areceiver%2Floki%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Areceiver%2Floki) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Areceiver%2Floki%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Areceiver%2Floki) |
-| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@mar4uk](https://www.github.com/mar4uk), [@jpkrohling](https://www.github.com/jpkrohling) |
+| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@mar4uk](https://www.github.com/mar4uk) |
[alpha]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#alpha
[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib
diff --git a/receiver/lokireceiver/metadata.yaml b/receiver/lokireceiver/metadata.yaml
index 7daa3b5cd6c5..e68ce200beac 100644
--- a/receiver/lokireceiver/metadata.yaml
+++ b/receiver/lokireceiver/metadata.yaml
@@ -7,4 +7,4 @@ status:
distributions:
- contrib
codeowners:
- active: [mar4uk, jpkrohling]
+ active: [mar4uk]
From 809497347ac9a601295ebb14420953429725c9a7 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Wed, 27 Nov 2024 11:42:31 -0800
Subject: [PATCH 10/23] Update module sigs.k8s.io/controller-runtime to v0.19.2
(#36541)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
This PR contains the following updates:
| Package | Change | Age | Adoption | Passing | Confidence |
|---|---|---|---|---|---|
|
[sigs.k8s.io/controller-runtime](https://redirect.github.com/kubernetes-sigs/controller-runtime)
| `v0.19.1` -> `v0.19.2` |
[![age](https://developer.mend.io/api/mc/badges/age/go/sigs.k8s.io%2fcontroller-runtime/v0.19.2?slim=true)](https://docs.renovatebot.com/merge-confidence/)
|
[![adoption](https://developer.mend.io/api/mc/badges/adoption/go/sigs.k8s.io%2fcontroller-runtime/v0.19.2?slim=true)](https://docs.renovatebot.com/merge-confidence/)
|
[![passing](https://developer.mend.io/api/mc/badges/compatibility/go/sigs.k8s.io%2fcontroller-runtime/v0.19.1/v0.19.2?slim=true)](https://docs.renovatebot.com/merge-confidence/)
|
[![confidence](https://developer.mend.io/api/mc/badges/confidence/go/sigs.k8s.io%2fcontroller-runtime/v0.19.1/v0.19.2?slim=true)](https://docs.renovatebot.com/merge-confidence/)
|
---
> [!WARNING]
> Some dependencies could not be looked up. Check the Dependency
Dashboard for more information.
---
### Release Notes
kubernetes-sigs/controller-runtime
(sigs.k8s.io/controller-runtime)
###
[`v0.19.2`](https://redirect.github.com/kubernetes-sigs/controller-runtime/releases/tag/v0.19.2)
[Compare
Source](https://redirect.github.com/kubernetes-sigs/controller-runtime/compare/v0.19.1...v0.19.2)
#### What's Changed
- ✨ Add EnableWatchBookmarks option to cache informers by
[@k8s-infra-cherrypick-robot](https://redirect.github.com/k8s-infra-cherrypick-robot)
in
[https://github.com/kubernetes-sigs/controller-runtime/pull/3018](https://redirect.github.com/kubernetes-sigs/controller-runtime/pull/3018)
**Full Changelog**:
https://github.com/kubernetes-sigs/controller-runtime/compare/v0.19.1...v0.19.2
---
### Configuration
📅 **Schedule**: Branch creation - "on tuesday" (UTC), Automerge - At any
time (no schedule defined).
🚦 **Automerge**: Disabled by config. Please merge this manually once you
are satisfied.
♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the
rebase/retry checkbox.
🔕 **Ignore**: Close this PR and you won't be reminded about this update
again.
---
- [ ] If you want to rebase/retry this PR, check
this box
---
This PR was generated by [Mend Renovate](https://mend.io/renovate/).
View the [repository job
log](https://developer.mend.io/github/open-telemetry/opentelemetry-collector-contrib).
---------
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: opentelemetrybot <107717825+opentelemetrybot@users.noreply.github.com>
Co-authored-by: Alex Boten <223565+codeboten@users.noreply.github.com>
---
exporter/loadbalancingexporter/go.mod | 2 +-
exporter/loadbalancingexporter/go.sum | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/exporter/loadbalancingexporter/go.mod b/exporter/loadbalancingexporter/go.mod
index b0c0269f9d96..c0c63d21314b 100644
--- a/exporter/loadbalancingexporter/go.mod
+++ b/exporter/loadbalancingexporter/go.mod
@@ -37,7 +37,7 @@ require (
k8s.io/apimachinery v0.31.3
k8s.io/client-go v0.31.3
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
- sigs.k8s.io/controller-runtime v0.19.1
+ sigs.k8s.io/controller-runtime v0.19.2
)
require (
diff --git a/exporter/loadbalancingexporter/go.sum b/exporter/loadbalancingexporter/go.sum
index cdea5b1106ec..e935e172b56a 100644
--- a/exporter/loadbalancingexporter/go.sum
+++ b/exporter/loadbalancingexporter/go.sum
@@ -436,8 +436,8 @@ k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7F
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-sigs.k8s.io/controller-runtime v0.19.1 h1:Son+Q40+Be3QWb+niBXAg2vFiYWolDjjRfO8hn/cxOk=
-sigs.k8s.io/controller-runtime v0.19.1/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4=
+sigs.k8s.io/controller-runtime v0.19.2 h1:3sPrF58XQEPzbE8T81TN6selQIMGbtYwuaJ6eDssDF8=
+sigs.k8s.io/controller-runtime v0.19.2/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
From e624d1b6ac5c07dce47b78510f7c421e7663372d Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Wed, 27 Nov 2024 11:42:41 -0800
Subject: [PATCH 11/23] Update module
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common to v1.0.1048
(#36536)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
This PR contains the following updates:
| Package | Change | Age | Adoption | Passing | Confidence |
|---|---|---|---|---|---|
|
[github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common](https://redirect.github.com/tencentcloud/tencentcloud-sdk-go)
| `v1.0.1042` -> `v1.0.1048` |
[![age](https://developer.mend.io/api/mc/badges/age/go/github.com%2ftencentcloud%2ftencentcloud-sdk-go%2ftencentcloud%2fcommon/v1.0.1048?slim=true)](https://docs.renovatebot.com/merge-confidence/)
|
[![adoption](https://developer.mend.io/api/mc/badges/adoption/go/github.com%2ftencentcloud%2ftencentcloud-sdk-go%2ftencentcloud%2fcommon/v1.0.1048?slim=true)](https://docs.renovatebot.com/merge-confidence/)
|
[![passing](https://developer.mend.io/api/mc/badges/compatibility/go/github.com%2ftencentcloud%2ftencentcloud-sdk-go%2ftencentcloud%2fcommon/v1.0.1042/v1.0.1048?slim=true)](https://docs.renovatebot.com/merge-confidence/)
|
[![confidence](https://developer.mend.io/api/mc/badges/confidence/go/github.com%2ftencentcloud%2ftencentcloud-sdk-go%2ftencentcloud%2fcommon/v1.0.1042/v1.0.1048?slim=true)](https://docs.renovatebot.com/merge-confidence/)
|
---
> [!WARNING]
> Some dependencies could not be looked up. Check the Dependency
Dashboard for more information.
---
### Configuration
📅 **Schedule**: Branch creation - "on tuesday" (UTC), Automerge - At any
time (no schedule defined).
🚦 **Automerge**: Disabled by config. Please merge this manually once you
are satisfied.
♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the
rebase/retry checkbox.
🔕 **Ignore**: Close this PR and you won't be reminded about this update
again.
---
- [ ] If you want to rebase/retry this PR, check
this box
---
This PR was generated by [Mend Renovate](https://mend.io/renovate/).
View the [repository job
log](https://developer.mend.io/github/open-telemetry/opentelemetry-collector-contrib).
---------
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: opentelemetrybot <107717825+opentelemetrybot@users.noreply.github.com>
Co-authored-by: Alex Boten <223565+codeboten@users.noreply.github.com>
---
exporter/tencentcloudlogserviceexporter/go.mod | 2 +-
exporter/tencentcloudlogserviceexporter/go.sum | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/exporter/tencentcloudlogserviceexporter/go.mod b/exporter/tencentcloudlogserviceexporter/go.mod
index cdb1189d65d3..623ce51054ea 100644
--- a/exporter/tencentcloudlogserviceexporter/go.mod
+++ b/exporter/tencentcloudlogserviceexporter/go.mod
@@ -6,7 +6,7 @@ require (
github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.114.0
github.com/pierrec/lz4 v2.6.1+incompatible
github.com/stretchr/testify v1.10.0
- github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.1042
+ github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.1048
go.opentelemetry.io/collector/component v0.114.0
go.opentelemetry.io/collector/component/componenttest v0.114.0
go.opentelemetry.io/collector/config/configopaque v1.20.0
diff --git a/exporter/tencentcloudlogserviceexporter/go.sum b/exporter/tencentcloudlogserviceexporter/go.sum
index 65d5b0b90ebf..557206847df1 100644
--- a/exporter/tencentcloudlogserviceexporter/go.sum
+++ b/exporter/tencentcloudlogserviceexporter/go.sum
@@ -56,8 +56,8 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.1042 h1:8aMk2lo6ET6GdcqoYdsadfUW+JuZ/2W5gXU4dtQAR1E=
-github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.1042/go.mod h1:r5r4xbfxSaeR04b166HGsBa/R4U3SueirEUpXGuw+Q0=
+github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.1048 h1:WvooPEWRp/0KDvmRVyTSW8jObgWAH2hDYiRCcHsDmPw=
+github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.1048/go.mod h1:r5r4xbfxSaeR04b166HGsBa/R4U3SueirEUpXGuw+Q0=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.opentelemetry.io/collector/component v0.114.0 h1:SVGbm5LvHGSTEDv7p92oPuBgK5tuiWR82I9+LL4TtBE=
From 290e3b3687d51996dcf448abe3f7456c8bbd4c95 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Wed, 27 Nov 2024 12:10:48 -0800
Subject: [PATCH 12/23] Update module github.com/relvacode/iso8601 to v1.6.0
(#36544)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
This PR contains the following updates:
| Package | Change | Age | Adoption | Passing | Confidence |
|---|---|---|---|---|---|
|
[github.com/relvacode/iso8601](https://redirect.github.com/relvacode/iso8601)
| `v1.5.0` -> `v1.6.0` |
[![age](https://developer.mend.io/api/mc/badges/age/go/github.com%2frelvacode%2fiso8601/v1.6.0?slim=true)](https://docs.renovatebot.com/merge-confidence/)
|
[![adoption](https://developer.mend.io/api/mc/badges/adoption/go/github.com%2frelvacode%2fiso8601/v1.6.0?slim=true)](https://docs.renovatebot.com/merge-confidence/)
|
[![passing](https://developer.mend.io/api/mc/badges/compatibility/go/github.com%2frelvacode%2fiso8601/v1.5.0/v1.6.0?slim=true)](https://docs.renovatebot.com/merge-confidence/)
|
[![confidence](https://developer.mend.io/api/mc/badges/confidence/go/github.com%2frelvacode%2fiso8601/v1.5.0/v1.6.0?slim=true)](https://docs.renovatebot.com/merge-confidence/)
|
---
> [!WARNING]
> Some dependencies could not be looked up. Check the Dependency
Dashboard for more information.
---
### Release Notes
relvacode/iso8601 (github.com/relvacode/iso8601)
###
[`v1.6.0`](https://redirect.github.com/relvacode/iso8601/releases/tag/v1.6.0)
[Compare
Source](https://redirect.github.com/relvacode/iso8601/compare/v1.5.0...v1.6.0)
##### What's Changed
- Add ParseInLocation by
[@squarespirit](https://redirect.github.com/squarespirit) in
[https://github.com/relvacode/iso8601/pull/28](https://redirect.github.com/relvacode/iso8601/pull/28)
##### New Contributors
- [@squarespirit](https://redirect.github.com/squarespirit) made
their first contribution in
[https://github.com/relvacode/iso8601/pull/28](https://redirect.github.com/relvacode/iso8601/pull/28)
**Full Changelog**:
https://github.com/relvacode/iso8601/compare/v1.5.0...v1.6.0
---
### Configuration
📅 **Schedule**: Branch creation - "on tuesday" (UTC), Automerge - At any
time (no schedule defined).
🚦 **Automerge**: Disabled by config. Please merge this manually once you
are satisfied.
♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the
rebase/retry checkbox.
🔕 **Ignore**: Close this PR and you won't be reminded about this update
again.
---
- [ ] If you want to rebase/retry this PR, check
this box
---
This PR was generated by [Mend Renovate](https://mend.io/renovate/).
View the [repository job
log](https://developer.mend.io/github/open-telemetry/opentelemetry-collector-contrib).
---------
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: opentelemetrybot <107717825+opentelemetrybot@users.noreply.github.com>
Co-authored-by: Alex Boten <223565+codeboten@users.noreply.github.com>
---
pkg/translator/azure/go.mod | 2 +-
pkg/translator/azure/go.sum | 4 ++--
pkg/translator/azurelogs/go.mod | 2 +-
pkg/translator/azurelogs/go.sum | 4 ++--
receiver/azureeventhubreceiver/go.mod | 2 +-
receiver/azureeventhubreceiver/go.sum | 4 ++--
receiver/kafkareceiver/go.mod | 2 +-
receiver/kafkareceiver/go.sum | 4 ++--
8 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/pkg/translator/azure/go.mod b/pkg/translator/azure/go.mod
index 564e6b82953d..fd2eca72425a 100644
--- a/pkg/translator/azure/go.mod
+++ b/pkg/translator/azure/go.mod
@@ -5,7 +5,7 @@ go 1.22.0
require (
github.com/json-iterator/go v1.1.12
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.114.0
- github.com/relvacode/iso8601 v1.5.0
+ github.com/relvacode/iso8601 v1.6.0
github.com/stretchr/testify v1.10.0
go.opentelemetry.io/collector/component v0.114.0
go.opentelemetry.io/collector/pdata v1.20.0
diff --git a/pkg/translator/azure/go.sum b/pkg/translator/azure/go.sum
index 875d5895bf07..1764ab07e058 100644
--- a/pkg/translator/azure/go.sum
+++ b/pkg/translator/azure/go.sum
@@ -27,8 +27,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/relvacode/iso8601 v1.5.0 h1:hM+cirGvOz6gKuUEqimr5TH3tiQiVOuc2QIO+nI5fY4=
-github.com/relvacode/iso8601 v1.5.0/go.mod h1:FlNp+jz+TXpyRqgmM7tnzHHzBnz776kmAH2h3sZCn0I=
+github.com/relvacode/iso8601 v1.6.0 h1:eFXUhMJN3Gz8Rcq82f9DTMW0svjtAVuIEULglM7QHTU=
+github.com/relvacode/iso8601 v1.6.0/go.mod h1:FlNp+jz+TXpyRqgmM7tnzHHzBnz776kmAH2h3sZCn0I=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
diff --git a/pkg/translator/azurelogs/go.mod b/pkg/translator/azurelogs/go.mod
index 79d752d4b100..cf415a5979c1 100644
--- a/pkg/translator/azurelogs/go.mod
+++ b/pkg/translator/azurelogs/go.mod
@@ -5,7 +5,7 @@ go 1.22.0
require (
github.com/json-iterator/go v1.1.12
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.114.0
- github.com/relvacode/iso8601 v1.5.0
+ github.com/relvacode/iso8601 v1.6.0
github.com/stretchr/testify v1.10.0
go.opentelemetry.io/collector/component v0.114.0
go.opentelemetry.io/collector/pdata v1.20.0
diff --git a/pkg/translator/azurelogs/go.sum b/pkg/translator/azurelogs/go.sum
index efcd6cfa6568..0d5b13eaedb7 100644
--- a/pkg/translator/azurelogs/go.sum
+++ b/pkg/translator/azurelogs/go.sum
@@ -27,8 +27,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/relvacode/iso8601 v1.5.0 h1:hM+cirGvOz6gKuUEqimr5TH3tiQiVOuc2QIO+nI5fY4=
-github.com/relvacode/iso8601 v1.5.0/go.mod h1:FlNp+jz+TXpyRqgmM7tnzHHzBnz776kmAH2h3sZCn0I=
+github.com/relvacode/iso8601 v1.6.0 h1:eFXUhMJN3Gz8Rcq82f9DTMW0svjtAVuIEULglM7QHTU=
+github.com/relvacode/iso8601 v1.6.0/go.mod h1:FlNp+jz+TXpyRqgmM7tnzHHzBnz776kmAH2h3sZCn0I=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
diff --git a/receiver/azureeventhubreceiver/go.mod b/receiver/azureeventhubreceiver/go.mod
index 4535296d0dd7..18b387a8cbf2 100644
--- a/receiver/azureeventhubreceiver/go.mod
+++ b/receiver/azureeventhubreceiver/go.mod
@@ -10,7 +10,7 @@ require (
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.114.0
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.114.0
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azurelogs v0.114.0
- github.com/relvacode/iso8601 v1.5.0
+ github.com/relvacode/iso8601 v1.6.0
github.com/stretchr/testify v1.10.0
go.opentelemetry.io/collector/component v0.114.0
go.opentelemetry.io/collector/component/componenttest v0.114.0
diff --git a/receiver/azureeventhubreceiver/go.sum b/receiver/azureeventhubreceiver/go.sum
index fbedbba14cd9..80376a801d69 100644
--- a/receiver/azureeventhubreceiver/go.sum
+++ b/receiver/azureeventhubreceiver/go.sum
@@ -147,8 +147,8 @@ github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPA
github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
-github.com/relvacode/iso8601 v1.5.0 h1:hM+cirGvOz6gKuUEqimr5TH3tiQiVOuc2QIO+nI5fY4=
-github.com/relvacode/iso8601 v1.5.0/go.mod h1:FlNp+jz+TXpyRqgmM7tnzHHzBnz776kmAH2h3sZCn0I=
+github.com/relvacode/iso8601 v1.6.0 h1:eFXUhMJN3Gz8Rcq82f9DTMW0svjtAVuIEULglM7QHTU=
+github.com/relvacode/iso8601 v1.6.0/go.mod h1:FlNp+jz+TXpyRqgmM7tnzHHzBnz776kmAH2h3sZCn0I=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA=
diff --git a/receiver/kafkareceiver/go.mod b/receiver/kafkareceiver/go.mod
index 190e685ea301..40a908a46084 100644
--- a/receiver/kafkareceiver/go.mod
+++ b/receiver/kafkareceiver/go.mod
@@ -73,7 +73,7 @@ require (
github.com/pierrec/lz4/v4 v4.1.21 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
- github.com/relvacode/iso8601 v1.5.0 // indirect
+ github.com/relvacode/iso8601 v1.6.0 // indirect
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
github.com/xdg-go/scram v1.1.2 // indirect
github.com/xdg-go/stringprep v1.0.4 // indirect
diff --git a/receiver/kafkareceiver/go.sum b/receiver/kafkareceiver/go.sum
index f84bc9e8a486..6ec53ada1020 100644
--- a/receiver/kafkareceiver/go.sum
+++ b/receiver/kafkareceiver/go.sum
@@ -100,8 +100,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
-github.com/relvacode/iso8601 v1.5.0 h1:hM+cirGvOz6gKuUEqimr5TH3tiQiVOuc2QIO+nI5fY4=
-github.com/relvacode/iso8601 v1.5.0/go.mod h1:FlNp+jz+TXpyRqgmM7tnzHHzBnz776kmAH2h3sZCn0I=
+github.com/relvacode/iso8601 v1.6.0 h1:eFXUhMJN3Gz8Rcq82f9DTMW0svjtAVuIEULglM7QHTU=
+github.com/relvacode/iso8601 v1.6.0/go.mod h1:FlNp+jz+TXpyRqgmM7tnzHHzBnz776kmAH2h3sZCn0I=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
From ea2967373b5b41e7b77dd9489d86345b38d7a206 Mon Sep 17 00:00:00 2001
From: Jason Barto
Date: Wed, 27 Nov 2024 20:28:54 +0000
Subject: [PATCH 13/23] Add support for 1-second Storage Resolution in the AWS
EMF Exporter (#36057)
This change implements the capability for users of the AWS EMF Exporter
to specify which metrics they would like to have sent to CloudWatch with
a 1 second Storage Resolution. The EMF Exporter now explicitly states
the Storage Resolution for each metric as 60 seconds, the previous
implicit default, so there is no behavior change. If the user specifies
a metric to have 1 second resolution it will be sent to CloudWatch EMF
with the Storage Resolution set accordingly.
#### Description
Previously the AWS EMF Exporter sent metric data into CloudWatch without
specifying the storage resolution. CloudWatch would then default to a 60
second storage resolution, even if metrics are sent more frequently than
every 60 seconds. This would confuse users when they try to apply
functions like AVG, SUM, MAX, or MIN to their metrics with a period of 5
seconds. The function would be applied by CloudWatch to 60 seconds worth
of data and produced unexpected results and confusion for the user. This
commit makes this 60 second resolution explicit in the messages sent to
CloudWatch by the EMF Exporter and also gives the user the option to
specify a more granular 1 second resolution per metric in the
configuration file of the AWS EMF Exporter.
#### Link to tracking issue
Fixes #29506
#### Testing
Added tests to verify that config file parsing validates a metric
descriptor that specifies either a valid unit, valid storage resolution,
or both and rejects other invalid metric descriptors.
Added tests that the translation from metric data to CW EMF carries a
storage resolution with it, defaulting to a value of 60 (current
behavior) if no storage resolution valid is explicitly set in the
configuration.
#### Documentation
Comments added in the code but have not updated the README.md pending
acceptance of the PR.
---
.chloggen/awsemf_storageresolution.yaml | 27 +++
exporter/awsemfexporter/README.md | 8 +
exporter/awsemfexporter/metric_translator.go | 60 ++++-
.../awsemfexporter/metric_translator_test.go | 226 ++++++++++--------
4 files changed, 212 insertions(+), 109 deletions(-)
create mode 100644 .chloggen/awsemf_storageresolution.yaml
diff --git a/.chloggen/awsemf_storageresolution.yaml b/.chloggen/awsemf_storageresolution.yaml
new file mode 100644
index 000000000000..bdd7dbad2304
--- /dev/null
+++ b/.chloggen/awsemf_storageresolution.yaml
@@ -0,0 +1,27 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: enhancement
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: awsemfexporter
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Add support for 1 second metric resolution in CloudWatch Embedded Metrics Format based on metric attributes
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [29506]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: [user]
diff --git a/exporter/awsemfexporter/README.md b/exporter/awsemfexporter/README.md
index 8e79a58c41cf..6eb5d1dd4ebf 100644
--- a/exporter/awsemfexporter/README.md
+++ b/exporter/awsemfexporter/README.md
@@ -85,6 +85,14 @@ This exporter follows default credential resolution for the
Follow the [guidelines](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html) for the
credential configuration.
+## Metric Attributes
+By setting attributes on your metrics you can change how individual metrics are sent to CloudWatch. Attributes can be set in code or using components like the [Attribute Processor](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/attributesprocessor).
+
+The AWS EMF Exporter will interpret the following metric attributes to change how it publishes metrics to CloudWatch:
+
+| Attribute Name | Description | Default |
+| :---------------- | :--------------------------------------------------------------------- | ------- |
+| `aws.emf.storage_resolution` | This attribute should be set to an integer value of `1` or `60`. When sending the metric value to CloudWatch use the specified storage resolution value. CloudWatch currently supports a storage resolution of `1` or `60` to indicate 1 second or 60 second resolution. | `aws.emf.storage_resolution = 60` |
## Configuration Examples
diff --git a/exporter/awsemfexporter/metric_translator.go b/exporter/awsemfexporter/metric_translator.go
index bb16a5ea3346..b5d9330503ce 100644
--- a/exporter/awsemfexporter/metric_translator.go
+++ b/exporter/awsemfexporter/metric_translator.go
@@ -7,6 +7,7 @@ import (
"encoding/json"
"fmt"
"reflect"
+ "strconv"
"time"
"go.opentelemetry.io/collector/pdata/pmetric"
@@ -29,6 +30,9 @@ const (
prometheusReceiver = "prometheus"
attributeReceiver = "receiver"
fieldPrometheusMetricType = "prom_metric_type"
+
+ // metric attributes for AWS EMF, not to be treated as metric labels
+ emfStorageResolutionAttribute = "aws.emf.storage_resolution"
)
var fieldPrometheusTypes = map[pmetric.MetricType]string{
@@ -45,10 +49,16 @@ type cWMetrics struct {
fields map[string]any
}
+type cWMetricInfo struct {
+ Name string
+ Unit string
+ StorageResolution int
+}
+
type cWMeasurement struct {
Namespace string
Dimensions [][]string
- Metrics []map[string]string
+ Metrics []cWMetricInfo
}
type cWMetricStats struct {
@@ -156,7 +166,7 @@ func (mt metricTranslator) translateOTelToGroupedMetric(rm pmetric.ResourceMetri
// translateGroupedMetricToCWMetric converts Grouped Metric format to CloudWatch Metric format.
func translateGroupedMetricToCWMetric(groupedMetric *groupedMetric, config *Config) *cWMetrics {
- labels := groupedMetric.labels
+ labels := filterAWSEMFAttributes(groupedMetric.labels)
fieldsLength := len(labels) + len(groupedMetric.metrics)
isPrometheusMetric := groupedMetric.metadata.receiver == prometheusReceiver
@@ -198,7 +208,7 @@ func translateGroupedMetricToCWMetric(groupedMetric *groupedMetric, config *Conf
// groupedMetricToCWMeasurement creates a single CW Measurement from a grouped metric.
func groupedMetricToCWMeasurement(groupedMetric *groupedMetric, config *Config) cWMeasurement {
- labels := groupedMetric.labels
+ labels := filterAWSEMFAttributes(groupedMetric.labels)
dimensionRollupOption := config.DimensionRollupOption
// Create a dimension set containing list of label names
@@ -208,6 +218,7 @@ func groupedMetricToCWMeasurement(groupedMetric *groupedMetric, config *Config)
dimSet[idx] = labelName
idx++
}
+
dimensions := [][]string{dimSet}
// Apply single/zero dimension rollup to labels
@@ -228,14 +239,20 @@ func groupedMetricToCWMeasurement(groupedMetric *groupedMetric, config *Config)
// Add on rolled-up dimensions
dimensions = append(dimensions, rollupDimensionArray...)
- metrics := make([]map[string]string, len(groupedMetric.metrics))
+ metrics := make([]cWMetricInfo, len(groupedMetric.metrics))
idx = 0
for metricName, metricInfo := range groupedMetric.metrics {
- metrics[idx] = map[string]string{
- "Name": metricName,
+ metrics[idx] = cWMetricInfo{
+ Name: metricName,
+ StorageResolution: 60,
}
if metricInfo.unit != "" {
- metrics[idx]["Unit"] = metricInfo.unit
+ metrics[idx].Unit = metricInfo.unit
+ }
+ if storRes, ok := groupedMetric.labels[emfStorageResolutionAttribute]; ok {
+ if storResInt, err := strconv.Atoi(storRes); err == nil {
+ metrics[idx].StorageResolution = storResInt
+ }
}
idx++
}
@@ -250,7 +267,7 @@ func groupedMetricToCWMeasurement(groupedMetric *groupedMetric, config *Config)
// groupedMetricToCWMeasurementsWithFilters filters the grouped metric using the given list of metric
// declarations and returns the corresponding list of CW Measurements.
func groupedMetricToCWMeasurementsWithFilters(groupedMetric *groupedMetric, config *Config) (cWMeasurements []cWMeasurement) {
- labels := groupedMetric.labels
+ labels := filterAWSEMFAttributes(groupedMetric.labels)
// Filter metric declarations by labels
metricDeclarations := make([]*MetricDeclaration, 0, len(config.MetricDeclarations))
@@ -278,7 +295,7 @@ func groupedMetricToCWMeasurementsWithFilters(groupedMetric *groupedMetric, conf
// Group metrics by matched metric declarations
type metricDeclarationGroup struct {
metricDeclIdxList []int
- metrics []map[string]string
+ metrics []cWMetricInfo
}
metricDeclGroups := make(map[string]*metricDeclarationGroup)
@@ -299,11 +316,17 @@ func groupedMetricToCWMeasurementsWithFilters(groupedMetric *groupedMetric, conf
continue
}
- metric := map[string]string{
- "Name": metricName,
+ metric := cWMetricInfo{
+ Name: metricName,
+ StorageResolution: 60,
}
if metricInfo.unit != "" {
- metric["Unit"] = metricInfo.unit
+ metric.Unit = metricInfo.unit
+ }
+ if storRes, ok := groupedMetric.labels[emfStorageResolutionAttribute]; ok {
+ if storResInt, err := strconv.Atoi(storRes); err == nil {
+ metric.StorageResolution = storResInt
+ }
}
metricDeclKey := fmt.Sprint(metricDeclIdx)
if group, ok := metricDeclGroups[metricDeclKey]; ok {
@@ -311,7 +334,7 @@ func groupedMetricToCWMeasurementsWithFilters(groupedMetric *groupedMetric, conf
} else {
metricDeclGroups[metricDeclKey] = &metricDeclarationGroup{
metricDeclIdxList: metricDeclIdx,
- metrics: []map[string]string{metric},
+ metrics: []cWMetricInfo{metric},
}
}
}
@@ -465,3 +488,14 @@ func translateGroupedMetricToEmf(groupedMetric *groupedMetric, config *Config, d
return event, nil
}
+
+func filterAWSEMFAttributes(labels map[string]string) map[string]string {
+ // remove any labels that are attributes specific to AWS EMF Exporter
+ filteredLabels := make(map[string]string)
+ for labelName := range labels {
+ if labelName != emfStorageResolutionAttribute {
+ filteredLabels[labelName] = labels[labelName]
+ }
+ }
+ return filteredLabels
+}
diff --git a/exporter/awsemfexporter/metric_translator_test.go b/exporter/awsemfexporter/metric_translator_test.go
index 15c74767d7d8..3a8f9268624c 100644
--- a/exporter/awsemfexporter/metric_translator_test.go
+++ b/exporter/awsemfexporter/metric_translator_test.go
@@ -163,11 +163,11 @@ func normalizeDimensionality(dims [][]string) [][]string {
}
// hashMetricSlice hashes a metrics slice for equality checking.
-func hashMetricSlice(metricSlice []map[string]string) []string {
+func hashMetricSlice(metricSlice []cWMetricInfo) []string {
// Convert to string for easier sorting
stringified := make([]string, len(metricSlice))
for i, v := range metricSlice {
- stringified[i] = v["Name"] + "," + v["Unit"]
+ stringified[i] = fmt.Sprint(v.Name) + "," + fmt.Sprint(v.Unit) + "," + fmt.Sprint(v.StorageResolution)
}
// Sort across metrics for equality checking
sort.Strings(stringified)
@@ -397,24 +397,26 @@ func TestTranslateCWMetricToEMF(t *testing.T) {
measurements: []cWMeasurement{{
Namespace: "test-emf",
Dimensions: [][]string{{oTellibDimensionKey}, {oTellibDimensionKey, "spanName"}},
- Metrics: []map[string]string{{
- "Name": "spanCounter",
- "Unit": "Count",
+ Metrics: []cWMetricInfo{{
+ Name: "spanCounter",
+ Unit: "Count",
+ StorageResolution: 1,
}},
}},
- expectedEMFLogEvent: "{\"OTelLib\":\"cloudwatch-otel\",\"Sources\":[\"cadvisor\",\"pod\",\"calculated\"],\"Version\":\"1\",\"_aws\":{\"CloudWatchMetrics\":[{\"Namespace\":\"test-emf\",\"Dimensions\":[[\"OTelLib\"],[\"OTelLib\",\"spanName\"]],\"Metrics\":[{\"Name\":\"spanCounter\",\"Unit\":\"Count\"}]}],\"Timestamp\":1596151098037},\"kubernetes\":{\"container_name\":\"cloudwatch-agent\",\"docker\":{\"container_id\":\"fc1b0a4c3faaa1808e187486a3a90cbea883dccaf2e2c46d4069d663b032a1ca\"},\"host\":\"ip-192-168-58-245.ec2.internal\",\"labels\":{\"controller-revision-hash\":\"5bdbf497dc\",\"name\":\"cloudwatch-agent\",\"pod-template-generation\":\"1\"},\"namespace_name\":\"amazon-cloudwatch\",\"pod_id\":\"e23f3413-af2e-4a98-89e0-5df2251e7f05\",\"pod_name\":\"cloudwatch-agent-26bl6\",\"pod_owners\":[{\"owner_kind\":\"DaemonSet\",\"owner_name\":\"cloudwatch-agent\"}]},\"spanCounter\":0,\"spanName\":\"test\"}",
+ expectedEMFLogEvent: "{\"OTelLib\":\"cloudwatch-otel\",\"Sources\":[\"cadvisor\",\"pod\",\"calculated\"],\"Version\":\"1\",\"_aws\":{\"CloudWatchMetrics\":[{\"Namespace\":\"test-emf\",\"Dimensions\":[[\"OTelLib\"],[\"OTelLib\",\"spanName\"]],\"Metrics\":[{\"Name\":\"spanCounter\",\"Unit\":\"Count\",\"StorageResolution\":1}]}],\"Timestamp\":1596151098037},\"kubernetes\":{\"container_name\":\"cloudwatch-agent\",\"docker\":{\"container_id\":\"fc1b0a4c3faaa1808e187486a3a90cbea883dccaf2e2c46d4069d663b032a1ca\"},\"host\":\"ip-192-168-58-245.ec2.internal\",\"labels\":{\"controller-revision-hash\":\"5bdbf497dc\",\"name\":\"cloudwatch-agent\",\"pod-template-generation\":\"1\"},\"namespace_name\":\"amazon-cloudwatch\",\"pod_id\":\"e23f3413-af2e-4a98-89e0-5df2251e7f05\",\"pod_name\":\"cloudwatch-agent-26bl6\",\"pod_owners\":[{\"owner_kind\":\"DaemonSet\",\"owner_name\":\"cloudwatch-agent\"}]},\"spanCounter\":0,\"spanName\":\"test\"}",
},
"WithMeasurementAndEMFV0": {
emfVersion: "0",
measurements: []cWMeasurement{{
Namespace: "test-emf",
Dimensions: [][]string{{oTellibDimensionKey}, {oTellibDimensionKey, "spanName"}},
- Metrics: []map[string]string{{
- "Name": "spanCounter",
- "Unit": "Count",
+ Metrics: []cWMetricInfo{{
+ Name: "spanCounter",
+ Unit: "Count",
+ StorageResolution: 60,
}},
}},
- expectedEMFLogEvent: "{\"CloudWatchMetrics\":[{\"Namespace\":\"test-emf\",\"Dimensions\":[[\"OTelLib\"],[\"OTelLib\",\"spanName\"]],\"Metrics\":[{\"Name\":\"spanCounter\",\"Unit\":\"Count\"}]}],\"OTelLib\":\"cloudwatch-otel\",\"Sources\":[\"cadvisor\",\"pod\",\"calculated\"],\"Timestamp\":\"1596151098037\",\"Version\":\"0\",\"kubernetes\":{\"container_name\":\"cloudwatch-agent\",\"docker\":{\"container_id\":\"fc1b0a4c3faaa1808e187486a3a90cbea883dccaf2e2c46d4069d663b032a1ca\"},\"host\":\"ip-192-168-58-245.ec2.internal\",\"labels\":{\"controller-revision-hash\":\"5bdbf497dc\",\"name\":\"cloudwatch-agent\",\"pod-template-generation\":\"1\"},\"namespace_name\":\"amazon-cloudwatch\",\"pod_id\":\"e23f3413-af2e-4a98-89e0-5df2251e7f05\",\"pod_name\":\"cloudwatch-agent-26bl6\",\"pod_owners\":[{\"owner_kind\":\"DaemonSet\",\"owner_name\":\"cloudwatch-agent\"}]},\"spanCounter\":0,\"spanName\":\"test\"}",
+ expectedEMFLogEvent: "{\"CloudWatchMetrics\":[{\"Namespace\":\"test-emf\",\"Dimensions\":[[\"OTelLib\"],[\"OTelLib\",\"spanName\"]],\"Metrics\":[{\"Name\":\"spanCounter\",\"Unit\":\"Count\",\"StorageResolution\":60}]}],\"OTelLib\":\"cloudwatch-otel\",\"Sources\":[\"cadvisor\",\"pod\",\"calculated\"],\"Timestamp\":\"1596151098037\",\"Version\":\"0\",\"kubernetes\":{\"container_name\":\"cloudwatch-agent\",\"docker\":{\"container_id\":\"fc1b0a4c3faaa1808e187486a3a90cbea883dccaf2e2c46d4069d663b032a1ca\"},\"host\":\"ip-192-168-58-245.ec2.internal\",\"labels\":{\"controller-revision-hash\":\"5bdbf497dc\",\"name\":\"cloudwatch-agent\",\"pod-template-generation\":\"1\"},\"namespace_name\":\"amazon-cloudwatch\",\"pod_id\":\"e23f3413-af2e-4a98-89e0-5df2251e7f05\",\"pod_name\":\"cloudwatch-agent-26bl6\",\"pod_owners\":[{\"owner_kind\":\"DaemonSet\",\"owner_name\":\"cloudwatch-agent\"}]},\"spanCounter\":0,\"spanName\":\"test\"}",
},
"WithNoMeasurementAndEMFV1": {
emfVersion: "1",
@@ -493,10 +495,11 @@ func TestTranslateGroupedMetricToCWMetric(t *testing.T) {
{
Namespace: namespace,
Dimensions: [][]string{{"label1"}},
- Metrics: []map[string]string{
+ Metrics: []cWMetricInfo{
{
- "Name": "metric1",
- "Unit": "Count",
+ Name: "metric1",
+ Unit: "Count",
+ StorageResolution: 60,
},
},
},
@@ -538,10 +541,11 @@ func TestTranslateGroupedMetricToCWMetric(t *testing.T) {
{
Namespace: namespace,
Dimensions: [][]string{{"label1"}},
- Metrics: []map[string]string{
+ Metrics: []cWMetricInfo{
{
- "Name": "metric1",
- "Unit": "Count",
+ Name: "metric1",
+ Unit: "Count",
+ StorageResolution: 60,
},
},
},
@@ -587,18 +591,21 @@ func TestTranslateGroupedMetricToCWMetric(t *testing.T) {
{
Namespace: namespace,
Dimensions: [][]string{{"label1", "label2"}},
- Metrics: []map[string]string{
+ Metrics: []cWMetricInfo{
{
- "Name": "metric1",
- "Unit": "Count",
+ Name: "metric1",
+ Unit: "Count",
+ StorageResolution: 60,
},
{
- "Name": "metric2",
- "Unit": "Count",
+ Name: "metric2",
+ Unit: "Count",
+ StorageResolution: 60,
},
{
- "Name": "metric3",
- "Unit": "Seconds",
+ Name: "metric3",
+ Unit: "Seconds",
+ StorageResolution: 60,
},
},
},
@@ -662,20 +669,22 @@ func TestTranslateGroupedMetricToCWMetric(t *testing.T) {
{
Namespace: namespace,
Dimensions: [][]string{{"label1"}},
- Metrics: []map[string]string{
+ Metrics: []cWMetricInfo{
{
- "Name": "metric1",
- "Unit": "Count",
+ Name: "metric1",
+ Unit: "Count",
+ StorageResolution: 60,
},
},
},
{
Namespace: namespace,
Dimensions: [][]string{{"label1", "label2"}},
- Metrics: []map[string]string{
+ Metrics: []cWMetricInfo{
{
- "Name": "metric2",
- "Unit": "Count",
+ Name: "metric2",
+ Unit: "Count",
+ StorageResolution: 60,
},
},
},
@@ -746,10 +755,11 @@ func TestTranslateGroupedMetricToCWMetric(t *testing.T) {
{
Namespace: namespace,
Dimensions: [][]string{{"label1"}},
- Metrics: []map[string]string{
+ Metrics: []cWMetricInfo{
{
- "Name": "metric1",
- "Unit": "Count",
+ Name: "metric1",
+ Unit: "Count",
+ StorageResolution: 60,
},
},
},
@@ -816,10 +826,11 @@ func TestGroupedMetricToCWMeasurement(t *testing.T) {
cWMeasurement{
Namespace: namespace,
Dimensions: [][]string{{"label1"}},
- Metrics: []map[string]string{
+ Metrics: []cWMetricInfo{
{
- "Name": "metric1",
- "Unit": "Count",
+ Name: "metric1",
+ Unit: "Count",
+ StorageResolution: 60,
},
},
},
@@ -856,18 +867,21 @@ func TestGroupedMetricToCWMeasurement(t *testing.T) {
cWMeasurement{
Namespace: namespace,
Dimensions: [][]string{{"label1", "label2"}},
- Metrics: []map[string]string{
+ Metrics: []cWMetricInfo{
{
- "Name": "metric1",
- "Unit": "Count",
+ Name: "metric1",
+ Unit: "Count",
+ StorageResolution: 60,
},
{
- "Name": "metric2",
- "Unit": "Count",
+ Name: "metric2",
+ Unit: "Count",
+ StorageResolution: 60,
},
{
- "Name": "metric3",
- "Unit": "Seconds",
+ Name: "metric3",
+ Unit: "Seconds",
+ StorageResolution: 60,
},
},
},
@@ -895,10 +909,11 @@ func TestGroupedMetricToCWMeasurement(t *testing.T) {
cWMeasurement{
Namespace: namespace,
Dimensions: [][]string{{"label1"}},
- Metrics: []map[string]string{
+ Metrics: []cWMetricInfo{
{
- "Name": "metric1",
- "Unit": "Count",
+ Name: "metric1",
+ Unit: "Count",
+ StorageResolution: 60,
},
},
},
@@ -940,18 +955,21 @@ func TestGroupedMetricToCWMeasurement(t *testing.T) {
{"label2"},
{},
},
- Metrics: []map[string]string{
+ Metrics: []cWMetricInfo{
{
- "Name": "metric1",
- "Unit": "Count",
+ Name: "metric1",
+ Unit: "Count",
+ StorageResolution: 60,
},
{
- "Name": "metric2",
- "Unit": "Count",
+ Name: "metric2",
+ Unit: "Count",
+ StorageResolution: 60,
},
{
- "Name": "metric3",
- "Unit": "Seconds",
+ Name: "metric3",
+ Unit: "Seconds",
+ StorageResolution: 60,
},
},
},
@@ -1183,18 +1201,21 @@ func TestGroupedMetricToCWMeasurementsWithFilters(t *testing.T) {
{
Namespace: namespace,
Dimensions: [][]string{{"a"}, {"a", "c"}},
- Metrics: []map[string]string{
+ Metrics: []cWMetricInfo{
{
- "Name": "metric1",
- "Unit": "Count",
+ Name: "metric1",
+ Unit: "Count",
+ StorageResolution: 60,
},
{
- "Name": "metric2",
- "Unit": "Count",
+ Name: "metric2",
+ Unit: "Count",
+ StorageResolution: 60,
},
{
- "Name": "metric3",
- "Unit": "Seconds",
+ Name: "metric3",
+ Unit: "Seconds",
+ StorageResolution: 60,
},
},
},
@@ -1220,30 +1241,33 @@ func TestGroupedMetricToCWMeasurementsWithFilters(t *testing.T) {
{
Namespace: namespace,
Dimensions: [][]string{{"a"}, {"b"}, {"a", "c"}},
- Metrics: []map[string]string{
+ Metrics: []cWMetricInfo{
{
- "Name": "metric1",
- "Unit": "Count",
+ Name: "metric1",
+ Unit: "Count",
+ StorageResolution: 60,
},
},
},
{
Namespace: namespace,
Dimensions: [][]string{{"a"}, {"b"}},
- Metrics: []map[string]string{
+ Metrics: []cWMetricInfo{
{
- "Name": "metric2",
- "Unit": "Count",
+ Name: "metric2",
+ Unit: "Count",
+ StorageResolution: 60,
},
},
},
{
Namespace: namespace,
Dimensions: [][]string{{"a"}},
- Metrics: []map[string]string{
+ Metrics: []cWMetricInfo{
{
- "Name": "metric3",
- "Unit": "Seconds",
+ Name: "metric3",
+ Unit: "Seconds",
+ StorageResolution: 60,
},
},
},
@@ -1265,24 +1289,27 @@ func TestGroupedMetricToCWMeasurementsWithFilters(t *testing.T) {
{
Namespace: namespace,
Dimensions: [][]string{{"a"}, {"b"}},
- Metrics: []map[string]string{
+ Metrics: []cWMetricInfo{
{
- "Name": "metric1",
- "Unit": "Count",
+ Name: "metric1",
+ Unit: "Count",
+ StorageResolution: 60,
},
{
- "Name": "metric2",
- "Unit": "Count",
+ Name: "metric2",
+ Unit: "Count",
+ StorageResolution: 60,
},
},
},
{
Namespace: namespace,
Dimensions: [][]string{{"a"}},
- Metrics: []map[string]string{
+ Metrics: []cWMetricInfo{
{
- "Name": "metric3",
- "Unit": "Seconds",
+ Name: "metric3",
+ Unit: "Seconds",
+ StorageResolution: 60,
},
},
},
@@ -1304,14 +1331,16 @@ func TestGroupedMetricToCWMeasurementsWithFilters(t *testing.T) {
{
Namespace: namespace,
Dimensions: [][]string{{"a"}, {"b"}},
- Metrics: []map[string]string{
+ Metrics: []cWMetricInfo{
{
- "Name": "metric1",
- "Unit": "Count",
+ Name: "metric1",
+ Unit: "Count",
+ StorageResolution: 60,
},
{
- "Name": "metric2",
- "Unit": "Count",
+ Name: "metric2",
+ Unit: "Count",
+ StorageResolution: 60,
},
},
},
@@ -1339,14 +1368,16 @@ func TestGroupedMetricToCWMeasurementsWithFilters(t *testing.T) {
{
Namespace: namespace,
Dimensions: [][]string{{}},
- Metrics: []map[string]string{
+ Metrics: []cWMetricInfo{
{
- "Name": "metric1",
- "Unit": "Count",
+ Name: "metric1",
+ Unit: "Count",
+ StorageResolution: 60,
},
{
- "Name": "metric3",
- "Unit": "Seconds",
+ Name: "metric3",
+ Unit: "Seconds",
+ StorageResolution: 60,
},
},
},
@@ -1380,18 +1411,21 @@ func TestGroupedMetricToCWMeasurementsWithFilters(t *testing.T) {
{
Namespace: namespace,
Dimensions: [][]string{{"b"}},
- Metrics: []map[string]string{
+ Metrics: []cWMetricInfo{
{
- "Name": "metric1",
- "Unit": "Count",
+ Name: "metric1",
+ Unit: "Count",
+ StorageResolution: 60,
},
{
- "Name": "metric2",
- "Unit": "Count",
+ Name: "metric2",
+ Unit: "Count",
+ StorageResolution: 60,
},
{
- "Name": "metric3",
- "Unit": "Seconds",
+ Name: "metric3",
+ Unit: "Seconds",
+ StorageResolution: 60,
},
},
},
@@ -2154,9 +2188,9 @@ func BenchmarkTranslateCWMetricToEMF(b *testing.B) {
cwMeasurement := cWMeasurement{
Namespace: "test-emf",
Dimensions: [][]string{{oTellibDimensionKey}, {oTellibDimensionKey, "spanName"}},
- Metrics: []map[string]string{{
- "Name": "spanCounter",
- "Unit": "Count",
+ Metrics: []cWMetricInfo{{
+ Name: "spanCounter",
+ Unit: "Count",
}},
}
timestamp := int64(1596151098037)
From e899e0cdced8f73a312cacf0a28a98b13863c9c4 Mon Sep 17 00:00:00 2001
From: Srikanth Chekuri
Date: Thu, 28 Nov 2024 02:00:49 +0530
Subject: [PATCH 14/23] [cmd/opampsupervisor]: update default output paths for
supervisor logger (#36072)
Co-authored-by: Evan Bradley <11745660+evan-bradley@users.noreply.github.com>
---
...opamp_supervisor_default_output_paths.yaml | 28 +++++++++++++++++++
cmd/opampsupervisor/main.go | 2 +-
.../supervisor/config/config.go | 2 +-
cmd/opampsupervisor/supervisor/logger.go | 4 +--
cmd/opampsupervisor/supervisor/supervisor.go | 6 ++--
.../supervisor/supervisor_test.go | 4 +--
6 files changed, 37 insertions(+), 9 deletions(-)
create mode 100644 .chloggen/opamp_supervisor_default_output_paths.yaml
diff --git a/.chloggen/opamp_supervisor_default_output_paths.yaml b/.chloggen/opamp_supervisor_default_output_paths.yaml
new file mode 100644
index 000000000000..0ca722205166
--- /dev/null
+++ b/.chloggen/opamp_supervisor_default_output_paths.yaml
@@ -0,0 +1,28 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: breaking
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: cmd/opampsupervisor
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Update default logger output paths to stderr
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [36072]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext: |
+ The default output paths for the opamp supervisor logger have been updated to stderr from [stdout, stderr].
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: []
diff --git a/cmd/opampsupervisor/main.go b/cmd/opampsupervisor/main.go
index 533a1f54c591..17a62e2033cd 100644
--- a/cmd/opampsupervisor/main.go
+++ b/cmd/opampsupervisor/main.go
@@ -35,7 +35,7 @@ func runInteractive() error {
return fmt.Errorf("failed to create logger: %w", err)
}
- supervisor, err := supervisor.NewSupervisor(logger, cfg)
+ supervisor, err := supervisor.NewSupervisor(logger.Named("supervisor"), cfg)
if err != nil {
return fmt.Errorf("failed to create supervisor: %w", err)
}
diff --git a/cmd/opampsupervisor/supervisor/config/config.go b/cmd/opampsupervisor/supervisor/config/config.go
index 4bc88b63fff1..5cdb6938e325 100644
--- a/cmd/opampsupervisor/supervisor/config/config.go
+++ b/cmd/opampsupervisor/supervisor/config/config.go
@@ -255,7 +255,7 @@ func DefaultSupervisor() Supervisor {
Telemetry: Telemetry{
Logs: Logs{
Level: zapcore.InfoLevel,
- OutputPaths: []string{"stdout", "stderr"},
+ OutputPaths: []string{"stderr"},
},
},
}
diff --git a/cmd/opampsupervisor/supervisor/logger.go b/cmd/opampsupervisor/supervisor/logger.go
index 11811d539372..863b4aba7f5f 100644
--- a/cmd/opampsupervisor/supervisor/logger.go
+++ b/cmd/opampsupervisor/supervisor/logger.go
@@ -26,8 +26,8 @@ func (o *opAMPLogger) Errorf(_ context.Context, format string, v ...any) {
o.l.Errorf(format, v...)
}
-func newLoggerFromZap(l *zap.Logger) types.Logger {
+func newLoggerFromZap(l *zap.Logger, name string) types.Logger {
return &opAMPLogger{
- l: l.Sugar(),
+ l: l.Sugar().Named(name),
}
}
diff --git a/cmd/opampsupervisor/supervisor/supervisor.go b/cmd/opampsupervisor/supervisor/supervisor.go
index ca336b756ed0..d1a4e03f1c8d 100644
--- a/cmd/opampsupervisor/supervisor/supervisor.go
+++ b/cmd/opampsupervisor/supervisor/supervisor.go
@@ -289,7 +289,7 @@ func (s *Supervisor) getBootstrapInfo() (err error) {
return fmt.Errorf("failed to write agent config: %w", err)
}
- srv := server.New(newLoggerFromZap(s.logger))
+ srv := server.New(newLoggerFromZap(s.logger, "opamp-server"))
done := make(chan error, 1)
var connected atomic.Bool
@@ -387,7 +387,7 @@ func (s *Supervisor) startOpAMP() error {
}
func (s *Supervisor) startOpAMPClient() error {
- s.opampClient = client.NewWebSocket(newLoggerFromZap(s.logger))
+ s.opampClient = client.NewWebSocket(newLoggerFromZap(s.logger, "opamp-client"))
// determine if we need to load a TLS config or not
var tlsConfig *tls.Config
@@ -465,7 +465,7 @@ func (s *Supervisor) startOpAMPClient() error {
// depending on information received by the Supervisor from the remote
// OpAMP server.
func (s *Supervisor) startOpAMPServer() error {
- s.opampServer = server.New(newLoggerFromZap(s.logger))
+ s.opampServer = server.New(newLoggerFromZap(s.logger, "opamp-server"))
var err error
s.opampServerPort, err = s.getSupervisorOpAMPServerPort()
diff --git a/cmd/opampsupervisor/supervisor/supervisor_test.go b/cmd/opampsupervisor/supervisor/supervisor_test.go
index 24301fc19626..bb68c7abf538 100644
--- a/cmd/opampsupervisor/supervisor/supervisor_test.go
+++ b/cmd/opampsupervisor/supervisor/supervisor_test.go
@@ -183,7 +183,7 @@ func Test_onMessage(t *testing.T) {
cfgState: &atomic.Value{},
effectiveConfig: &atomic.Value{},
agentHealthCheckEndpoint: "localhost:8000",
- opampClient: client.NewHTTP(newLoggerFromZap(zap.NewNop())),
+ opampClient: client.NewHTTP(newLoggerFromZap(zap.NewNop(), "opamp-client")),
}
require.NoError(t, s.createTemplates())
@@ -339,7 +339,7 @@ func Test_onMessage(t *testing.T) {
cfgState: &atomic.Value{},
effectiveConfig: &atomic.Value{},
agentHealthCheckEndpoint: "localhost:8000",
- opampClient: client.NewHTTP(newLoggerFromZap(zap.NewNop())),
+ opampClient: client.NewHTTP(newLoggerFromZap(zap.NewNop(), "opamp-client")),
}
require.NoError(t, s.createTemplates())
From 24f71bbea08f7d6f57874e050fe9799deb21af34 Mon Sep 17 00:00:00 2001
From: Srikanth Chekuri
Date: Thu, 28 Nov 2024 02:01:46 +0530
Subject: [PATCH 15/23] [cmd/opampsupervisor]: do not log err when last
received doesn't exist (#36014)
---
...do_not_log_when_no_prev_config_exists.yaml | 27 +++++++++++++++++++
cmd/opampsupervisor/supervisor/supervisor.go | 7 +++--
2 files changed, 32 insertions(+), 2 deletions(-)
create mode 100644 .chloggen/36014_do_not_log_when_no_prev_config_exists.yaml
diff --git a/.chloggen/36014_do_not_log_when_no_prev_config_exists.yaml b/.chloggen/36014_do_not_log_when_no_prev_config_exists.yaml
new file mode 100644
index 000000000000..3e203c92262c
--- /dev/null
+++ b/.chloggen/36014_do_not_log_when_no_prev_config_exists.yaml
@@ -0,0 +1,27 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: bug_fix
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: cmd/opampsupervisor
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Do not log an error if the last received remote config doesn't exist
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [36013]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: []
diff --git a/cmd/opampsupervisor/supervisor/supervisor.go b/cmd/opampsupervisor/supervisor/supervisor.go
index d1a4e03f1c8d..aca687978d22 100644
--- a/cmd/opampsupervisor/supervisor/supervisor.go
+++ b/cmd/opampsupervisor/supervisor/supervisor.go
@@ -782,7 +782,8 @@ func (s *Supervisor) loadAndWriteInitialMergedConfig() error {
if s.config.Capabilities.AcceptsRemoteConfig {
// Try to load the last received remote config if it exists.
lastRecvRemoteConfig, err = os.ReadFile(filepath.Join(s.config.Storage.Directory, lastRecvRemoteConfigFile))
- if err == nil {
+ switch {
+ case err == nil:
config := &protobufs.AgentRemoteConfig{}
err = proto.Unmarshal(lastRecvRemoteConfig, config)
if err != nil {
@@ -790,7 +791,9 @@ func (s *Supervisor) loadAndWriteInitialMergedConfig() error {
} else {
s.remoteConfig = config
}
- } else {
+ case errors.Is(err, os.ErrNotExist):
+ s.logger.Info("No last received remote config found")
+ default:
s.logger.Error("error while reading last received config", zap.Error(err))
}
} else {
From c674d708463f07760a5a1c5e374df83f77b6ec72 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Wed, 27 Nov 2024 15:01:49 -0800
Subject: [PATCH 16/23] Update dependency tcort/markdown-link-check to v3.13.6
(#36555)
---
.github/workflows/changelog.yml | 2 +-
.github/workflows/check-links.yaml | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/.github/workflows/changelog.yml b/.github/workflows/changelog.yml
index 97a9d78c2b3d..b9af5b10798e 100644
--- a/.github/workflows/changelog.yml
+++ b/.github/workflows/changelog.yml
@@ -16,7 +16,7 @@ env:
# We limit cache download as a whole to 5 minutes.
SEGMENT_DOWNLOAD_TIMEOUT_MINS: 2
# renovate: datasource=github-releases depName=tcort/markdown-link-check
- MD_LINK_CHECK_VERSION: "3.12.2"
+ MD_LINK_CHECK_VERSION: "3.13.6"
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref }}
diff --git a/.github/workflows/check-links.yaml b/.github/workflows/check-links.yaml
index bbfd0bb2ed49..911cc4563eab 100644
--- a/.github/workflows/check-links.yaml
+++ b/.github/workflows/check-links.yaml
@@ -11,7 +11,7 @@ concurrency:
env:
# renovate: datasource=github-releases depName=tcort/markdown-link-check
- MD_LINK_CHECK_VERSION: "3.12.2"
+ MD_LINK_CHECK_VERSION: "3.13.6"
jobs:
changedfiles:
From adbb96afd6f223bf446952152f881d2a680071fc Mon Sep 17 00:00:00 2001
From: Tim
Date: Wed, 27 Nov 2024 15:03:52 -0800
Subject: [PATCH 17/23] [receiver/postgresqlreceiver] Added new postgresql
metrics to achieve parity with Telegraf (#36528)
---
.chloggen/chan-tim_postgresMetrics.yaml | 27 ++
receiver/postgresqlreceiver/client.go | 27 +-
receiver/postgresqlreceiver/documentation.md | 56 +++
.../postgresqlreceiver/integration_test.go | 7 +
.../internal/metadata/generated_config.go | 28 ++
.../metadata/generated_config_test.go | 14 +
.../internal/metadata/generated_metrics.go | 413 ++++++++++++++++++
.../metadata/generated_metrics_test.go | 119 +++++
.../internal/metadata/testdata/config.yaml | 28 ++
receiver/postgresqlreceiver/metadata.yaml | 57 ++-
receiver/postgresqlreceiver/scraper.go | 7 +
receiver/postgresqlreceiver/scraper_test.go | 91 ++++
.../testdata/integration/expected_all_db.yaml | 210 +++++++++
.../integration/expected_all_db_connpool.yaml | 210 +++++++++
.../expected_all_db_schemaattr.yaml | 210 +++++++++
.../integration/expected_multi_db.yaml | 140 ++++++
.../expected_multi_db_connpool.yaml | 140 ++++++
.../expected_multi_db_schemaattr.yaml | 140 ++++++
.../integration/expected_single_db.yaml | 70 +++
.../expected_single_db_connpool.yaml | 70 +++
.../expected_single_db_schemaattr.yaml | 70 +++
.../testdata/scraper/multiple/expected.yaml | 210 +++++++++
.../multiple/expected_imprecise_lag.yaml | 210 +++++++++
.../expected_imprecise_lag_schemaattr.yaml | 210 +++++++++
.../scraper/multiple/expected_schemaattr.yaml | 210 +++++++++
.../testdata/scraper/otel/expected.yaml | 70 +++
.../scraper/otel/expected_schemaattr.yaml | 70 +++
27 files changed, 3110 insertions(+), 4 deletions(-)
create mode 100644 .chloggen/chan-tim_postgresMetrics.yaml
diff --git a/.chloggen/chan-tim_postgresMetrics.yaml b/.chloggen/chan-tim_postgresMetrics.yaml
new file mode 100644
index 000000000000..36cbbf6c1b49
--- /dev/null
+++ b/.chloggen/chan-tim_postgresMetrics.yaml
@@ -0,0 +1,27 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: enhancement
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: postgresqlreceiver
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Added new postgresql metrics to achieve parity with Telegraf
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [36528]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: []
diff --git a/receiver/postgresqlreceiver/client.go b/receiver/postgresqlreceiver/client.go
index 056167eea49c..dc0029873a10 100644
--- a/receiver/postgresqlreceiver/client.go
+++ b/receiver/postgresqlreceiver/client.go
@@ -134,20 +134,34 @@ type databaseStats struct {
transactionRollback int64
deadlocks int64
tempFiles int64
+ tupUpdated int64
+ tupReturned int64
+ tupFetched int64
+ tupInserted int64
+ tupDeleted int64
+ blksHit int64
+ blksRead int64
}
func (c *postgreSQLClient) getDatabaseStats(ctx context.Context, databases []string) (map[databaseName]databaseStats, error) {
- query := filterQueryByDatabases("SELECT datname, xact_commit, xact_rollback, deadlocks, temp_files FROM pg_stat_database", databases, false)
+ query := filterQueryByDatabases(
+ "SELECT datname, xact_commit, xact_rollback, deadlocks, temp_files, tup_updated, tup_returned, tup_fetched, tup_inserted, tup_deleted, blks_hit, blks_read FROM pg_stat_database",
+ databases,
+ false,
+ )
+
rows, err := c.client.QueryContext(ctx, query)
if err != nil {
return nil, err
}
+
var errs error
dbStats := map[databaseName]databaseStats{}
+
for rows.Next() {
var datname string
- var transactionCommitted, transactionRollback, deadlocks, tempFiles int64
- err = rows.Scan(&datname, &transactionCommitted, &transactionRollback, &deadlocks, &tempFiles)
+ var transactionCommitted, transactionRollback, deadlocks, tempFiles, tupUpdated, tupReturned, tupFetched, tupInserted, tupDeleted, blksHit, blksRead int64
+ err = rows.Scan(&datname, &transactionCommitted, &transactionRollback, &deadlocks, &tempFiles, &tupUpdated, &tupReturned, &tupFetched, &tupInserted, &tupDeleted, &blksHit, &blksRead)
if err != nil {
errs = multierr.Append(errs, err)
continue
@@ -158,6 +172,13 @@ func (c *postgreSQLClient) getDatabaseStats(ctx context.Context, databases []str
transactionRollback: transactionRollback,
deadlocks: deadlocks,
tempFiles: tempFiles,
+ tupUpdated: tupUpdated,
+ tupReturned: tupReturned,
+ tupFetched: tupFetched,
+ tupInserted: tupInserted,
+ tupDeleted: tupDeleted,
+ blksHit: blksHit,
+ blksRead: blksRead,
}
}
}
diff --git a/receiver/postgresqlreceiver/documentation.md b/receiver/postgresqlreceiver/documentation.md
index 0de5881a955a..ba9e0ce2548a 100644
--- a/receiver/postgresqlreceiver/documentation.md
+++ b/receiver/postgresqlreceiver/documentation.md
@@ -253,6 +253,22 @@ metrics:
enabled: true
```
+### postgresql.blks_hit
+
+Number of times disk blocks were found already in the buffer cache.
+
+| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic |
+| ---- | ----------- | ---------- | ----------------------- | --------- |
+| {blks_hit} | Sum | Int | Cumulative | true |
+
+### postgresql.blks_read
+
+Number of disk blocks read in this database.
+
+| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic |
+| ---- | ----------- | ---------- | ----------------------- | --------- |
+| {blks_read} | Sum | Int | Cumulative | true |
+
### postgresql.database.locks
The number of database locks.
@@ -293,6 +309,46 @@ The number of temp files.
| ---- | ----------- | ---------- | ----------------------- | --------- |
| {temp_file} | Sum | Int | Cumulative | true |
+### postgresql.tup_deleted
+
+Number of rows deleted by queries in the database.
+
+| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic |
+| ---- | ----------- | ---------- | ----------------------- | --------- |
+| {tup_deleted} | Sum | Int | Cumulative | true |
+
+### postgresql.tup_fetched
+
+Number of rows fetched by queries in the database.
+
+| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic |
+| ---- | ----------- | ---------- | ----------------------- | --------- |
+| {tup_fetched} | Sum | Int | Cumulative | true |
+
+### postgresql.tup_inserted
+
+Number of rows inserted by queries in the database.
+
+| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic |
+| ---- | ----------- | ---------- | ----------------------- | --------- |
+| {tup_inserted} | Sum | Int | Cumulative | true |
+
+### postgresql.tup_returned
+
+Number of rows returned by queries in the database.
+
+| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic |
+| ---- | ----------- | ---------- | ----------------------- | --------- |
+| {tup_returned} | Sum | Int | Cumulative | true |
+
+### postgresql.tup_updated
+
+Number of rows updated by queries in the database.
+
+| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic |
+| ---- | ----------- | ---------- | ----------------------- | --------- |
+| {tup_updated} | Sum | Int | Cumulative | true |
+
### postgresql.wal.delay
Time between flushing recent WAL locally and receiving notification that the standby server has completed an operation with it.
diff --git a/receiver/postgresqlreceiver/integration_test.go b/receiver/postgresqlreceiver/integration_test.go
index cadd953bb701..d73627615d46 100644
--- a/receiver/postgresqlreceiver/integration_test.go
+++ b/receiver/postgresqlreceiver/integration_test.go
@@ -79,6 +79,13 @@ func integrationTest(name string, databases []string) func(*testing.T) {
rCfg.Metrics.PostgresqlWalDelay.Enabled = true
rCfg.Metrics.PostgresqlDeadlocks.Enabled = true
rCfg.Metrics.PostgresqlTempFiles.Enabled = true
+ rCfg.Metrics.PostgresqlTupUpdated.Enabled = true
+ rCfg.Metrics.PostgresqlTupReturned.Enabled = true
+ rCfg.Metrics.PostgresqlTupFetched.Enabled = true
+ rCfg.Metrics.PostgresqlTupInserted.Enabled = true
+ rCfg.Metrics.PostgresqlTupDeleted.Enabled = true
+ rCfg.Metrics.PostgresqlBlksHit.Enabled = true
+ rCfg.Metrics.PostgresqlBlksRead.Enabled = true
rCfg.Metrics.PostgresqlSequentialScans.Enabled = true
rCfg.Metrics.PostgresqlDatabaseLocks.Enabled = true
}),
diff --git a/receiver/postgresqlreceiver/internal/metadata/generated_config.go b/receiver/postgresqlreceiver/internal/metadata/generated_config.go
index a0a53f803403..d63330f7e3f7 100644
--- a/receiver/postgresqlreceiver/internal/metadata/generated_config.go
+++ b/receiver/postgresqlreceiver/internal/metadata/generated_config.go
@@ -34,6 +34,8 @@ type MetricsConfig struct {
PostgresqlBgwriterCheckpointCount MetricConfig `mapstructure:"postgresql.bgwriter.checkpoint.count"`
PostgresqlBgwriterDuration MetricConfig `mapstructure:"postgresql.bgwriter.duration"`
PostgresqlBgwriterMaxwritten MetricConfig `mapstructure:"postgresql.bgwriter.maxwritten"`
+ PostgresqlBlksHit MetricConfig `mapstructure:"postgresql.blks_hit"`
+ PostgresqlBlksRead MetricConfig `mapstructure:"postgresql.blks_read"`
PostgresqlBlocksRead MetricConfig `mapstructure:"postgresql.blocks_read"`
PostgresqlCommits MetricConfig `mapstructure:"postgresql.commits"`
PostgresqlConnectionMax MetricConfig `mapstructure:"postgresql.connection.max"`
@@ -52,6 +54,11 @@ type MetricsConfig struct {
PostgresqlTableSize MetricConfig `mapstructure:"postgresql.table.size"`
PostgresqlTableVacuumCount MetricConfig `mapstructure:"postgresql.table.vacuum.count"`
PostgresqlTempFiles MetricConfig `mapstructure:"postgresql.temp_files"`
+ PostgresqlTupDeleted MetricConfig `mapstructure:"postgresql.tup_deleted"`
+ PostgresqlTupFetched MetricConfig `mapstructure:"postgresql.tup_fetched"`
+ PostgresqlTupInserted MetricConfig `mapstructure:"postgresql.tup_inserted"`
+ PostgresqlTupReturned MetricConfig `mapstructure:"postgresql.tup_returned"`
+ PostgresqlTupUpdated MetricConfig `mapstructure:"postgresql.tup_updated"`
PostgresqlWalAge MetricConfig `mapstructure:"postgresql.wal.age"`
PostgresqlWalDelay MetricConfig `mapstructure:"postgresql.wal.delay"`
PostgresqlWalLag MetricConfig `mapstructure:"postgresql.wal.lag"`
@@ -77,6 +84,12 @@ func DefaultMetricsConfig() MetricsConfig {
PostgresqlBgwriterMaxwritten: MetricConfig{
Enabled: true,
},
+ PostgresqlBlksHit: MetricConfig{
+ Enabled: false,
+ },
+ PostgresqlBlksRead: MetricConfig{
+ Enabled: false,
+ },
PostgresqlBlocksRead: MetricConfig{
Enabled: true,
},
@@ -131,6 +144,21 @@ func DefaultMetricsConfig() MetricsConfig {
PostgresqlTempFiles: MetricConfig{
Enabled: false,
},
+ PostgresqlTupDeleted: MetricConfig{
+ Enabled: false,
+ },
+ PostgresqlTupFetched: MetricConfig{
+ Enabled: false,
+ },
+ PostgresqlTupInserted: MetricConfig{
+ Enabled: false,
+ },
+ PostgresqlTupReturned: MetricConfig{
+ Enabled: false,
+ },
+ PostgresqlTupUpdated: MetricConfig{
+ Enabled: false,
+ },
PostgresqlWalAge: MetricConfig{
Enabled: true,
},
diff --git a/receiver/postgresqlreceiver/internal/metadata/generated_config_test.go b/receiver/postgresqlreceiver/internal/metadata/generated_config_test.go
index e97658befe45..aa7fc527db16 100644
--- a/receiver/postgresqlreceiver/internal/metadata/generated_config_test.go
+++ b/receiver/postgresqlreceiver/internal/metadata/generated_config_test.go
@@ -31,6 +31,8 @@ func TestMetricsBuilderConfig(t *testing.T) {
PostgresqlBgwriterCheckpointCount: MetricConfig{Enabled: true},
PostgresqlBgwriterDuration: MetricConfig{Enabled: true},
PostgresqlBgwriterMaxwritten: MetricConfig{Enabled: true},
+ PostgresqlBlksHit: MetricConfig{Enabled: true},
+ PostgresqlBlksRead: MetricConfig{Enabled: true},
PostgresqlBlocksRead: MetricConfig{Enabled: true},
PostgresqlCommits: MetricConfig{Enabled: true},
PostgresqlConnectionMax: MetricConfig{Enabled: true},
@@ -49,6 +51,11 @@ func TestMetricsBuilderConfig(t *testing.T) {
PostgresqlTableSize: MetricConfig{Enabled: true},
PostgresqlTableVacuumCount: MetricConfig{Enabled: true},
PostgresqlTempFiles: MetricConfig{Enabled: true},
+ PostgresqlTupDeleted: MetricConfig{Enabled: true},
+ PostgresqlTupFetched: MetricConfig{Enabled: true},
+ PostgresqlTupInserted: MetricConfig{Enabled: true},
+ PostgresqlTupReturned: MetricConfig{Enabled: true},
+ PostgresqlTupUpdated: MetricConfig{Enabled: true},
PostgresqlWalAge: MetricConfig{Enabled: true},
PostgresqlWalDelay: MetricConfig{Enabled: true},
PostgresqlWalLag: MetricConfig{Enabled: true},
@@ -71,6 +78,8 @@ func TestMetricsBuilderConfig(t *testing.T) {
PostgresqlBgwriterCheckpointCount: MetricConfig{Enabled: false},
PostgresqlBgwriterDuration: MetricConfig{Enabled: false},
PostgresqlBgwriterMaxwritten: MetricConfig{Enabled: false},
+ PostgresqlBlksHit: MetricConfig{Enabled: false},
+ PostgresqlBlksRead: MetricConfig{Enabled: false},
PostgresqlBlocksRead: MetricConfig{Enabled: false},
PostgresqlCommits: MetricConfig{Enabled: false},
PostgresqlConnectionMax: MetricConfig{Enabled: false},
@@ -89,6 +98,11 @@ func TestMetricsBuilderConfig(t *testing.T) {
PostgresqlTableSize: MetricConfig{Enabled: false},
PostgresqlTableVacuumCount: MetricConfig{Enabled: false},
PostgresqlTempFiles: MetricConfig{Enabled: false},
+ PostgresqlTupDeleted: MetricConfig{Enabled: false},
+ PostgresqlTupFetched: MetricConfig{Enabled: false},
+ PostgresqlTupInserted: MetricConfig{Enabled: false},
+ PostgresqlTupReturned: MetricConfig{Enabled: false},
+ PostgresqlTupUpdated: MetricConfig{Enabled: false},
PostgresqlWalAge: MetricConfig{Enabled: false},
PostgresqlWalDelay: MetricConfig{Enabled: false},
PostgresqlWalLag: MetricConfig{Enabled: false},
diff --git a/receiver/postgresqlreceiver/internal/metadata/generated_metrics.go b/receiver/postgresqlreceiver/internal/metadata/generated_metrics.go
index 0d5f0e2cae18..a45ef072aaa5 100644
--- a/receiver/postgresqlreceiver/internal/metadata/generated_metrics.go
+++ b/receiver/postgresqlreceiver/internal/metadata/generated_metrics.go
@@ -550,6 +550,108 @@ func newMetricPostgresqlBgwriterMaxwritten(cfg MetricConfig) metricPostgresqlBgw
return m
}
+type metricPostgresqlBlksHit struct {
+ data pmetric.Metric // data buffer for generated metric.
+ config MetricConfig // metric config provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills postgresql.blks_hit metric with initial data.
+func (m *metricPostgresqlBlksHit) init() {
+ m.data.SetName("postgresql.blks_hit")
+ m.data.SetDescription("Number of times disk blocks were found already in the buffer cache.")
+ m.data.SetUnit("{blks_hit}")
+ m.data.SetEmptySum()
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
+}
+
+func (m *metricPostgresqlBlksHit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.config.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntValue(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricPostgresqlBlksHit) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricPostgresqlBlksHit) emit(metrics pmetric.MetricSlice) {
+ if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricPostgresqlBlksHit(cfg MetricConfig) metricPostgresqlBlksHit {
+ m := metricPostgresqlBlksHit{config: cfg}
+ if cfg.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricPostgresqlBlksRead struct {
+ data pmetric.Metric // data buffer for generated metric.
+ config MetricConfig // metric config provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills postgresql.blks_read metric with initial data.
+func (m *metricPostgresqlBlksRead) init() {
+ m.data.SetName("postgresql.blks_read")
+ m.data.SetDescription("Number of disk blocks read in this database.")
+ m.data.SetUnit("{blks_read}")
+ m.data.SetEmptySum()
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
+}
+
+func (m *metricPostgresqlBlksRead) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.config.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntValue(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricPostgresqlBlksRead) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricPostgresqlBlksRead) emit(metrics pmetric.MetricSlice) {
+ if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricPostgresqlBlksRead(cfg MetricConfig) metricPostgresqlBlksRead {
+ m := metricPostgresqlBlksRead{config: cfg}
+ if cfg.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
type metricPostgresqlBlocksRead struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
@@ -1472,6 +1574,261 @@ func newMetricPostgresqlTempFiles(cfg MetricConfig) metricPostgresqlTempFiles {
return m
}
+type metricPostgresqlTupDeleted struct {
+ data pmetric.Metric // data buffer for generated metric.
+ config MetricConfig // metric config provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills postgresql.tup_deleted metric with initial data.
+func (m *metricPostgresqlTupDeleted) init() {
+ m.data.SetName("postgresql.tup_deleted")
+ m.data.SetDescription("Number of rows deleted by queries in the database.")
+ m.data.SetUnit("{tup_deleted}")
+ m.data.SetEmptySum()
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
+}
+
+func (m *metricPostgresqlTupDeleted) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.config.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntValue(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricPostgresqlTupDeleted) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricPostgresqlTupDeleted) emit(metrics pmetric.MetricSlice) {
+ if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricPostgresqlTupDeleted(cfg MetricConfig) metricPostgresqlTupDeleted {
+ m := metricPostgresqlTupDeleted{config: cfg}
+ if cfg.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricPostgresqlTupFetched struct {
+ data pmetric.Metric // data buffer for generated metric.
+ config MetricConfig // metric config provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills postgresql.tup_fetched metric with initial data.
+func (m *metricPostgresqlTupFetched) init() {
+ m.data.SetName("postgresql.tup_fetched")
+ m.data.SetDescription("Number of rows fetched by queries in the database.")
+ m.data.SetUnit("{tup_fetched}")
+ m.data.SetEmptySum()
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
+}
+
+func (m *metricPostgresqlTupFetched) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.config.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntValue(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricPostgresqlTupFetched) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricPostgresqlTupFetched) emit(metrics pmetric.MetricSlice) {
+ if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricPostgresqlTupFetched(cfg MetricConfig) metricPostgresqlTupFetched {
+ m := metricPostgresqlTupFetched{config: cfg}
+ if cfg.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricPostgresqlTupInserted struct {
+ data pmetric.Metric // data buffer for generated metric.
+ config MetricConfig // metric config provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills postgresql.tup_inserted metric with initial data.
+func (m *metricPostgresqlTupInserted) init() {
+ m.data.SetName("postgresql.tup_inserted")
+ m.data.SetDescription("Number of rows inserted by queries in the database.")
+ m.data.SetUnit("{tup_inserted}")
+ m.data.SetEmptySum()
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
+}
+
+func (m *metricPostgresqlTupInserted) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.config.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntValue(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricPostgresqlTupInserted) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricPostgresqlTupInserted) emit(metrics pmetric.MetricSlice) {
+ if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricPostgresqlTupInserted(cfg MetricConfig) metricPostgresqlTupInserted {
+ m := metricPostgresqlTupInserted{config: cfg}
+ if cfg.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricPostgresqlTupReturned struct {
+ data pmetric.Metric // data buffer for generated metric.
+ config MetricConfig // metric config provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills postgresql.tup_returned metric with initial data.
+func (m *metricPostgresqlTupReturned) init() {
+ m.data.SetName("postgresql.tup_returned")
+ m.data.SetDescription("Number of rows returned by queries in the database.")
+ m.data.SetUnit("{tup_returned}")
+ m.data.SetEmptySum()
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
+}
+
+func (m *metricPostgresqlTupReturned) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.config.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntValue(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricPostgresqlTupReturned) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricPostgresqlTupReturned) emit(metrics pmetric.MetricSlice) {
+ if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricPostgresqlTupReturned(cfg MetricConfig) metricPostgresqlTupReturned {
+ m := metricPostgresqlTupReturned{config: cfg}
+ if cfg.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricPostgresqlTupUpdated struct {
+ data pmetric.Metric // data buffer for generated metric.
+ config MetricConfig // metric config provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills postgresql.tup_updated metric with initial data.
+func (m *metricPostgresqlTupUpdated) init() {
+ m.data.SetName("postgresql.tup_updated")
+ m.data.SetDescription("Number of rows updated by queries in the database.")
+ m.data.SetUnit("{tup_updated}")
+ m.data.SetEmptySum()
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
+}
+
+func (m *metricPostgresqlTupUpdated) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.config.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntValue(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricPostgresqlTupUpdated) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricPostgresqlTupUpdated) emit(metrics pmetric.MetricSlice) {
+ if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricPostgresqlTupUpdated(cfg MetricConfig) metricPostgresqlTupUpdated {
+ m := metricPostgresqlTupUpdated{config: cfg}
+ if cfg.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
type metricPostgresqlWalAge struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
@@ -1641,6 +1998,8 @@ type MetricsBuilder struct {
metricPostgresqlBgwriterCheckpointCount metricPostgresqlBgwriterCheckpointCount
metricPostgresqlBgwriterDuration metricPostgresqlBgwriterDuration
metricPostgresqlBgwriterMaxwritten metricPostgresqlBgwriterMaxwritten
+ metricPostgresqlBlksHit metricPostgresqlBlksHit
+ metricPostgresqlBlksRead metricPostgresqlBlksRead
metricPostgresqlBlocksRead metricPostgresqlBlocksRead
metricPostgresqlCommits metricPostgresqlCommits
metricPostgresqlConnectionMax metricPostgresqlConnectionMax
@@ -1659,6 +2018,11 @@ type MetricsBuilder struct {
metricPostgresqlTableSize metricPostgresqlTableSize
metricPostgresqlTableVacuumCount metricPostgresqlTableVacuumCount
metricPostgresqlTempFiles metricPostgresqlTempFiles
+ metricPostgresqlTupDeleted metricPostgresqlTupDeleted
+ metricPostgresqlTupFetched metricPostgresqlTupFetched
+ metricPostgresqlTupInserted metricPostgresqlTupInserted
+ metricPostgresqlTupReturned metricPostgresqlTupReturned
+ metricPostgresqlTupUpdated metricPostgresqlTupUpdated
metricPostgresqlWalAge metricPostgresqlWalAge
metricPostgresqlWalDelay metricPostgresqlWalDelay
metricPostgresqlWalLag metricPostgresqlWalLag
@@ -1694,6 +2058,8 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt
metricPostgresqlBgwriterCheckpointCount: newMetricPostgresqlBgwriterCheckpointCount(mbc.Metrics.PostgresqlBgwriterCheckpointCount),
metricPostgresqlBgwriterDuration: newMetricPostgresqlBgwriterDuration(mbc.Metrics.PostgresqlBgwriterDuration),
metricPostgresqlBgwriterMaxwritten: newMetricPostgresqlBgwriterMaxwritten(mbc.Metrics.PostgresqlBgwriterMaxwritten),
+ metricPostgresqlBlksHit: newMetricPostgresqlBlksHit(mbc.Metrics.PostgresqlBlksHit),
+ metricPostgresqlBlksRead: newMetricPostgresqlBlksRead(mbc.Metrics.PostgresqlBlksRead),
metricPostgresqlBlocksRead: newMetricPostgresqlBlocksRead(mbc.Metrics.PostgresqlBlocksRead),
metricPostgresqlCommits: newMetricPostgresqlCommits(mbc.Metrics.PostgresqlCommits),
metricPostgresqlConnectionMax: newMetricPostgresqlConnectionMax(mbc.Metrics.PostgresqlConnectionMax),
@@ -1712,6 +2078,11 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt
metricPostgresqlTableSize: newMetricPostgresqlTableSize(mbc.Metrics.PostgresqlTableSize),
metricPostgresqlTableVacuumCount: newMetricPostgresqlTableVacuumCount(mbc.Metrics.PostgresqlTableVacuumCount),
metricPostgresqlTempFiles: newMetricPostgresqlTempFiles(mbc.Metrics.PostgresqlTempFiles),
+ metricPostgresqlTupDeleted: newMetricPostgresqlTupDeleted(mbc.Metrics.PostgresqlTupDeleted),
+ metricPostgresqlTupFetched: newMetricPostgresqlTupFetched(mbc.Metrics.PostgresqlTupFetched),
+ metricPostgresqlTupInserted: newMetricPostgresqlTupInserted(mbc.Metrics.PostgresqlTupInserted),
+ metricPostgresqlTupReturned: newMetricPostgresqlTupReturned(mbc.Metrics.PostgresqlTupReturned),
+ metricPostgresqlTupUpdated: newMetricPostgresqlTupUpdated(mbc.Metrics.PostgresqlTupUpdated),
metricPostgresqlWalAge: newMetricPostgresqlWalAge(mbc.Metrics.PostgresqlWalAge),
metricPostgresqlWalDelay: newMetricPostgresqlWalDelay(mbc.Metrics.PostgresqlWalDelay),
metricPostgresqlWalLag: newMetricPostgresqlWalLag(mbc.Metrics.PostgresqlWalLag),
@@ -1817,6 +2188,8 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) {
mb.metricPostgresqlBgwriterCheckpointCount.emit(ils.Metrics())
mb.metricPostgresqlBgwriterDuration.emit(ils.Metrics())
mb.metricPostgresqlBgwriterMaxwritten.emit(ils.Metrics())
+ mb.metricPostgresqlBlksHit.emit(ils.Metrics())
+ mb.metricPostgresqlBlksRead.emit(ils.Metrics())
mb.metricPostgresqlBlocksRead.emit(ils.Metrics())
mb.metricPostgresqlCommits.emit(ils.Metrics())
mb.metricPostgresqlConnectionMax.emit(ils.Metrics())
@@ -1835,6 +2208,11 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) {
mb.metricPostgresqlTableSize.emit(ils.Metrics())
mb.metricPostgresqlTableVacuumCount.emit(ils.Metrics())
mb.metricPostgresqlTempFiles.emit(ils.Metrics())
+ mb.metricPostgresqlTupDeleted.emit(ils.Metrics())
+ mb.metricPostgresqlTupFetched.emit(ils.Metrics())
+ mb.metricPostgresqlTupInserted.emit(ils.Metrics())
+ mb.metricPostgresqlTupReturned.emit(ils.Metrics())
+ mb.metricPostgresqlTupUpdated.emit(ils.Metrics())
mb.metricPostgresqlWalAge.emit(ils.Metrics())
mb.metricPostgresqlWalDelay.emit(ils.Metrics())
mb.metricPostgresqlWalLag.emit(ils.Metrics())
@@ -1899,6 +2277,16 @@ func (mb *MetricsBuilder) RecordPostgresqlBgwriterMaxwrittenDataPoint(ts pcommon
mb.metricPostgresqlBgwriterMaxwritten.recordDataPoint(mb.startTime, ts, val)
}
// RecordPostgresqlBlksHitDataPoint adds a data point to postgresql.blks_hit metric.
// The builder's start time is used as the data point's start timestamp; ts is
// the observation time and val the cumulative counter value.
func (mb *MetricsBuilder) RecordPostgresqlBlksHitDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricPostgresqlBlksHit.recordDataPoint(mb.startTime, ts, val)
}
+
// RecordPostgresqlBlksReadDataPoint adds a data point to postgresql.blks_read metric.
// The builder's start time is used as the data point's start timestamp; the
// call is a no-op when the metric is disabled in the config.
func (mb *MetricsBuilder) RecordPostgresqlBlksReadDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricPostgresqlBlksRead.recordDataPoint(mb.startTime, ts, val)
}
+
// RecordPostgresqlBlocksReadDataPoint adds a data point to postgresql.blocks_read metric.
func (mb *MetricsBuilder) RecordPostgresqlBlocksReadDataPoint(ts pcommon.Timestamp, val int64, sourceAttributeValue AttributeSource) {
mb.metricPostgresqlBlocksRead.recordDataPoint(mb.startTime, ts, val, sourceAttributeValue.String())
@@ -1989,6 +2377,31 @@ func (mb *MetricsBuilder) RecordPostgresqlTempFilesDataPoint(ts pcommon.Timestam
mb.metricPostgresqlTempFiles.recordDataPoint(mb.startTime, ts, val)
}
// RecordPostgresqlTupDeletedDataPoint adds a data point to postgresql.tup_deleted metric.
// The builder's start time is used as the data point's start timestamp; the
// call is a no-op when the metric is disabled in the config.
func (mb *MetricsBuilder) RecordPostgresqlTupDeletedDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricPostgresqlTupDeleted.recordDataPoint(mb.startTime, ts, val)
}
+
// RecordPostgresqlTupFetchedDataPoint adds a data point to postgresql.tup_fetched metric.
// The builder's start time is used as the data point's start timestamp; the
// call is a no-op when the metric is disabled in the config.
func (mb *MetricsBuilder) RecordPostgresqlTupFetchedDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricPostgresqlTupFetched.recordDataPoint(mb.startTime, ts, val)
}
+
// RecordPostgresqlTupInsertedDataPoint adds a data point to postgresql.tup_inserted metric.
// The builder's start time is used as the data point's start timestamp; the
// call is a no-op when the metric is disabled in the config.
func (mb *MetricsBuilder) RecordPostgresqlTupInsertedDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricPostgresqlTupInserted.recordDataPoint(mb.startTime, ts, val)
}
+
// RecordPostgresqlTupReturnedDataPoint adds a data point to postgresql.tup_returned metric.
// The builder's start time is used as the data point's start timestamp; the
// call is a no-op when the metric is disabled in the config.
func (mb *MetricsBuilder) RecordPostgresqlTupReturnedDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricPostgresqlTupReturned.recordDataPoint(mb.startTime, ts, val)
}
+
// RecordPostgresqlTupUpdatedDataPoint adds a data point to postgresql.tup_updated metric.
// The builder's start time is used as the data point's start timestamp; the
// call is a no-op when the metric is disabled in the config.
func (mb *MetricsBuilder) RecordPostgresqlTupUpdatedDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricPostgresqlTupUpdated.recordDataPoint(mb.startTime, ts, val)
}
+
// RecordPostgresqlWalAgeDataPoint adds a data point to postgresql.wal.age metric.
func (mb *MetricsBuilder) RecordPostgresqlWalAgeDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricPostgresqlWalAge.recordDataPoint(mb.startTime, ts, val)
diff --git a/receiver/postgresqlreceiver/internal/metadata/generated_metrics_test.go b/receiver/postgresqlreceiver/internal/metadata/generated_metrics_test.go
index a6a100c5bea4..8cd326d46a27 100644
--- a/receiver/postgresqlreceiver/internal/metadata/generated_metrics_test.go
+++ b/receiver/postgresqlreceiver/internal/metadata/generated_metrics_test.go
@@ -92,6 +92,12 @@ func TestMetricsBuilder(t *testing.T) {
allMetricsCount++
mb.RecordPostgresqlBgwriterMaxwrittenDataPoint(ts, 1)
+ allMetricsCount++
+ mb.RecordPostgresqlBlksHitDataPoint(ts, 1)
+
+ allMetricsCount++
+ mb.RecordPostgresqlBlksReadDataPoint(ts, 1)
+
defaultMetricsCount++
allMetricsCount++
mb.RecordPostgresqlBlocksReadDataPoint(ts, 1, AttributeSourceHeapRead)
@@ -160,6 +166,21 @@ func TestMetricsBuilder(t *testing.T) {
allMetricsCount++
mb.RecordPostgresqlTempFilesDataPoint(ts, 1)
+ allMetricsCount++
+ mb.RecordPostgresqlTupDeletedDataPoint(ts, 1)
+
+ allMetricsCount++
+ mb.RecordPostgresqlTupFetchedDataPoint(ts, 1)
+
+ allMetricsCount++
+ mb.RecordPostgresqlTupInsertedDataPoint(ts, 1)
+
+ allMetricsCount++
+ mb.RecordPostgresqlTupReturnedDataPoint(ts, 1)
+
+ allMetricsCount++
+ mb.RecordPostgresqlTupUpdatedDataPoint(ts, 1)
+
defaultMetricsCount++
allMetricsCount++
mb.RecordPostgresqlWalAgeDataPoint(ts, 1)
@@ -291,6 +312,34 @@ func TestMetricsBuilder(t *testing.T) {
assert.Equal(t, ts, dp.Timestamp())
assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
assert.Equal(t, int64(1), dp.IntValue())
+ case "postgresql.blks_hit":
+ assert.False(t, validatedMetrics["postgresql.blks_hit"], "Found a duplicate in the metrics slice: postgresql.blks_hit")
+ validatedMetrics["postgresql.blks_hit"] = true
+ assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type())
+ assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len())
+ assert.Equal(t, "Number of times disk blocks were found already in the buffer cache.", ms.At(i).Description())
+ assert.Equal(t, "{blks_hit}", ms.At(i).Unit())
+ assert.True(t, ms.At(i).Sum().IsMonotonic())
+ assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality())
+ dp := ms.At(i).Sum().DataPoints().At(0)
+ assert.Equal(t, start, dp.StartTimestamp())
+ assert.Equal(t, ts, dp.Timestamp())
+ assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
+ assert.Equal(t, int64(1), dp.IntValue())
+ case "postgresql.blks_read":
+ assert.False(t, validatedMetrics["postgresql.blks_read"], "Found a duplicate in the metrics slice: postgresql.blks_read")
+ validatedMetrics["postgresql.blks_read"] = true
+ assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type())
+ assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len())
+ assert.Equal(t, "Number of disk blocks read in this database.", ms.At(i).Description())
+ assert.Equal(t, "{blks_read}", ms.At(i).Unit())
+ assert.True(t, ms.At(i).Sum().IsMonotonic())
+ assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality())
+ dp := ms.At(i).Sum().DataPoints().At(0)
+ assert.Equal(t, start, dp.StartTimestamp())
+ assert.Equal(t, ts, dp.Timestamp())
+ assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
+ assert.Equal(t, int64(1), dp.IntValue())
case "postgresql.blocks_read":
assert.False(t, validatedMetrics["postgresql.blocks_read"], "Found a duplicate in the metrics slice: postgresql.blocks_read")
validatedMetrics["postgresql.blocks_read"] = true
@@ -556,6 +605,76 @@ func TestMetricsBuilder(t *testing.T) {
assert.Equal(t, ts, dp.Timestamp())
assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
assert.Equal(t, int64(1), dp.IntValue())
+ case "postgresql.tup_deleted":
+ assert.False(t, validatedMetrics["postgresql.tup_deleted"], "Found a duplicate in the metrics slice: postgresql.tup_deleted")
+ validatedMetrics["postgresql.tup_deleted"] = true
+ assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type())
+ assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len())
+ assert.Equal(t, "Number of rows deleted by queries in the database.", ms.At(i).Description())
+ assert.Equal(t, "{tup_deleted}", ms.At(i).Unit())
+ assert.True(t, ms.At(i).Sum().IsMonotonic())
+ assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality())
+ dp := ms.At(i).Sum().DataPoints().At(0)
+ assert.Equal(t, start, dp.StartTimestamp())
+ assert.Equal(t, ts, dp.Timestamp())
+ assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
+ assert.Equal(t, int64(1), dp.IntValue())
+ case "postgresql.tup_fetched":
+ assert.False(t, validatedMetrics["postgresql.tup_fetched"], "Found a duplicate in the metrics slice: postgresql.tup_fetched")
+ validatedMetrics["postgresql.tup_fetched"] = true
+ assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type())
+ assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len())
+ assert.Equal(t, "Number of rows fetched by queries in the database.", ms.At(i).Description())
+ assert.Equal(t, "{tup_fetched}", ms.At(i).Unit())
+ assert.True(t, ms.At(i).Sum().IsMonotonic())
+ assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality())
+ dp := ms.At(i).Sum().DataPoints().At(0)
+ assert.Equal(t, start, dp.StartTimestamp())
+ assert.Equal(t, ts, dp.Timestamp())
+ assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
+ assert.Equal(t, int64(1), dp.IntValue())
+ case "postgresql.tup_inserted":
+ assert.False(t, validatedMetrics["postgresql.tup_inserted"], "Found a duplicate in the metrics slice: postgresql.tup_inserted")
+ validatedMetrics["postgresql.tup_inserted"] = true
+ assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type())
+ assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len())
+ assert.Equal(t, "Number of rows inserted by queries in the database.", ms.At(i).Description())
+ assert.Equal(t, "{tup_inserted}", ms.At(i).Unit())
+ assert.True(t, ms.At(i).Sum().IsMonotonic())
+ assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality())
+ dp := ms.At(i).Sum().DataPoints().At(0)
+ assert.Equal(t, start, dp.StartTimestamp())
+ assert.Equal(t, ts, dp.Timestamp())
+ assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
+ assert.Equal(t, int64(1), dp.IntValue())
+ case "postgresql.tup_returned":
+ assert.False(t, validatedMetrics["postgresql.tup_returned"], "Found a duplicate in the metrics slice: postgresql.tup_returned")
+ validatedMetrics["postgresql.tup_returned"] = true
+ assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type())
+ assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len())
+ assert.Equal(t, "Number of rows returned by queries in the database.", ms.At(i).Description())
+ assert.Equal(t, "{tup_returned}", ms.At(i).Unit())
+ assert.True(t, ms.At(i).Sum().IsMonotonic())
+ assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality())
+ dp := ms.At(i).Sum().DataPoints().At(0)
+ assert.Equal(t, start, dp.StartTimestamp())
+ assert.Equal(t, ts, dp.Timestamp())
+ assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
+ assert.Equal(t, int64(1), dp.IntValue())
+ case "postgresql.tup_updated":
+ assert.False(t, validatedMetrics["postgresql.tup_updated"], "Found a duplicate in the metrics slice: postgresql.tup_updated")
+ validatedMetrics["postgresql.tup_updated"] = true
+ assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type())
+ assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len())
+ assert.Equal(t, "Number of rows updated by queries in the database.", ms.At(i).Description())
+ assert.Equal(t, "{tup_updated}", ms.At(i).Unit())
+ assert.True(t, ms.At(i).Sum().IsMonotonic())
+ assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality())
+ dp := ms.At(i).Sum().DataPoints().At(0)
+ assert.Equal(t, start, dp.StartTimestamp())
+ assert.Equal(t, ts, dp.Timestamp())
+ assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
+ assert.Equal(t, int64(1), dp.IntValue())
case "postgresql.wal.age":
assert.False(t, validatedMetrics["postgresql.wal.age"], "Found a duplicate in the metrics slice: postgresql.wal.age")
validatedMetrics["postgresql.wal.age"] = true
diff --git a/receiver/postgresqlreceiver/internal/metadata/testdata/config.yaml b/receiver/postgresqlreceiver/internal/metadata/testdata/config.yaml
index 8cf4613c3849..71b1192ade06 100644
--- a/receiver/postgresqlreceiver/internal/metadata/testdata/config.yaml
+++ b/receiver/postgresqlreceiver/internal/metadata/testdata/config.yaml
@@ -13,6 +13,10 @@ all_set:
enabled: true
postgresql.bgwriter.maxwritten:
enabled: true
+ postgresql.blks_hit:
+ enabled: true
+ postgresql.blks_read:
+ enabled: true
postgresql.blocks_read:
enabled: true
postgresql.commits:
@@ -49,6 +53,16 @@ all_set:
enabled: true
postgresql.temp_files:
enabled: true
+ postgresql.tup_deleted:
+ enabled: true
+ postgresql.tup_fetched:
+ enabled: true
+ postgresql.tup_inserted:
+ enabled: true
+ postgresql.tup_returned:
+ enabled: true
+ postgresql.tup_updated:
+ enabled: true
postgresql.wal.age:
enabled: true
postgresql.wal.delay:
@@ -78,6 +92,10 @@ none_set:
enabled: false
postgresql.bgwriter.maxwritten:
enabled: false
+ postgresql.blks_hit:
+ enabled: false
+ postgresql.blks_read:
+ enabled: false
postgresql.blocks_read:
enabled: false
postgresql.commits:
@@ -114,6 +132,16 @@ none_set:
enabled: false
postgresql.temp_files:
enabled: false
+ postgresql.tup_deleted:
+ enabled: false
+ postgresql.tup_fetched:
+ enabled: false
+ postgresql.tup_inserted:
+ enabled: false
+ postgresql.tup_returned:
+ enabled: false
+ postgresql.tup_updated:
+ enabled: false
postgresql.wal.age:
enabled: false
postgresql.wal.delay:
diff --git a/receiver/postgresqlreceiver/metadata.yaml b/receiver/postgresqlreceiver/metadata.yaml
index 401d49947b3a..1745f17fba78 100644
--- a/receiver/postgresqlreceiver/metadata.yaml
+++ b/receiver/postgresqlreceiver/metadata.yaml
@@ -310,6 +310,61 @@ metrics:
value_type: double
extended_documentation: |
This metric requires WAL to be enabled with at least one replica.
-
+ postgresql.tup_updated:
+ enabled: false
+ description: Number of rows updated by queries in the database.
+ unit: "{tup_updated}"
+ sum:
+ value_type: int
+ monotonic: true
+ aggregation_temporality: cumulative
+ postgresql.tup_returned:
+ enabled: false
+ description: Number of rows returned by queries in the database.
+ unit: "{tup_returned}"
+ sum:
+ value_type: int
+ monotonic: true
+ aggregation_temporality: cumulative
+ postgresql.tup_fetched:
+ enabled: false
+ description: Number of rows fetched by queries in the database.
+ unit: "{tup_fetched}"
+ sum:
+ value_type: int
+ monotonic: true
+ aggregation_temporality: cumulative
+ postgresql.tup_inserted:
+ enabled: false
+ description: Number of rows inserted by queries in the database.
+ unit: "{tup_inserted}"
+ sum:
+ value_type: int
+ monotonic: true
+ aggregation_temporality: cumulative
+ postgresql.tup_deleted:
+ enabled: false
+ description: Number of rows deleted by queries in the database.
+ unit: "{tup_deleted}"
+ sum:
+ value_type: int
+ monotonic: true
+ aggregation_temporality: cumulative
+ postgresql.blks_hit:
+ enabled: false
+ description: Number of times disk blocks were found already in the buffer cache.
+ unit: "{blks_hit}"
+ sum:
+ value_type: int
+ monotonic: true
+ aggregation_temporality: cumulative
+ postgresql.blks_read:
+ enabled: false
+ description: Number of disk blocks read in this database.
+ unit: "{blks_read}"
+ sum:
+ value_type: int
+ monotonic: true
+ aggregation_temporality: cumulative
tests:
config:
diff --git a/receiver/postgresqlreceiver/scraper.go b/receiver/postgresqlreceiver/scraper.go
index 1fe28994e68f..55d140716d22 100644
--- a/receiver/postgresqlreceiver/scraper.go
+++ b/receiver/postgresqlreceiver/scraper.go
@@ -201,6 +201,13 @@ func (p *postgreSQLScraper) recordDatabase(now pcommon.Timestamp, db string, r *
p.mb.RecordPostgresqlRollbacksDataPoint(now, stats.transactionRollback)
p.mb.RecordPostgresqlDeadlocksDataPoint(now, stats.deadlocks)
p.mb.RecordPostgresqlTempFilesDataPoint(now, stats.tempFiles)
+ p.mb.RecordPostgresqlTupUpdatedDataPoint(now, stats.tupUpdated)
+ p.mb.RecordPostgresqlTupReturnedDataPoint(now, stats.tupReturned)
+ p.mb.RecordPostgresqlTupFetchedDataPoint(now, stats.tupFetched)
+ p.mb.RecordPostgresqlTupInsertedDataPoint(now, stats.tupInserted)
+ p.mb.RecordPostgresqlTupDeletedDataPoint(now, stats.tupDeleted)
+ p.mb.RecordPostgresqlBlksHitDataPoint(now, stats.blksHit)
+ p.mb.RecordPostgresqlBlksReadDataPoint(now, stats.blksRead)
}
rb := p.mb.NewResourceBuilder()
rb.SetPostgresqlDatabaseName(db)
diff --git a/receiver/postgresqlreceiver/scraper_test.go b/receiver/postgresqlreceiver/scraper_test.go
index fad106672d75..86457712eb21 100644
--- a/receiver/postgresqlreceiver/scraper_test.go
+++ b/receiver/postgresqlreceiver/scraper_test.go
@@ -45,6 +45,13 @@ func TestScraper(t *testing.T) {
cfg.Metrics.PostgresqlWalDelay.Enabled = true
cfg.Metrics.PostgresqlDeadlocks.Enabled = true
cfg.Metrics.PostgresqlTempFiles.Enabled = true
+ cfg.Metrics.PostgresqlTupUpdated.Enabled = true
+ cfg.Metrics.PostgresqlTupReturned.Enabled = true
+ cfg.Metrics.PostgresqlTupFetched.Enabled = true
+ cfg.Metrics.PostgresqlTupInserted.Enabled = true
+ cfg.Metrics.PostgresqlTupDeleted.Enabled = true
+ cfg.Metrics.PostgresqlBlksHit.Enabled = true
+ cfg.Metrics.PostgresqlBlksRead.Enabled = true
cfg.Metrics.PostgresqlSequentialScans.Enabled = true
cfg.Metrics.PostgresqlDatabaseLocks.Enabled = true
@@ -81,6 +88,20 @@ func TestScraperNoDatabaseSingle(t *testing.T) {
cfg.Metrics.PostgresqlDeadlocks.Enabled = true
require.False(t, cfg.Metrics.PostgresqlTempFiles.Enabled)
cfg.Metrics.PostgresqlTempFiles.Enabled = true
+ require.False(t, cfg.Metrics.PostgresqlTupUpdated.Enabled)
+ cfg.Metrics.PostgresqlTupUpdated.Enabled = true
+ require.False(t, cfg.Metrics.PostgresqlTupReturned.Enabled)
+ cfg.Metrics.PostgresqlTupReturned.Enabled = true
+ require.False(t, cfg.Metrics.PostgresqlTupFetched.Enabled)
+ cfg.Metrics.PostgresqlTupFetched.Enabled = true
+ require.False(t, cfg.Metrics.PostgresqlTupInserted.Enabled)
+ cfg.Metrics.PostgresqlTupInserted.Enabled = true
+ require.False(t, cfg.Metrics.PostgresqlTupDeleted.Enabled)
+ cfg.Metrics.PostgresqlTupDeleted.Enabled = true
+ require.False(t, cfg.Metrics.PostgresqlBlksHit.Enabled)
+ cfg.Metrics.PostgresqlBlksHit.Enabled = true
+ require.False(t, cfg.Metrics.PostgresqlBlksRead.Enabled)
+ cfg.Metrics.PostgresqlBlksRead.Enabled = true
require.False(t, cfg.Metrics.PostgresqlSequentialScans.Enabled)
cfg.Metrics.PostgresqlSequentialScans.Enabled = true
require.False(t, cfg.Metrics.PostgresqlDatabaseLocks.Enabled)
@@ -100,6 +121,13 @@ func TestScraperNoDatabaseSingle(t *testing.T) {
cfg.Metrics.PostgresqlWalDelay.Enabled = false
cfg.Metrics.PostgresqlDeadlocks.Enabled = false
cfg.Metrics.PostgresqlTempFiles.Enabled = false
+ cfg.Metrics.PostgresqlTupUpdated.Enabled = false
+ cfg.Metrics.PostgresqlTupReturned.Enabled = false
+ cfg.Metrics.PostgresqlTupFetched.Enabled = false
+ cfg.Metrics.PostgresqlTupInserted.Enabled = false
+ cfg.Metrics.PostgresqlTupDeleted.Enabled = false
+ cfg.Metrics.PostgresqlBlksHit.Enabled = false
+ cfg.Metrics.PostgresqlBlksRead.Enabled = false
cfg.Metrics.PostgresqlSequentialScans.Enabled = false
cfg.Metrics.PostgresqlDatabaseLocks.Enabled = false
@@ -135,6 +163,20 @@ func TestScraperNoDatabaseMultipleWithoutPreciseLag(t *testing.T) {
cfg.Metrics.PostgresqlDeadlocks.Enabled = true
require.False(t, cfg.Metrics.PostgresqlTempFiles.Enabled)
cfg.Metrics.PostgresqlTempFiles.Enabled = true
+ require.False(t, cfg.Metrics.PostgresqlTupUpdated.Enabled)
+ cfg.Metrics.PostgresqlTupUpdated.Enabled = true
+ require.False(t, cfg.Metrics.PostgresqlTupReturned.Enabled)
+ cfg.Metrics.PostgresqlTupReturned.Enabled = true
+ require.False(t, cfg.Metrics.PostgresqlTupFetched.Enabled)
+ cfg.Metrics.PostgresqlTupFetched.Enabled = true
+ require.False(t, cfg.Metrics.PostgresqlTupInserted.Enabled)
+ cfg.Metrics.PostgresqlTupInserted.Enabled = true
+ require.False(t, cfg.Metrics.PostgresqlTupDeleted.Enabled)
+ cfg.Metrics.PostgresqlTupDeleted.Enabled = true
+ require.False(t, cfg.Metrics.PostgresqlBlksHit.Enabled)
+ cfg.Metrics.PostgresqlBlksHit.Enabled = true
+ require.False(t, cfg.Metrics.PostgresqlBlksRead.Enabled)
+ cfg.Metrics.PostgresqlBlksRead.Enabled = true
require.False(t, cfg.Metrics.PostgresqlSequentialScans.Enabled)
cfg.Metrics.PostgresqlSequentialScans.Enabled = true
require.False(t, cfg.Metrics.PostgresqlDatabaseLocks.Enabled)
@@ -172,6 +214,20 @@ func TestScraperNoDatabaseMultiple(t *testing.T) {
cfg.Metrics.PostgresqlDeadlocks.Enabled = true
require.False(t, cfg.Metrics.PostgresqlTempFiles.Enabled)
cfg.Metrics.PostgresqlTempFiles.Enabled = true
+ require.False(t, cfg.Metrics.PostgresqlTupUpdated.Enabled)
+ cfg.Metrics.PostgresqlTupUpdated.Enabled = true
+ require.False(t, cfg.Metrics.PostgresqlTupReturned.Enabled)
+ cfg.Metrics.PostgresqlTupReturned.Enabled = true
+ require.False(t, cfg.Metrics.PostgresqlTupFetched.Enabled)
+ cfg.Metrics.PostgresqlTupFetched.Enabled = true
+ require.False(t, cfg.Metrics.PostgresqlTupInserted.Enabled)
+ cfg.Metrics.PostgresqlTupInserted.Enabled = true
+ require.False(t, cfg.Metrics.PostgresqlTupDeleted.Enabled)
+ cfg.Metrics.PostgresqlTupDeleted.Enabled = true
+ require.False(t, cfg.Metrics.PostgresqlBlksHit.Enabled)
+ cfg.Metrics.PostgresqlBlksHit.Enabled = true
+ require.False(t, cfg.Metrics.PostgresqlBlksRead.Enabled)
+ cfg.Metrics.PostgresqlBlksRead.Enabled = true
require.False(t, cfg.Metrics.PostgresqlSequentialScans.Enabled)
cfg.Metrics.PostgresqlSequentialScans.Enabled = true
require.False(t, cfg.Metrics.PostgresqlDatabaseLocks.Enabled)
@@ -209,6 +265,20 @@ func TestScraperWithResourceAttributeFeatureGate(t *testing.T) {
cfg.Metrics.PostgresqlDeadlocks.Enabled = true
require.False(t, cfg.Metrics.PostgresqlTempFiles.Enabled)
cfg.Metrics.PostgresqlTempFiles.Enabled = true
+ require.False(t, cfg.Metrics.PostgresqlTupUpdated.Enabled)
+ cfg.Metrics.PostgresqlTupUpdated.Enabled = true
+ require.False(t, cfg.Metrics.PostgresqlTupReturned.Enabled)
+ cfg.Metrics.PostgresqlTupReturned.Enabled = true
+ require.False(t, cfg.Metrics.PostgresqlTupFetched.Enabled)
+ cfg.Metrics.PostgresqlTupFetched.Enabled = true
+ require.False(t, cfg.Metrics.PostgresqlTupInserted.Enabled)
+ cfg.Metrics.PostgresqlTupInserted.Enabled = true
+ require.False(t, cfg.Metrics.PostgresqlTupDeleted.Enabled)
+ cfg.Metrics.PostgresqlTupDeleted.Enabled = true
+ require.False(t, cfg.Metrics.PostgresqlBlksHit.Enabled)
+ cfg.Metrics.PostgresqlBlksHit.Enabled = true
+ require.False(t, cfg.Metrics.PostgresqlBlksRead.Enabled)
+ cfg.Metrics.PostgresqlBlksRead.Enabled = true
require.False(t, cfg.Metrics.PostgresqlSequentialScans.Enabled)
cfg.Metrics.PostgresqlSequentialScans.Enabled = true
require.False(t, cfg.Metrics.PostgresqlDatabaseLocks.Enabled)
@@ -247,6 +317,20 @@ func TestScraperWithResourceAttributeFeatureGateSingle(t *testing.T) {
cfg.Metrics.PostgresqlDeadlocks.Enabled = true
require.False(t, cfg.Metrics.PostgresqlTempFiles.Enabled)
cfg.Metrics.PostgresqlTempFiles.Enabled = true
+ require.False(t, cfg.Metrics.PostgresqlTupUpdated.Enabled)
+ cfg.Metrics.PostgresqlTupUpdated.Enabled = true
+ require.False(t, cfg.Metrics.PostgresqlTupReturned.Enabled)
+ cfg.Metrics.PostgresqlTupReturned.Enabled = true
+ require.False(t, cfg.Metrics.PostgresqlTupFetched.Enabled)
+ cfg.Metrics.PostgresqlTupFetched.Enabled = true
+ require.False(t, cfg.Metrics.PostgresqlTupInserted.Enabled)
+ cfg.Metrics.PostgresqlTupInserted.Enabled = true
+ require.False(t, cfg.Metrics.PostgresqlTupDeleted.Enabled)
+ cfg.Metrics.PostgresqlTupDeleted.Enabled = true
+ require.False(t, cfg.Metrics.PostgresqlBlksHit.Enabled)
+ cfg.Metrics.PostgresqlBlksHit.Enabled = true
+ require.False(t, cfg.Metrics.PostgresqlBlksRead.Enabled)
+ cfg.Metrics.PostgresqlBlksRead.Enabled = true
require.False(t, cfg.Metrics.PostgresqlSequentialScans.Enabled)
cfg.Metrics.PostgresqlSequentialScans.Enabled = true
require.False(t, cfg.Metrics.PostgresqlDatabaseLocks.Enabled)
@@ -406,6 +490,13 @@ func (m *mockClient) initMocks(database string, schema string, databases []strin
transactionRollback: int64(idx + 2),
deadlocks: int64(idx + 3),
tempFiles: int64(idx + 4),
+ tupUpdated: int64(idx + 5),
+ tupReturned: int64(idx + 6),
+ tupFetched: int64(idx + 7),
+ tupInserted: int64(idx + 8),
+ tupDeleted: int64(idx + 9),
+ blksHit: int64(idx + 10),
+ blksRead: int64(idx + 11),
}
dbSize[databaseName(db)] = int64(idx + 4)
backends[databaseName(db)] = int64(idx + 3)
diff --git a/receiver/postgresqlreceiver/testdata/integration/expected_all_db.yaml b/receiver/postgresqlreceiver/testdata/integration/expected_all_db.yaml
index d1ada6328582..40e56d97b018 100644
--- a/receiver/postgresqlreceiver/testdata/integration/expected_all_db.yaml
+++ b/receiver/postgresqlreceiver/testdata/integration/expected_all_db.yaml
@@ -196,6 +196,26 @@ resourceMetrics:
stringValue: otel
scopeMetrics:
- metrics:
+ - description: Number of times disk blocks were found already in the buffer cache.
+ name: postgresql.blks_hit
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_hit}'
+ - description: Number of disk blocks read in this database.
+ name: postgresql.blks_read
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_read}'
- description: The number of commits.
name: postgresql.commits
sum:
@@ -254,6 +274,56 @@ resourceMetrics:
timeUnixNano: "2000000"
isMonotonic: true
unit: '{temp_file}'
+ - description: Number of rows deleted by queries in the database.
+ name: postgresql.tup_deleted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_deleted}'
+ - description: Number of rows fetched by queries in the database.
+ name: postgresql.tup_fetched
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_fetched}'
+ - description: Number of rows inserted by queries in the database.
+ name: postgresql.tup_inserted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_inserted}'
+ - description: Number of rows returned by queries in the database.
+ name: postgresql.tup_returned
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_returned}'
+ - description: Number of rows updated by queries in the database.
+ name: postgresql.tup_updated
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_updated}'
scope:
name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver
version: latest
@@ -264,6 +334,26 @@ resourceMetrics:
stringValue: otel2
scopeMetrics:
- metrics:
+ - description: Number of times disk blocks were found already in the buffer cache.
+ name: postgresql.blks_hit
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_hit}'
+ - description: Number of disk blocks read in this database.
+ name: postgresql.blks_read
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_read}'
- description: The number of commits.
name: postgresql.commits
sum:
@@ -322,6 +412,56 @@ resourceMetrics:
timeUnixNano: "2000000"
isMonotonic: true
unit: '{temp_file}'
+ - description: Number of rows deleted by queries in the database.
+ name: postgresql.tup_deleted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_deleted}'
+ - description: Number of rows fetched by queries in the database.
+ name: postgresql.tup_fetched
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_fetched}'
+ - description: Number of rows inserted by queries in the database.
+ name: postgresql.tup_inserted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_inserted}'
+ - description: Number of rows returned by queries in the database.
+ name: postgresql.tup_returned
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_returned}'
+ - description: Number of rows updated by queries in the database.
+ name: postgresql.tup_updated
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_updated}'
scope:
name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver
version: latest
@@ -341,6 +481,26 @@ resourceMetrics:
startTimeUnixNano: "1000000"
timeUnixNano: "2000000"
unit: "1"
+ - description: Number of times disk blocks were found already in the buffer cache.
+ name: postgresql.blks_hit
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_hit}'
+ - description: Number of disk blocks read in this database.
+ name: postgresql.blks_read
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_read}'
- description: The number of commits.
name: postgresql.commits
sum:
@@ -399,6 +559,56 @@ resourceMetrics:
timeUnixNano: "2000000"
isMonotonic: true
unit: '{temp_file}'
+ - description: Number of rows deleted by queries in the database.
+ name: postgresql.tup_deleted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_deleted}'
+ - description: Number of rows fetched by queries in the database.
+ name: postgresql.tup_fetched
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_fetched}'
+ - description: Number of rows inserted by queries in the database.
+ name: postgresql.tup_inserted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_inserted}'
+ - description: Number of rows returned by queries in the database.
+ name: postgresql.tup_returned
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_returned}'
+ - description: Number of rows updated by queries in the database.
+ name: postgresql.tup_updated
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_updated}'
scope:
name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver
version: latest
diff --git a/receiver/postgresqlreceiver/testdata/integration/expected_all_db_connpool.yaml b/receiver/postgresqlreceiver/testdata/integration/expected_all_db_connpool.yaml
index 8aca138de78c..866b6e1a801a 100644
--- a/receiver/postgresqlreceiver/testdata/integration/expected_all_db_connpool.yaml
+++ b/receiver/postgresqlreceiver/testdata/integration/expected_all_db_connpool.yaml
@@ -15,6 +15,26 @@ resourceMetrics:
startTimeUnixNano: "1706802467703361527"
timeUnixNano: "1706802526712082422"
unit: "1"
+ - description: Number of times disk blocks were found already in the buffer cache.
+ name: postgresql.blks_hit
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802467703361527"
+ timeUnixNano: "1706802526712082422"
+ isMonotonic: true
+ unit: '{blks_hit}'
+ - description: Number of disk blocks read in this database.
+ name: postgresql.blks_read
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802467703361527"
+ timeUnixNano: "1706802526712082422"
+ isMonotonic: true
+ unit: '{blks_read}'
- description: The number of commits.
name: postgresql.commits
sum:
@@ -73,6 +93,56 @@ resourceMetrics:
timeUnixNano: "1706802526712082422"
isMonotonic: true
unit: '{temp_file}'
+ - description: Number of rows deleted by queries in the database.
+ name: postgresql.tup_deleted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802467703361527"
+ timeUnixNano: "1706802526712082422"
+ isMonotonic: true
+ unit: '{tup_deleted}'
+ - description: Number of rows fetched by queries in the database.
+ name: postgresql.tup_fetched
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802467703361527"
+ timeUnixNano: "1706802526712082422"
+ isMonotonic: true
+ unit: '{tup_fetched}'
+ - description: Number of rows inserted by queries in the database.
+ name: postgresql.tup_inserted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802467703361527"
+ timeUnixNano: "1706802526712082422"
+ isMonotonic: true
+ unit: '{tup_inserted}'
+ - description: Number of rows returned by queries in the database.
+ name: postgresql.tup_returned
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802467703361527"
+ timeUnixNano: "1706802526712082422"
+ isMonotonic: true
+ unit: '{tup_returned}'
+ - description: Number of rows updated by queries in the database.
+ name: postgresql.tup_updated
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802467703361527"
+ timeUnixNano: "1706802526712082422"
+ isMonotonic: true
+ unit: '{tup_updated}'
scope:
name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver
version: latest
@@ -412,6 +482,26 @@ resourceMetrics:
startTimeUnixNano: "1706802467703361527"
timeUnixNano: "1706802526712082422"
unit: "1"
+ - description: Number of times disk blocks were found already in the buffer cache.
+ name: postgresql.blks_hit
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802467703361527"
+ timeUnixNano: "1706802526712082422"
+ isMonotonic: true
+ unit: '{blks_hit}'
+ - description: Number of disk blocks read in this database.
+ name: postgresql.blks_read
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802467703361527"
+ timeUnixNano: "1706802526712082422"
+ isMonotonic: true
+ unit: '{blks_read}'
- description: The number of commits.
name: postgresql.commits
sum:
@@ -470,6 +560,56 @@ resourceMetrics:
timeUnixNano: "1706802526712082422"
isMonotonic: true
unit: '{temp_file}'
+ - description: Number of rows deleted by queries in the database.
+ name: postgresql.tup_deleted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802467703361527"
+ timeUnixNano: "1706802526712082422"
+ isMonotonic: true
+ unit: '{tup_deleted}'
+ - description: Number of rows fetched by queries in the database.
+ name: postgresql.tup_fetched
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802467703361527"
+ timeUnixNano: "1706802526712082422"
+ isMonotonic: true
+ unit: '{tup_fetched}'
+ - description: Number of rows inserted by queries in the database.
+ name: postgresql.tup_inserted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802467703361527"
+ timeUnixNano: "1706802526712082422"
+ isMonotonic: true
+ unit: '{tup_inserted}'
+ - description: Number of rows returned by queries in the database.
+ name: postgresql.tup_returned
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802467703361527"
+ timeUnixNano: "1706802526712082422"
+ isMonotonic: true
+ unit: '{tup_returned}'
+ - description: Number of rows updated by queries in the database.
+ name: postgresql.tup_updated
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802467703361527"
+ timeUnixNano: "1706802526712082422"
+ isMonotonic: true
+ unit: '{tup_updated}'
scope:
name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver
version: latest
@@ -877,6 +1017,26 @@ resourceMetrics:
startTimeUnixNano: "1706802467703361527"
timeUnixNano: "1706802526712082422"
unit: "1"
+ - description: Number of times disk blocks were found already in the buffer cache.
+ name: postgresql.blks_hit
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802467703361527"
+ timeUnixNano: "1706802526712082422"
+ isMonotonic: true
+ unit: '{blks_hit}'
+ - description: Number of disk blocks read in this database.
+ name: postgresql.blks_read
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802467703361527"
+ timeUnixNano: "1706802526712082422"
+ isMonotonic: true
+ unit: '{blks_read}'
- description: The number of commits.
name: postgresql.commits
sum:
@@ -935,6 +1095,56 @@ resourceMetrics:
timeUnixNano: "1706802526712082422"
isMonotonic: true
unit: '{temp_file}'
+ - description: Number of rows deleted by queries in the database.
+ name: postgresql.tup_deleted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802467703361527"
+ timeUnixNano: "1706802526712082422"
+ isMonotonic: true
+ unit: '{tup_deleted}'
+ - description: Number of rows fetched by queries in the database.
+ name: postgresql.tup_fetched
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802467703361527"
+ timeUnixNano: "1706802526712082422"
+ isMonotonic: true
+ unit: '{tup_fetched}'
+ - description: Number of rows inserted by queries in the database.
+ name: postgresql.tup_inserted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802467703361527"
+ timeUnixNano: "1706802526712082422"
+ isMonotonic: true
+ unit: '{tup_inserted}'
+ - description: Number of rows returned by queries in the database.
+ name: postgresql.tup_returned
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802467703361527"
+ timeUnixNano: "1706802526712082422"
+ isMonotonic: true
+ unit: '{tup_returned}'
+ - description: Number of rows updated by queries in the database.
+ name: postgresql.tup_updated
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802467703361527"
+ timeUnixNano: "1706802526712082422"
+ isMonotonic: true
+ unit: '{tup_updated}'
scope:
name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver
version: latest
diff --git a/receiver/postgresqlreceiver/testdata/integration/expected_all_db_schemaattr.yaml b/receiver/postgresqlreceiver/testdata/integration/expected_all_db_schemaattr.yaml
index a4c655f3c74b..90edac5efcdf 100644
--- a/receiver/postgresqlreceiver/testdata/integration/expected_all_db_schemaattr.yaml
+++ b/receiver/postgresqlreceiver/testdata/integration/expected_all_db_schemaattr.yaml
@@ -196,6 +196,26 @@ resourceMetrics:
stringValue: otel
scopeMetrics:
- metrics:
+ - description: Number of times disk blocks were found already in the buffer cache.
+ name: postgresql.blks_hit
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_hit}'
+ - description: Number of disk blocks read in this database.
+ name: postgresql.blks_read
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_read}'
- description: The number of commits.
name: postgresql.commits
sum:
@@ -254,6 +274,56 @@ resourceMetrics:
timeUnixNano: "2000000"
isMonotonic: true
unit: '{temp_file}'
+ - description: Number of rows deleted by queries in the database.
+ name: postgresql.tup_deleted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_deleted}'
+ - description: Number of rows fetched by queries in the database.
+ name: postgresql.tup_fetched
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_fetched}'
+ - description: Number of rows inserted by queries in the database.
+ name: postgresql.tup_inserted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_inserted}'
+ - description: Number of rows returned by queries in the database.
+ name: postgresql.tup_returned
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_returned}'
+ - description: Number of rows updated by queries in the database.
+ name: postgresql.tup_updated
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_updated}'
scope:
name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver
version: latest
@@ -264,6 +334,26 @@ resourceMetrics:
stringValue: otel2
scopeMetrics:
- metrics:
+ - description: Number of times disk blocks were found already in the buffer cache.
+ name: postgresql.blks_hit
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_hit}'
+ - description: Number of disk blocks read in this database.
+ name: postgresql.blks_read
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_read}'
- description: The number of commits.
name: postgresql.commits
sum:
@@ -322,6 +412,56 @@ resourceMetrics:
timeUnixNano: "2000000"
isMonotonic: true
unit: '{temp_file}'
+ - description: Number of rows deleted by queries in the database.
+ name: postgresql.tup_deleted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_deleted}'
+ - description: Number of rows fetched by queries in the database.
+ name: postgresql.tup_fetched
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_fetched}'
+ - description: Number of rows inserted by queries in the database.
+ name: postgresql.tup_inserted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_inserted}'
+ - description: Number of rows returned by queries in the database.
+ name: postgresql.tup_returned
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_returned}'
+ - description: Number of rows updated by queries in the database.
+ name: postgresql.tup_updated
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_updated}'
scope:
name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver
version: latest
@@ -341,6 +481,26 @@ resourceMetrics:
startTimeUnixNano: "1000000"
timeUnixNano: "2000000"
unit: "1"
+ - description: Number of times disk blocks were found already in the buffer cache.
+ name: postgresql.blks_hit
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_hit}'
+ - description: Number of disk blocks read in this database.
+ name: postgresql.blks_read
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_read}'
- description: The number of commits.
name: postgresql.commits
sum:
@@ -399,6 +559,56 @@ resourceMetrics:
timeUnixNano: "2000000"
isMonotonic: true
unit: '{temp_file}'
+ - description: Number of rows deleted by queries in the database.
+ name: postgresql.tup_deleted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_deleted}'
+ - description: Number of rows fetched by queries in the database.
+ name: postgresql.tup_fetched
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_fetched}'
+ - description: Number of rows inserted by queries in the database.
+ name: postgresql.tup_inserted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_inserted}'
+ - description: Number of rows returned by queries in the database.
+ name: postgresql.tup_returned
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_returned}'
+ - description: Number of rows updated by queries in the database.
+ name: postgresql.tup_updated
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_updated}'
scope:
name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver
version: latest
diff --git a/receiver/postgresqlreceiver/testdata/integration/expected_multi_db.yaml b/receiver/postgresqlreceiver/testdata/integration/expected_multi_db.yaml
index b3bcba77057a..81167c9fc9f3 100644
--- a/receiver/postgresqlreceiver/testdata/integration/expected_multi_db.yaml
+++ b/receiver/postgresqlreceiver/testdata/integration/expected_multi_db.yaml
@@ -196,6 +196,26 @@ resourceMetrics:
stringValue: otel
scopeMetrics:
- metrics:
+ - description: Number of times disk blocks were found already in the buffer cache.
+ name: postgresql.blks_hit
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_hit}'
+ - description: Number of disk blocks read in this database.
+ name: postgresql.blks_read
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_read}'
- description: The number of commits.
name: postgresql.commits
sum:
@@ -254,6 +274,56 @@ resourceMetrics:
timeUnixNano: "2000000"
isMonotonic: true
unit: '{temp_file}'
+ - description: Number of rows deleted by queries in the database.
+ name: postgresql.tup_deleted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_deleted}'
+ - description: Number of rows fetched by queries in the database.
+ name: postgresql.tup_fetched
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_fetched}'
+ - description: Number of rows inserted by queries in the database.
+ name: postgresql.tup_inserted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_inserted}'
+ - description: Number of rows returned by queries in the database.
+ name: postgresql.tup_returned
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_returned}'
+ - description: Number of rows updated by queries in the database.
+ name: postgresql.tup_updated
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_updated}'
scope:
name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver
version: latest
@@ -264,6 +334,26 @@ resourceMetrics:
stringValue: otel2
scopeMetrics:
- metrics:
+ - description: Number of times disk blocks were found already in the buffer cache.
+ name: postgresql.blks_hit
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_hit}'
+ - description: Number of disk blocks read in this database.
+ name: postgresql.blks_read
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_read}'
- description: The number of commits.
name: postgresql.commits
sum:
@@ -322,6 +412,56 @@ resourceMetrics:
timeUnixNano: "2000000"
isMonotonic: true
unit: '{temp_file}'
+ - description: Number of rows deleted by queries in the database.
+ name: postgresql.tup_deleted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_deleted}'
+ - description: Number of rows fetched by queries in the database.
+ name: postgresql.tup_fetched
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_fetched}'
+ - description: Number of rows inserted by queries in the database.
+ name: postgresql.tup_inserted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_inserted}'
+ - description: Number of rows returned by queries in the database.
+ name: postgresql.tup_returned
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_returned}'
+ - description: Number of rows updated by queries in the database.
+ name: postgresql.tup_updated
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_updated}'
scope:
name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver
version: latest
diff --git a/receiver/postgresqlreceiver/testdata/integration/expected_multi_db_connpool.yaml b/receiver/postgresqlreceiver/testdata/integration/expected_multi_db_connpool.yaml
index c8431659636a..7cb492575fd7 100644
--- a/receiver/postgresqlreceiver/testdata/integration/expected_multi_db_connpool.yaml
+++ b/receiver/postgresqlreceiver/testdata/integration/expected_multi_db_connpool.yaml
@@ -335,6 +335,26 @@ resourceMetrics:
startTimeUnixNano: "1706802402706723341"
timeUnixNano: "1706802461712893428"
unit: "1"
+ - description: Number of times disk blocks were found already in the buffer cache.
+ name: postgresql.blks_hit
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802402706723341"
+ timeUnixNano: "1706802461712893428"
+ isMonotonic: true
+ unit: '{blks_hit}'
+ - description: Number of disk blocks read in this database.
+ name: postgresql.blks_read
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802402706723341"
+ timeUnixNano: "1706802461712893428"
+ isMonotonic: true
+ unit: '{blks_read}'
- description: The number of commits.
name: postgresql.commits
sum:
@@ -393,6 +413,56 @@ resourceMetrics:
timeUnixNano: "1706802461712893428"
isMonotonic: true
unit: '{temp_file}'
+ - description: Number of rows deleted by queries in the database.
+ name: postgresql.tup_deleted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802402706723341"
+ timeUnixNano: "1706802461712893428"
+ isMonotonic: true
+ unit: '{tup_deleted}'
+ - description: Number of rows fetched by queries in the database.
+ name: postgresql.tup_fetched
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802402706723341"
+ timeUnixNano: "1706802461712893428"
+ isMonotonic: true
+ unit: '{tup_fetched}'
+ - description: Number of rows inserted by queries in the database.
+ name: postgresql.tup_inserted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802402706723341"
+ timeUnixNano: "1706802461712893428"
+ isMonotonic: true
+ unit: '{tup_inserted}'
+ - description: Number of rows returned by queries in the database.
+ name: postgresql.tup_returned
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802402706723341"
+ timeUnixNano: "1706802461712893428"
+ isMonotonic: true
+ unit: '{tup_returned}'
+ - description: Number of rows updated by queries in the database.
+ name: postgresql.tup_updated
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802402706723341"
+ timeUnixNano: "1706802461712893428"
+ isMonotonic: true
+ unit: '{tup_updated}'
scope:
name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver
version: latest
@@ -800,6 +870,26 @@ resourceMetrics:
startTimeUnixNano: "1706802402706723341"
timeUnixNano: "1706802461712893428"
unit: "1"
+ - description: Number of times disk blocks were found already in the buffer cache.
+ name: postgresql.blks_hit
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_hit}'
+ - description: Number of disk blocks read in this database.
+ name: postgresql.blks_read
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_read}'
- description: The number of commits.
name: postgresql.commits
sum:
@@ -858,6 +948,56 @@ resourceMetrics:
timeUnixNano: "1706802461712893428"
isMonotonic: true
unit: '{temp_file}'
+ - description: Number of rows deleted by queries in the database.
+ name: postgresql.tup_deleted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802402706723341"
+ timeUnixNano: "1706802461712893428"
+ isMonotonic: true
+ unit: '{tup_deleted}'
+ - description: Number of rows fetched by queries in the database.
+ name: postgresql.tup_fetched
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802402706723341"
+ timeUnixNano: "1706802461712893428"
+ isMonotonic: true
+ unit: '{tup_fetched}'
+ - description: Number of rows inserted by queries in the database.
+ name: postgresql.tup_inserted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802402706723341"
+ timeUnixNano: "1706802461712893428"
+ isMonotonic: true
+ unit: '{tup_inserted}'
+ - description: Number of rows returned by queries in the database.
+ name: postgresql.tup_returned
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802402706723341"
+ timeUnixNano: "1706802461712893428"
+ isMonotonic: true
+ unit: '{tup_returned}'
+ - description: Number of rows updated by queries in the database.
+ name: postgresql.tup_updated
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802402706723341"
+ timeUnixNano: "1706802461712893428"
+ isMonotonic: true
+ unit: '{tup_updated}'
scope:
name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver
version: latest
diff --git a/receiver/postgresqlreceiver/testdata/integration/expected_multi_db_schemaattr.yaml b/receiver/postgresqlreceiver/testdata/integration/expected_multi_db_schemaattr.yaml
index 8bbeda6fe2b7..454ceb1e1341 100644
--- a/receiver/postgresqlreceiver/testdata/integration/expected_multi_db_schemaattr.yaml
+++ b/receiver/postgresqlreceiver/testdata/integration/expected_multi_db_schemaattr.yaml
@@ -196,6 +196,26 @@ resourceMetrics:
stringValue: otel
scopeMetrics:
- metrics:
+ - description: Number of times disk blocks were found already in the buffer cache.
+ name: postgresql.blks_hit
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_hit}'
+ - description: Number of disk blocks read in this database.
+ name: postgresql.blks_read
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_read}'
- description: The number of commits.
name: postgresql.commits
sum:
@@ -254,6 +274,56 @@ resourceMetrics:
timeUnixNano: "2000000"
isMonotonic: true
unit: '{temp_file}'
+ - description: Number of rows deleted by queries in the database.
+ name: postgresql.tup_deleted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_deleted}'
+ - description: Number of rows fetched by queries in the database.
+ name: postgresql.tup_fetched
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_fetched}'
+ - description: Number of rows inserted by queries in the database.
+ name: postgresql.tup_inserted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_inserted}'
+ - description: Number of rows returned by queries in the database.
+ name: postgresql.tup_returned
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_returned}'
+ - description: Number of rows updated by queries in the database.
+ name: postgresql.tup_updated
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_updated}'
scope:
name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver
version: latest
@@ -264,6 +334,26 @@ resourceMetrics:
stringValue: otel2
scopeMetrics:
- metrics:
+ - description: Number of times disk blocks were found already in the buffer cache.
+ name: postgresql.blks_hit
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_hit}'
+ - description: Number of disk blocks read in this database.
+ name: postgresql.blks_read
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_read}'
- description: The number of commits.
name: postgresql.commits
sum:
@@ -322,6 +412,56 @@ resourceMetrics:
timeUnixNano: "2000000"
isMonotonic: true
unit: '{temp_file}'
+ - description: Number of rows deleted by queries in the database.
+ name: postgresql.tup_deleted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_deleted}'
+ - description: Number of rows fetched by queries in the database.
+ name: postgresql.tup_fetched
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_fetched}'
+ - description: Number of rows inserted by queries in the database.
+ name: postgresql.tup_inserted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_inserted}'
+ - description: Number of rows returned by queries in the database.
+ name: postgresql.tup_returned
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_returned}'
+ - description: Number of rows updated by queries in the database.
+ name: postgresql.tup_updated
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_updated}'
scope:
name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver
version: latest
diff --git a/receiver/postgresqlreceiver/testdata/integration/expected_single_db.yaml b/receiver/postgresqlreceiver/testdata/integration/expected_single_db.yaml
index 48ed6f008318..ff3ccfa944ee 100644
--- a/receiver/postgresqlreceiver/testdata/integration/expected_single_db.yaml
+++ b/receiver/postgresqlreceiver/testdata/integration/expected_single_db.yaml
@@ -196,6 +196,26 @@ resourceMetrics:
stringValue: otel
scopeMetrics:
- metrics:
+ - description: Number of times disk blocks were found already in the buffer cache.
+ name: postgresql.blks_hit
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_hit}'
+ - description: Number of disk blocks read in this database.
+ name: postgresql.blks_read
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_read}'
- description: The number of commits.
name: postgresql.commits
sum:
@@ -254,6 +274,56 @@ resourceMetrics:
timeUnixNano: "2000000"
isMonotonic: true
unit: '{temp_file}'
+ - description: Number of rows deleted by queries in the database.
+ name: postgresql.tup_deleted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_deleted}'
+ - description: Number of rows fetched by queries in the database.
+ name: postgresql.tup_fetched
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_fetched}'
+ - description: Number of rows inserted by queries in the database.
+ name: postgresql.tup_inserted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_inserted}'
+ - description: Number of rows returned by queries in the database.
+ name: postgresql.tup_returned
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_returned}'
+ - description: Number of rows updated by queries in the database.
+ name: postgresql.tup_updated
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_updated}'
scope:
name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver
version: latest
diff --git a/receiver/postgresqlreceiver/testdata/integration/expected_single_db_connpool.yaml b/receiver/postgresqlreceiver/testdata/integration/expected_single_db_connpool.yaml
index 512e38c76b89..eccc2d04d749 100644
--- a/receiver/postgresqlreceiver/testdata/integration/expected_single_db_connpool.yaml
+++ b/receiver/postgresqlreceiver/testdata/integration/expected_single_db_connpool.yaml
@@ -335,6 +335,26 @@ resourceMetrics:
startTimeUnixNano: "1706802337738657906"
timeUnixNano: "1706802396744882628"
unit: "1"
+ - description: Number of times disk blocks were found already in the buffer cache.
+ name: postgresql.blks_hit
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802402706723341"
+ timeUnixNano: "1706802461712893428"
+ isMonotonic: true
+ unit: '{blks_hit}'
+ - description: Number of disk blocks read in this database.
+ name: postgresql.blks_read
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802402706723341"
+ timeUnixNano: "1706802461712893428"
+ isMonotonic: true
+ unit: '{blks_read}'
- description: The number of commits.
name: postgresql.commits
sum:
@@ -393,6 +413,56 @@ resourceMetrics:
timeUnixNano: "1706802396744882628"
isMonotonic: true
unit: '{temp_file}'
+ - description: Number of rows deleted by queries in the database.
+ name: postgresql.tup_deleted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802402706723341"
+ timeUnixNano: "1706802461712893428"
+ isMonotonic: true
+ unit: '{tup_deleted}'
+ - description: Number of rows fetched by queries in the database.
+ name: postgresql.tup_fetched
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802402706723341"
+ timeUnixNano: "1706802461712893428"
+ isMonotonic: true
+ unit: '{tup_fetched}'
+ - description: Number of rows inserted by queries in the database.
+ name: postgresql.tup_inserted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802402706723341"
+ timeUnixNano: "1706802461712893428"
+ isMonotonic: true
+ unit: '{tup_inserted}'
+ - description: Number of rows returned by queries in the database.
+ name: postgresql.tup_returned
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802402706723341"
+ timeUnixNano: "1706802461712893428"
+ isMonotonic: true
+ unit: '{tup_returned}'
+ - description: Number of rows updated by queries in the database.
+ name: postgresql.tup_updated
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1706802402706723341"
+ timeUnixNano: "1706802461712893428"
+ isMonotonic: true
+ unit: '{tup_updated}'
scope:
name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver
version: latest
diff --git a/receiver/postgresqlreceiver/testdata/integration/expected_single_db_schemaattr.yaml b/receiver/postgresqlreceiver/testdata/integration/expected_single_db_schemaattr.yaml
index fdcf6e524aec..07c2e86e9a0a 100644
--- a/receiver/postgresqlreceiver/testdata/integration/expected_single_db_schemaattr.yaml
+++ b/receiver/postgresqlreceiver/testdata/integration/expected_single_db_schemaattr.yaml
@@ -196,6 +196,26 @@ resourceMetrics:
stringValue: otel
scopeMetrics:
- metrics:
+ - description: Number of times disk blocks were found already in the buffer cache.
+ name: postgresql.blks_hit
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_hit}'
+ - description: Number of disk blocks read in this database.
+ name: postgresql.blks_read
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_read}'
- description: The number of commits.
name: postgresql.commits
sum:
@@ -254,6 +274,56 @@ resourceMetrics:
timeUnixNano: "2000000"
isMonotonic: true
unit: '{temp_file}'
+ - description: Number of rows deleted by queries in the database.
+ name: postgresql.tup_deleted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_deleted}'
+ - description: Number of rows fetched by queries in the database.
+ name: postgresql.tup_fetched
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_fetched}'
+ - description: Number of rows inserted by queries in the database.
+ name: postgresql.tup_inserted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_inserted}'
+ - description: Number of rows returned by queries in the database.
+ name: postgresql.tup_returned
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_returned}'
+ - description: Number of rows updated by queries in the database.
+ name: postgresql.tup_updated
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_updated}'
scope:
name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver
version: latest
diff --git a/receiver/postgresqlreceiver/testdata/scraper/multiple/expected.yaml b/receiver/postgresqlreceiver/testdata/scraper/multiple/expected.yaml
index 7518c7be6df5..2fb0cfbeed3f 100644
--- a/receiver/postgresqlreceiver/testdata/scraper/multiple/expected.yaml
+++ b/receiver/postgresqlreceiver/testdata/scraper/multiple/expected.yaml
@@ -221,6 +221,26 @@ resourceMetrics:
startTimeUnixNano: "1000000"
timeUnixNano: "2000000"
unit: "1"
+ - description: Number of times disk blocks were found already in the buffer cache.
+ name: postgresql.blks_hit
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "11"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_hit}'
+ - description: Number of disk blocks read in this database.
+ name: postgresql.blks_read
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "12"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_read}'
- description: The number of commits.
name: postgresql.commits
sum:
@@ -279,6 +299,56 @@ resourceMetrics:
timeUnixNano: "2000000"
isMonotonic: true
unit: '{temp_file}'
+ - description: Number of rows deleted by queries in the database.
+ name: postgresql.tup_deleted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "10"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_deleted}'
+ - description: Number of rows fetched by queries in the database.
+ name: postgresql.tup_fetched
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "8"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_fetched}'
+ - description: Number of rows inserted by queries in the database.
+ name: postgresql.tup_inserted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "9"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_inserted}'
+ - description: Number of rows returned by queries in the database.
+ name: postgresql.tup_returned
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "7"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_returned}'
+ - description: Number of rows updated by queries in the database.
+ name: postgresql.tup_updated
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "6"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_updated}'
scope:
name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver
version: latest
@@ -298,6 +368,26 @@ resourceMetrics:
startTimeUnixNano: "1000000"
timeUnixNano: "2000000"
unit: "1"
+ - description: Number of times disk blocks were found already in the buffer cache.
+ name: postgresql.blks_hit
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "10"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_hit}'
+ - description: Number of disk blocks read in this database.
+ name: postgresql.blks_read
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "11"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_read}'
- description: The number of commits.
name: postgresql.commits
sum:
@@ -356,6 +446,56 @@ resourceMetrics:
timeUnixNano: "2000000"
isMonotonic: true
unit: '{temp_file}'
+ - description: Number of rows deleted by queries in the database.
+ name: postgresql.tup_deleted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "9"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_deleted}'
+ - description: Number of rows fetched by queries in the database.
+ name: postgresql.tup_fetched
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "7"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_fetched}'
+ - description: Number of rows inserted by queries in the database.
+ name: postgresql.tup_inserted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "8"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_inserted}'
+ - description: Number of rows returned by queries in the database.
+ name: postgresql.tup_returned
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "6"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_returned}'
+ - description: Number of rows updated by queries in the database.
+ name: postgresql.tup_updated
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "5"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_updated}'
scope:
name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver
version: latest
@@ -375,6 +515,26 @@ resourceMetrics:
startTimeUnixNano: "1000000"
timeUnixNano: "2000000"
unit: "1"
+ - description: Number of times disk blocks were found already in the buffer cache.
+ name: postgresql.blks_hit
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "12"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_hit}'
+ - description: Number of disk blocks read in this database.
+ name: postgresql.blks_read
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "13"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_read}'
- description: The number of commits.
name: postgresql.commits
sum:
@@ -433,6 +593,56 @@ resourceMetrics:
timeUnixNano: "2000000"
isMonotonic: true
unit: '{temp_file}'
+ - description: Number of rows deleted by queries in the database.
+ name: postgresql.tup_deleted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "11"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_deleted}'
+ - description: Number of rows fetched by queries in the database.
+ name: postgresql.tup_fetched
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "9"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_fetched}'
+ - description: Number of rows inserted by queries in the database.
+ name: postgresql.tup_inserted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "10"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_inserted}'
+ - description: Number of rows returned by queries in the database.
+ name: postgresql.tup_returned
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "8"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_returned}'
+ - description: Number of rows updated by queries in the database.
+ name: postgresql.tup_updated
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "7"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_updated}'
scope:
name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver
version: latest
diff --git a/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_imprecise_lag.yaml b/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_imprecise_lag.yaml
index c2d52cd2bec9..0eebeb9b25f2 100644
--- a/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_imprecise_lag.yaml
+++ b/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_imprecise_lag.yaml
@@ -221,6 +221,26 @@ resourceMetrics:
startTimeUnixNano: "1000000"
timeUnixNano: "2000000"
unit: "1"
+ - description: Number of times disk blocks were found already in the buffer cache.
+ name: postgresql.blks_hit
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "11"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_hit}'
+ - description: Number of disk blocks read in this database.
+ name: postgresql.blks_read
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "12"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_read}'
- description: The number of commits.
name: postgresql.commits
sum:
@@ -279,6 +299,56 @@ resourceMetrics:
timeUnixNano: "2000000"
isMonotonic: true
unit: '{temp_file}'
+ - description: Number of rows deleted by queries in the database.
+ name: postgresql.tup_deleted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "10"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_deleted}'
+ - description: Number of rows fetched by queries in the database.
+ name: postgresql.tup_fetched
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "8"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_fetched}'
+ - description: Number of rows inserted by queries in the database.
+ name: postgresql.tup_inserted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "9"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_inserted}'
+ - description: Number of rows returned by queries in the database.
+ name: postgresql.tup_returned
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "7"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_returned}'
+ - description: Number of rows updated by queries in the database.
+ name: postgresql.tup_updated
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "6"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_updated}'
scope:
name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver
version: latest
@@ -298,6 +368,26 @@ resourceMetrics:
startTimeUnixNano: "1000000"
timeUnixNano: "2000000"
unit: "1"
+ - description: Number of times disk blocks were found already in the buffer cache.
+ name: postgresql.blks_hit
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "10"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_hit}'
+ - description: Number of disk blocks read in this database.
+ name: postgresql.blks_read
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "11"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_read}'
- description: The number of commits.
name: postgresql.commits
sum:
@@ -356,6 +446,56 @@ resourceMetrics:
timeUnixNano: "2000000"
isMonotonic: true
unit: '{temp_file}'
+ - description: Number of rows deleted by queries in the database.
+ name: postgresql.tup_deleted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "9"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_deleted}'
+ - description: Number of rows fetched by queries in the database.
+ name: postgresql.tup_fetched
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "7"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_fetched}'
+ - description: Number of rows inserted by queries in the database.
+ name: postgresql.tup_inserted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "8"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_inserted}'
+ - description: Number of rows returned by queries in the database.
+ name: postgresql.tup_returned
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "6"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_returned}'
+ - description: Number of rows updated by queries in the database.
+ name: postgresql.tup_updated
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "5"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_updated}'
scope:
name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver
version: latest
@@ -375,6 +515,26 @@ resourceMetrics:
startTimeUnixNano: "1000000"
timeUnixNano: "2000000"
unit: "1"
+ - description: Number of times disk blocks were found already in the buffer cache.
+ name: postgresql.blks_hit
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "12"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_hit}'
+ - description: Number of disk blocks read in this database.
+ name: postgresql.blks_read
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "13"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_read}'
- description: The number of commits.
name: postgresql.commits
sum:
@@ -433,6 +593,56 @@ resourceMetrics:
timeUnixNano: "2000000"
isMonotonic: true
unit: '{temp_file}'
+ - description: Number of rows deleted by queries in the database.
+ name: postgresql.tup_deleted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "11"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_deleted}'
+ - description: Number of rows fetched by queries in the database.
+ name: postgresql.tup_fetched
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "9"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_fetched}'
+ - description: Number of rows inserted by queries in the database.
+ name: postgresql.tup_inserted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "10"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_inserted}'
+ - description: Number of rows returned by queries in the database.
+ name: postgresql.tup_returned
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "8"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_returned}'
+ - description: Number of rows updated by queries in the database.
+ name: postgresql.tup_updated
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "7"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_updated}'
scope:
name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver
version: latest
diff --git a/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_imprecise_lag_schemaattr.yaml b/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_imprecise_lag_schemaattr.yaml
index edc6795581e0..694dfef86fe3 100644
--- a/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_imprecise_lag_schemaattr.yaml
+++ b/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_imprecise_lag_schemaattr.yaml
@@ -221,6 +221,26 @@ resourceMetrics:
startTimeUnixNano: "1000000"
timeUnixNano: "2000000"
unit: "1"
+ - description: Number of times disk blocks were found already in the buffer cache.
+ name: postgresql.blks_hit
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "11"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_hit}'
+ - description: Number of disk blocks read in this database.
+ name: postgresql.blks_read
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "12"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_read}'
- description: The number of commits.
name: postgresql.commits
sum:
@@ -279,6 +299,56 @@ resourceMetrics:
timeUnixNano: "2000000"
isMonotonic: true
unit: '{temp_file}'
+ - description: Number of rows deleted by queries in the database.
+ name: postgresql.tup_deleted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "10"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_deleted}'
+ - description: Number of rows fetched by queries in the database.
+ name: postgresql.tup_fetched
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "8"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_fetched}'
+ - description: Number of rows inserted by queries in the database.
+ name: postgresql.tup_inserted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "9"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_inserted}'
+ - description: Number of rows returned by queries in the database.
+ name: postgresql.tup_returned
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "7"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_returned}'
+ - description: Number of rows updated by queries in the database.
+ name: postgresql.tup_updated
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "6"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_updated}'
scope:
name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver
version: latest
@@ -298,6 +368,26 @@ resourceMetrics:
startTimeUnixNano: "1000000"
timeUnixNano: "2000000"
unit: "1"
+ - description: Number of times disk blocks were found already in the buffer cache.
+ name: postgresql.blks_hit
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "10"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_hit}'
+ - description: Number of disk blocks read in this database.
+ name: postgresql.blks_read
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "11"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_read}'
- description: The number of commits.
name: postgresql.commits
sum:
@@ -356,6 +446,56 @@ resourceMetrics:
timeUnixNano: "2000000"
isMonotonic: true
unit: '{temp_file}'
+ - description: Number of rows deleted by queries in the database.
+ name: postgresql.tup_deleted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "9"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_deleted}'
+ - description: Number of rows fetched by queries in the database.
+ name: postgresql.tup_fetched
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "7"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_fetched}'
+ - description: Number of rows inserted by queries in the database.
+ name: postgresql.tup_inserted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "8"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_inserted}'
+ - description: Number of rows returned by queries in the database.
+ name: postgresql.tup_returned
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "6"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_returned}'
+ - description: Number of rows updated by queries in the database.
+ name: postgresql.tup_updated
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "5"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_updated}'
scope:
name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver
version: latest
@@ -375,6 +515,26 @@ resourceMetrics:
startTimeUnixNano: "1000000"
timeUnixNano: "2000000"
unit: "1"
+ - description: Number of times disk blocks were found already in the buffer cache.
+ name: postgresql.blks_hit
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "12"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_hit}'
+ - description: Number of disk blocks read in this database.
+ name: postgresql.blks_read
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "13"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_read}'
- description: The number of commits.
name: postgresql.commits
sum:
@@ -433,6 +593,56 @@ resourceMetrics:
timeUnixNano: "2000000"
isMonotonic: true
unit: '{temp_file}'
+ - description: Number of rows deleted by queries in the database.
+ name: postgresql.tup_deleted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "11"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_deleted}'
+ - description: Number of rows fetched by queries in the database.
+ name: postgresql.tup_fetched
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "9"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_fetched}'
+ - description: Number of rows inserted by queries in the database.
+ name: postgresql.tup_inserted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "10"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_inserted}'
+ - description: Number of rows returned by queries in the database.
+ name: postgresql.tup_returned
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "8"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_returned}'
+ - description: Number of rows updated by queries in the database.
+ name: postgresql.tup_updated
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "7"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_updated}'
scope:
name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver
version: latest
diff --git a/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_schemaattr.yaml b/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_schemaattr.yaml
index 785e977b5dd2..29f1db008b3c 100644
--- a/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_schemaattr.yaml
+++ b/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_schemaattr.yaml
@@ -221,6 +221,26 @@ resourceMetrics:
startTimeUnixNano: "1000000"
timeUnixNano: "2000000"
unit: "1"
+ - description: Number of times disk blocks were found already in the buffer cache.
+ name: postgresql.blks_hit
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "11"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_hit}'
+ - description: Number of disk blocks read in this database.
+ name: postgresql.blks_read
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "12"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_read}'
- description: The number of commits.
name: postgresql.commits
sum:
@@ -279,6 +299,56 @@ resourceMetrics:
timeUnixNano: "2000000"
isMonotonic: true
unit: '{temp_file}'
+ - description: Number of rows deleted by queries in the database.
+ name: postgresql.tup_deleted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "10"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_deleted}'
+ - description: Number of rows fetched by queries in the database.
+ name: postgresql.tup_fetched
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "8"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_fetched}'
+ - description: Number of rows inserted by queries in the database.
+ name: postgresql.tup_inserted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "9"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_inserted}'
+ - description: Number of rows returned by queries in the database.
+ name: postgresql.tup_returned
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "7"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_returned}'
+ - description: Number of rows updated by queries in the database.
+ name: postgresql.tup_updated
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "6"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_updated}'
scope:
name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver
version: latest
@@ -298,6 +368,26 @@ resourceMetrics:
startTimeUnixNano: "1000000"
timeUnixNano: "2000000"
unit: "1"
+ - description: Number of times disk blocks were found already in the buffer cache.
+ name: postgresql.blks_hit
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "10"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_hit}'
+ - description: Number of disk blocks read in this database.
+ name: postgresql.blks_read
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "11"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_read}'
- description: The number of commits.
name: postgresql.commits
sum:
@@ -356,6 +446,56 @@ resourceMetrics:
timeUnixNano: "2000000"
isMonotonic: true
unit: '{temp_file}'
+ - description: Number of rows deleted by queries in the database.
+ name: postgresql.tup_deleted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "9"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_deleted}'
+ - description: Number of rows fetched by queries in the database.
+ name: postgresql.tup_fetched
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "7"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_fetched}'
+ - description: Number of rows inserted by queries in the database.
+ name: postgresql.tup_inserted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "8"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_inserted}'
+ - description: Number of rows returned by queries in the database.
+ name: postgresql.tup_returned
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "6"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_returned}'
+ - description: Number of rows updated by queries in the database.
+ name: postgresql.tup_updated
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "5"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_updated}'
scope:
name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver
version: latest
@@ -375,6 +515,26 @@ resourceMetrics:
startTimeUnixNano: "1000000"
timeUnixNano: "2000000"
unit: "1"
+ - description: Number of times disk blocks were found already in the buffer cache.
+ name: postgresql.blks_hit
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "12"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_hit}'
+ - description: Number of disk blocks read in this database.
+ name: postgresql.blks_read
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "13"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_read}'
- description: The number of commits.
name: postgresql.commits
sum:
@@ -433,6 +593,56 @@ resourceMetrics:
timeUnixNano: "2000000"
isMonotonic: true
unit: '{temp_file}'
+ - description: Number of rows deleted by queries in the database.
+ name: postgresql.tup_deleted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "11"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_deleted}'
+ - description: Number of rows fetched by queries in the database.
+ name: postgresql.tup_fetched
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "9"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_fetched}'
+ - description: Number of rows inserted by queries in the database.
+ name: postgresql.tup_inserted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "10"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_inserted}'
+ - description: Number of rows returned by queries in the database.
+ name: postgresql.tup_returned
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "8"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_returned}'
+ - description: Number of rows updated by queries in the database.
+ name: postgresql.tup_updated
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "7"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_updated}'
scope:
name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver
version: latest
diff --git a/receiver/postgresqlreceiver/testdata/scraper/otel/expected.yaml b/receiver/postgresqlreceiver/testdata/scraper/otel/expected.yaml
index ca8d32dfb96e..5b9b2fdcc84e 100644
--- a/receiver/postgresqlreceiver/testdata/scraper/otel/expected.yaml
+++ b/receiver/postgresqlreceiver/testdata/scraper/otel/expected.yaml
@@ -221,6 +221,26 @@ resourceMetrics:
startTimeUnixNano: "1000000"
timeUnixNano: "2000000"
unit: "1"
+ - description: Number of times disk blocks were found already in the buffer cache.
+ name: postgresql.blks_hit
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "10"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_hit}'
+ - description: Number of disk blocks read in this database.
+ name: postgresql.blks_read
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "11"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_read}'
- description: The number of commits.
name: postgresql.commits
sum:
@@ -279,6 +299,56 @@ resourceMetrics:
timeUnixNano: "2000000"
isMonotonic: true
unit: '{temp_file}'
+ - description: Number of rows deleted by queries in the database.
+ name: postgresql.tup_deleted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "9"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_deleted}'
+ - description: Number of rows fetched by queries in the database.
+ name: postgresql.tup_fetched
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "7"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_fetched}'
+ - description: Number of rows inserted by queries in the database.
+ name: postgresql.tup_inserted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "8"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_inserted}'
+ - description: Number of rows returned by queries in the database.
+ name: postgresql.tup_returned
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "6"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_returned}'
+ - description: Number of rows updated by queries in the database.
+ name: postgresql.tup_updated
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "5"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_updated}'
scope:
name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver
version: latest
diff --git a/receiver/postgresqlreceiver/testdata/scraper/otel/expected_schemaattr.yaml b/receiver/postgresqlreceiver/testdata/scraper/otel/expected_schemaattr.yaml
index e335959a9871..c7908ab42202 100644
--- a/receiver/postgresqlreceiver/testdata/scraper/otel/expected_schemaattr.yaml
+++ b/receiver/postgresqlreceiver/testdata/scraper/otel/expected_schemaattr.yaml
@@ -221,6 +221,26 @@ resourceMetrics:
startTimeUnixNano: "1000000"
timeUnixNano: "2000000"
unit: "1"
+ - description: Number of times disk blocks were found already in the buffer cache.
+ name: postgresql.blks_hit
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "10"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_hit}'
+ - description: Number of disk blocks read in this database.
+ name: postgresql.blks_read
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "11"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_read}'
- description: The number of commits.
name: postgresql.commits
sum:
@@ -279,6 +299,56 @@ resourceMetrics:
timeUnixNano: "2000000"
isMonotonic: true
unit: '{temp_file}'
+ - description: Number of rows deleted by queries in the database.
+ name: postgresql.tup_deleted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "9"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_deleted}'
+ - description: Number of rows fetched by queries in the database.
+ name: postgresql.tup_fetched
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "7"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_fetched}'
+ - description: Number of rows inserted by queries in the database.
+ name: postgresql.tup_inserted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "8"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_inserted}'
+ - description: Number of rows returned by queries in the database.
+ name: postgresql.tup_returned
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "6"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_returned}'
+ - description: Number of rows updated by queries in the database.
+ name: postgresql.tup_updated
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "5"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_updated}'
scope:
name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver
version: latest
From 9a52558b2503547c5406ac6804c76110bbeb6622 Mon Sep 17 00:00:00 2001
From: Alex Boten <223565+codeboten@users.noreply.github.com>
Date: Wed, 27 Nov 2024 15:15:56 -0800
Subject: [PATCH 18/23] [chore] skip v1.68.0 of grpc dep (#36577)
Trying to add this to avoid getting notified until the next release is
available.
Related to
https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/36548
Signed-off-by: Alex Boten <223565+codeboten@users.noreply.github.com>
---
renovate.json | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/renovate.json b/renovate.json
index 3e65271caca1..395451d596aa 100644
--- a/renovate.json
+++ b/renovate.json
@@ -108,6 +108,10 @@
"https://github.com/open-telemetry/opentelemetry-go-contrib"
],
"groupName": "All opentelemetry-go-contrib packages"
+ },
+ {
+ "matchPackageNames": ["google.golang.org/grpc"],
+ "allowedVersions": "!/v1.68.0$/"
}
],
"ignoreDeps": [
From 54691ebe11bb9ec32b4e35cd31fcb94a352de134 Mon Sep 17 00:00:00 2001
From: Christos Markou
Date: Thu, 28 Nov 2024 16:08:29 +0200
Subject: [PATCH 19/23] Revert "Update dependency tcort/markdown-link-check to
v3.13.6" (#36586)
Reverts open-telemetry/opentelemetry-collector-contrib#36555.
After the update of the version the check is failing:
https://github.com/open-telemetry/opentelemetry-collector-contrib/actions/runs/12068945579/job/33655141758#step:10:16
The PR that introduced the dependency had the version pinned:
https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/36552
---
.github/workflows/changelog.yml | 2 +-
.github/workflows/check-links.yaml | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/.github/workflows/changelog.yml b/.github/workflows/changelog.yml
index b9af5b10798e..97a9d78c2b3d 100644
--- a/.github/workflows/changelog.yml
+++ b/.github/workflows/changelog.yml
@@ -16,7 +16,7 @@ env:
# We limit cache download as a whole to 5 minutes.
SEGMENT_DOWNLOAD_TIMEOUT_MINS: 2
# renovate: datasource=github-releases depName=tcort/markdown-link-check
- MD_LINK_CHECK_VERSION: "3.13.6"
+ MD_LINK_CHECK_VERSION: "3.12.2"
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref }}
diff --git a/.github/workflows/check-links.yaml b/.github/workflows/check-links.yaml
index 911cc4563eab..bbfd0bb2ed49 100644
--- a/.github/workflows/check-links.yaml
+++ b/.github/workflows/check-links.yaml
@@ -11,7 +11,7 @@ concurrency:
env:
# renovate: datasource=github-releases depName=tcort/markdown-link-check
- MD_LINK_CHECK_VERSION: "3.13.6"
+ MD_LINK_CHECK_VERSION: "3.12.2"
jobs:
changedfiles:
From 539042d0c718458227d0e6517893d94fc66901a4 Mon Sep 17 00:00:00 2001
From: Pablo Baeyens
Date: Fri, 29 Nov 2024 09:37:40 +0100
Subject: [PATCH 20/23] [chore] Update READMEs to point to security best
practices doc (#36590)
#### Description
Counterpart to open-telemetry/opentelemetry-collector/pull/11773
---
extension/awsproxy/README.md | 2 +-
extension/healthcheckextension/README.md | 2 +-
extension/healthcheckv2extension/README.md | 2 +-
extension/jaegerremotesampling/README.md | 2 +-
processor/remotetapprocessor/README.md | 2 +-
receiver/awsfirehosereceiver/README.md | 2 +-
receiver/awsxrayreceiver/README.md | 5 ++---
receiver/influxdbreceiver/README.md | 2 +-
receiver/jaegerreceiver/README.md | 2 +-
receiver/lokireceiver/README.md | 2 +-
receiver/opencensusreceiver/README.md | 2 +-
receiver/sapmreceiver/README.md | 2 +-
receiver/signalfxreceiver/README.md | 2 +-
receiver/skywalkingreceiver/README.md | 2 +-
receiver/splunkhecreceiver/README.md | 2 +-
receiver/zipkinreceiver/README.md | 2 +-
receiver/zookeeperreceiver/README.md | 2 +-
17 files changed, 18 insertions(+), 19 deletions(-)
diff --git a/extension/awsproxy/README.md b/extension/awsproxy/README.md
index dbe62a19dc87..103eef258750 100644
--- a/extension/awsproxy/README.md
+++ b/extension/awsproxy/README.md
@@ -41,7 +41,7 @@ The TCP address and port on which this proxy listens for requests.
Default: `localhost:2000`
-You can temporarily disable the `component.UseLocalHostAsDefaultHost` feature gate to change this to `0.0.0.0:2000`. This feature gate will be removed in a future release.
+See our [security best practices doc](https://opentelemetry.io/docs/security/config-best-practices/#protect-against-denial-of-service-attacks) to understand how to set the endpoint in different environments.
### proxy_address (Optional)
diff --git a/extension/healthcheckextension/README.md b/extension/healthcheckextension/README.md
index 3986147fab09..615129115ef5 100644
--- a/extension/healthcheckextension/README.md
+++ b/extension/healthcheckextension/README.md
@@ -29,7 +29,7 @@ liveness and/or readiness probe on Kubernetes.
The following settings are required:
-- `endpoint` (default = localhost:13133): Address to publish the health check status. For full list of `ServerConfig` refer [here](https://github.com/open-telemetry/opentelemetry-collector/tree/main/config/confighttp). You can temporarily disable the `component.UseLocalHostAsDefaultHost` feature gate to change this to 0.0.0.0:13133. This feature gate will be removed in a future release.
+- `endpoint` (default = localhost:13133): Address to publish the health check status. For full list of `ServerConfig` refer [here](https://github.com/open-telemetry/opentelemetry-collector/tree/main/config/confighttp). See our [security best practices doc](https://opentelemetry.io/docs/security/config-best-practices/#protect-against-denial-of-service-attacks) to understand how to set the endpoint in different environments.
- `path` (default = "/"): Specifies the path to be configured for the health check server.
- `response_body` (default = ""): Specifies a static body that overrides the default response returned by the health check service.
diff --git a/extension/healthcheckv2extension/README.md b/extension/healthcheckv2extension/README.md
index 2f8c440afb8b..86192094d093 100644
--- a/extension/healthcheckv2extension/README.md
+++ b/extension/healthcheckv2extension/README.md
@@ -35,7 +35,7 @@ liveness and/or readiness probe on Kubernetes.
The following settings are required:
-- `endpoint` (default = localhost:13133): Address to publish the health check status. For full list of `ServerConfig` refer [here](https://github.com/open-telemetry/opentelemetry-collector/tree/main/config/confighttp). You can temporarily disable the `component.UseLocalHostAsDefaultHost` feature gate to change this to 0.0.0.0:13133. This feature gate will be removed in a future release.
+- `endpoint` (default = localhost:13133): Address to publish the health check status. For full list of `ServerConfig` refer [here](https://github.com/open-telemetry/opentelemetry-collector/tree/main/config/confighttp). See our [security best practices doc](https://opentelemetry.io/docs/security/config-best-practices/#protect-against-denial-of-service-attacks) to understand how to set the endpoint in different environments.
- `path` (default = "/"): Specifies the path to be configured for the health check server.
- `response_body` (default = ""): Specifies a static body that overrides the default response returned by the health check service.
- `check_collector_pipeline:` (deprecated and ignored): Settings of collector pipeline health check
diff --git a/extension/jaegerremotesampling/README.md b/extension/jaegerremotesampling/README.md
index 73d35cfa2d1f..a152cef1ff89 100644
--- a/extension/jaegerremotesampling/README.md
+++ b/extension/jaegerremotesampling/README.md
@@ -17,7 +17,7 @@ By default, two listeners are made available:
- `localhost:5778`, following the legacy remote sampling endpoint as defined by Jaeger
- `localhost:14250`, following the gRPC remote sampling endpoint, also defined by Jaeger
-You can temporarily disable the `component.UseLocalHostAsDefaultHost` feature gate to change this to `0.0.0.0:5778` and `0.0.0.0:14250` respectively. This feature gate will be removed in a future release.
+See our [security best practices doc](https://opentelemetry.io/docs/security/config-best-practices/#protect-against-denial-of-service-attacks) to understand how to set the endpoint in different environments.
Note that the port `14250` will clash with the Jaeger Receiver. When both are used, it's recommended to change this extension to use another port.
diff --git a/processor/remotetapprocessor/README.md b/processor/remotetapprocessor/README.md
index deac68c238c2..1ca1e295adc0 100644
--- a/processor/remotetapprocessor/README.md
+++ b/processor/remotetapprocessor/README.md
@@ -27,7 +27,7 @@ The Remote Tap processor has two configurable fields: `endpoint` and `limit`:
- `endpoint`: The endpoint on which the WebSocket processor listens. Optional. Defaults
to `localhost:12001`.
- You can temporarily disable the `component.UseLocalHostAsDefaultHost` feature gate to change this to `0.0.0.0:12001`. This feature gate will be removed in a future release.
+ See our [security best practices doc](https://opentelemetry.io/docs/security/config-best-practices/#protect-against-denial-of-service-attacks) to understand how to set the endpoint in different environments.
- `limit`: The rate limit over the WebSocket in messages per second. Can be a
float or an integer. Optional. Defaults to `1`.
diff --git a/receiver/awsfirehosereceiver/README.md b/receiver/awsfirehosereceiver/README.md
index 0d6d38793fe0..64c20f13d7a2 100644
--- a/receiver/awsfirehosereceiver/README.md
+++ b/receiver/awsfirehosereceiver/README.md
@@ -38,7 +38,7 @@ The address:port to bind the listener to.
default: `localhost:4433`
-You can temporarily disable the `component.UseLocalHostAsDefaultHost` feature gate to change this to `0.0.0.0:4433`. This feature gate will be removed in a future release.
+See our [security best practices doc](https://opentelemetry.io/docs/security/config-best-practices/#protect-against-denial-of-service-attacks) to understand how to set the endpoint in different environments.
### tls:
See [documentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md#server-configuration) for more details.
diff --git a/receiver/awsxrayreceiver/README.md b/receiver/awsxrayreceiver/README.md
index 55174cb9c0f2..5163880970a0 100644
--- a/receiver/awsxrayreceiver/README.md
+++ b/receiver/awsxrayreceiver/README.md
@@ -45,8 +45,7 @@ The UDP address and port on which this receiver listens for X-Ray segment docume
Default: `localhost:2000`
-You can temporarily disable the `component.UseLocalHostAsDefaultHost` feature gate to change this to `0.0.0.0:2000`. This feature gate will be removed in a future release.
-
+See our [security best practices doc](https://opentelemetry.io/docs/security/config-best-practices/#protect-against-denial-of-service-attacks) to understand how to set the endpoint in different environments.
### transport (Optional)
This should always be "udp" as X-Ray SDKs only send segments using UDP.
@@ -61,7 +60,7 @@ The TCP address and port on which this receiver listens for calls from the X-Ray
Default: `0.0.0.0:2000`
-The `component.UseLocalHostAsDefaultHost` feature gate changes this to localhost:2000. This will become the default in a future release.
+See our [security best practices doc](https://opentelemetry.io/docs/security/config-best-practices/#protect-against-denial-of-service-attacks) to understand how to set the endpoint in different environments.
### proxy_address (Optional)
Defines the proxy address that the local TCP server forwards HTTP requests to AWS X-Ray backend through. If left unconfigured, requests will be sent directly.
diff --git a/receiver/influxdbreceiver/README.md b/receiver/influxdbreceiver/README.md
index 5afdc21fb9ef..6a2a3695d57f 100644
--- a/receiver/influxdbreceiver/README.md
+++ b/receiver/influxdbreceiver/README.md
@@ -27,7 +27,7 @@ Write responses:
The following configuration options are supported:
-* `endpoint` (default = localhost:8086) HTTP service endpoint for the line protocol receiver. You can temporarily disable the `component.UseLocalHostAsDefaultHost` feature gate to change this to `0.0.0.0:8086`. This feature gate will be removed in a future release.
+* `endpoint` (default = localhost:8086) HTTP service endpoint for the line protocol receiver. See our [security best practices doc](https://opentelemetry.io/docs/security/config-best-practices/#protect-against-denial-of-service-attacks) to understand how to set the endpoint in different environments.
The full list of settings exposed for this receiver are documented in [config.go](config.go).
diff --git a/receiver/jaegerreceiver/README.md b/receiver/jaegerreceiver/README.md
index e01b8e454156..6039fefa5908 100644
--- a/receiver/jaegerreceiver/README.md
+++ b/receiver/jaegerreceiver/README.md
@@ -28,7 +28,7 @@ object configuration parameter.
- `thrift_compact` (default `endpoint` = localhost:6831)
- `thrift_http` (default `endpoint` = localhost:14268)
-You can temporarily disable the `component.UseLocalHostAsDefaultHost` feature gate to change tgese endpoints to 0.0.0.0:14250, 0.0.0.0:6832, 0.0.0.0:6831 and 0.0.0.0:14268. This feature gate will be removed in a future release.
+See our [security best practices doc](https://opentelemetry.io/docs/security/config-best-practices/#protect-against-denial-of-service-attacks) to understand how to set the endpoint in different environments.
Examples:
diff --git a/receiver/lokireceiver/README.md b/receiver/lokireceiver/README.md
index 1def93850cc2..7786ce40873d 100644
--- a/receiver/lokireceiver/README.md
+++ b/receiver/lokireceiver/README.md
@@ -21,7 +21,7 @@ This receiver runs HTTP and GRPC servers to ingest log entries in Loki format.
The settings are:
-- `endpoint` (required, default = localhost:3500 for HTTP protocol, localhost:3600 gRPC protocol): host:port to which the receiver is going to receive data. You can temporarily disable the `component.UseLocalHostAsDefaultHost` feature gate to change this to `0.0.0.0:3500` and `0.0.0.0:3600`. This feature gate will be removed in a future release.
+- `endpoint` (required, default = localhost:3500 for HTTP protocol, localhost:3600 gRPC protocol): host:port to which the receiver is going to receive data. See our [security best practices doc](https://opentelemetry.io/docs/security/config-best-practices/#protect-against-denial-of-service-attacks) to understand how to set the endpoint in different environments.
- `use_incoming_timestamp` (optional, default = false) if set `true` the timestamp from Loki log entry is used
Example:
diff --git a/receiver/opencensusreceiver/README.md b/receiver/opencensusreceiver/README.md
index be85a074ed6d..65aba34c54e5 100644
--- a/receiver/opencensusreceiver/README.md
+++ b/receiver/opencensusreceiver/README.md
@@ -31,7 +31,7 @@ The following settings are configurable:
- `endpoint` (default = localhost:55678): host:port to which the receiver is
going to receive data. The valid syntax is described at
- https://github.com/grpc/grpc/blob/master/doc/naming.md. You can temporarily disable the `component.UseLocalHostAsDefaultHost` feature gate to change this to `0.0.0.0:55678`. This feature gate will be removed in a future release.
+ https://github.com/grpc/grpc/blob/master/doc/naming.md. See our [security best practices doc](https://opentelemetry.io/docs/security/config-best-practices/#protect-against-denial-of-service-attacks) to understand how to set the endpoint in different environments.
## Advanced Configuration
diff --git a/receiver/sapmreceiver/README.md b/receiver/sapmreceiver/README.md
index 77cda3c06d88..e2fec058f597 100644
--- a/receiver/sapmreceiver/README.md
+++ b/receiver/sapmreceiver/README.md
@@ -25,7 +25,7 @@ The following settings are required:
- `endpoint` (default = `localhost:7276`): Address and port that the SAPM
receiver should bind to.
- You can temporarily disable the `component.UseLocalHostAsDefaultHost` feature gate to change this to `0.0.0.0:7276`. This feature gate will be removed in a future release.
+ See our [security best practices doc](https://opentelemetry.io/docs/security/config-best-practices/#protect-against-denial-of-service-attacks) to understand how to set the endpoint in different environments.
The following setting are optional:
diff --git a/receiver/signalfxreceiver/README.md b/receiver/signalfxreceiver/README.md
index 11737ec84cd8..b2b5fd171240 100644
--- a/receiver/signalfxreceiver/README.md
+++ b/receiver/signalfxreceiver/README.md
@@ -28,7 +28,7 @@ The following settings are required:
- `endpoint` (default = `localhost:9943`): Address and port that the SignalFx
receiver should bind to.
- You can temporarily disable the `component.UseLocalHostAsDefaultHost` feature gate to change this to `0.0.0.0:9943`. This feature gate will be removed in a future release.
+ See our [security best practices doc](https://opentelemetry.io/docs/security/config-best-practices/#protect-against-denial-of-service-attacks) to understand how to set the endpoint in different environments.
The following settings are optional:
diff --git a/receiver/skywalkingreceiver/README.md b/receiver/skywalkingreceiver/README.md
index 1302d4507e55..58225100734f 100644
--- a/receiver/skywalkingreceiver/README.md
+++ b/receiver/skywalkingreceiver/README.md
@@ -32,7 +32,7 @@ object configuration parameter.
- `grpc` (default `endpoint` = localhost:11800)
- `http` (default `endpoint` = localhost:12800)
-You can temporarily disable the `component.UseLocalHostAsDefaultHost` feature gate to change these to `0.0.0.0:11800` and `0.0.0.0:12800`. This feature gate will be removed in a future release.
+See our [security best practices doc](https://opentelemetry.io/docs/security/config-best-practices/#protect-against-denial-of-service-attacks) to understand how to set the endpoint in different environments.
Examples:
diff --git a/receiver/splunkhecreceiver/README.md b/receiver/splunkhecreceiver/README.md
index c58dbcad491f..1887c71ea022 100644
--- a/receiver/splunkhecreceiver/README.md
+++ b/receiver/splunkhecreceiver/README.md
@@ -28,7 +28,7 @@ The following settings are required:
* `endpoint` (default = `localhost:8088`): Address and port that the Splunk HEC
receiver should bind to.
-You can temporarily disable the `component.UseLocalHostAsDefaultHost` feature gate to change this to `0.0.0.0:8088`. This feature gate will be removed in a future release.
+See our [security best practices doc](https://opentelemetry.io/docs/security/config-best-practices/#protect-against-denial-of-service-attacks) to understand how to set the endpoint in different environments.
The following settings are optional:
diff --git a/receiver/zipkinreceiver/README.md b/receiver/zipkinreceiver/README.md
index cf13a6043fe4..0fc0939be843 100644
--- a/receiver/zipkinreceiver/README.md
+++ b/receiver/zipkinreceiver/README.md
@@ -28,7 +28,7 @@ receivers:
The following settings are configurable:
-- `endpoint` (default = localhost:9411): host:port on which the receiver is going to receive data.You can temporarily disable the `component.UseLocalHostAsDefaultHost` feature gate to change this to `0.0.0.0:9411`. This feature gate will be removed in a future release. For full list of `ServerConfig` refer [here](https://github.com/open-telemetry/opentelemetry-collector/tree/main/config/confighttp).
+- `endpoint` (default = localhost:9411): host:port on which the receiver is going to receive data. See our [security best practices doc](https://opentelemetry.io/docs/security/config-best-practices/#protect-against-denial-of-service-attacks) to understand how to set the endpoint in different environments. For full list of `ServerConfig` refer [here](https://github.com/open-telemetry/opentelemetry-collector/tree/main/config/confighttp).
- `parse_string_tags` (default = false): if enabled, the receiver will attempt to parse string tags/binary annotations into int/bool/float.
## Advanced Configuration
diff --git a/receiver/zookeeperreceiver/README.md b/receiver/zookeeperreceiver/README.md
index eeb05b0b207a..3819dafea1cb 100644
--- a/receiver/zookeeperreceiver/README.md
+++ b/receiver/zookeeperreceiver/README.md
@@ -17,7 +17,7 @@ to be enabled for the receiver to be able to collect metrics.
## Configuration
-- `endpoint`: (default = `localhost:2181`) Endpoint to connect to collect metrics. Takes the form `host:port`. You can temporarily disable the `component.UseLocalHostAsDefaultHost` feature gate to change this to `0.0.0.0:2181`. This feature gate will be removed in a future release.
+- `endpoint`: (default = `localhost:2181`) Endpoint to connect to collect metrics. Takes the form `host:port`. See our [security best practices doc](https://opentelemetry.io/docs/security/config-best-practices/#protect-against-denial-of-service-attacks) to understand how to set the endpoint in different environments.
- `timeout`: (default = `10s`) Timeout within which requests should be completed.
- `initial_delay` (default = `1s`): defines how long this receiver waits before starting.
From c961731471f2f4108223be26bb1204480a15d858 Mon Sep 17 00:00:00 2001
From: Vihas Makwana <121151420+VihasMakwana@users.noreply.github.com>
Date: Sat, 30 Nov 2024 02:01:39 +0530
Subject: [PATCH 21/23] [receiver/awsfirehose] follow receiver contract
(#36124)
---
.chloggen/awsfirehose-contract.yaml | 25 +++++++++++++++++++
receiver/awsfirehosereceiver/go.mod | 2 +-
receiver/awsfirehosereceiver/logs_receiver.go | 6 ++++-
.../awsfirehosereceiver/logs_receiver_test.go | 8 +++++-
.../awsfirehosereceiver/metrics_receiver.go | 6 ++++-
.../metrics_receiver_test.go | 8 +++++-
6 files changed, 50 insertions(+), 5 deletions(-)
create mode 100644 .chloggen/awsfirehose-contract.yaml
diff --git a/.chloggen/awsfirehose-contract.yaml b/.chloggen/awsfirehose-contract.yaml
new file mode 100644
index 000000000000..32c4fa039026
--- /dev/null
+++ b/.chloggen/awsfirehose-contract.yaml
@@ -0,0 +1,25 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: breaking
+
+# The name of the component, or a single word describing the area of concern, (e.g. otlpreceiver)
+component: awsfirehosereceiver
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Follow receiver contract based on type of error
+
+# One or more tracking issues or pull requests related to the change
+issues: [5909]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
+
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: []
diff --git a/receiver/awsfirehosereceiver/go.mod b/receiver/awsfirehosereceiver/go.mod
index 018c9220cd77..3748359a80cd 100644
--- a/receiver/awsfirehosereceiver/go.mod
+++ b/receiver/awsfirehosereceiver/go.mod
@@ -14,6 +14,7 @@ require (
go.opentelemetry.io/collector/config/configtls v1.20.0
go.opentelemetry.io/collector/confmap v1.20.0
go.opentelemetry.io/collector/consumer v0.114.0
+ go.opentelemetry.io/collector/consumer/consumererror v0.114.0
go.opentelemetry.io/collector/consumer/consumertest v0.114.0
go.opentelemetry.io/collector/pdata v1.20.0
go.opentelemetry.io/collector/receiver v0.114.0
@@ -50,7 +51,6 @@ require (
go.opentelemetry.io/collector/config/configcompression v1.20.0 // indirect
go.opentelemetry.io/collector/config/configtelemetry v0.114.0 // indirect
go.opentelemetry.io/collector/config/internal v0.114.0 // indirect
- go.opentelemetry.io/collector/consumer/consumererror v0.114.0 // indirect
go.opentelemetry.io/collector/consumer/consumerprofiles v0.114.0 // indirect
go.opentelemetry.io/collector/extension v0.114.0 // indirect
go.opentelemetry.io/collector/extension/auth v0.114.0 // indirect
diff --git a/receiver/awsfirehosereceiver/logs_receiver.go b/receiver/awsfirehosereceiver/logs_receiver.go
index 2bf02f3bd400..570e6cf1e745 100644
--- a/receiver/awsfirehosereceiver/logs_receiver.go
+++ b/receiver/awsfirehosereceiver/logs_receiver.go
@@ -8,6 +8,7 @@ import (
"net/http"
"go.opentelemetry.io/collector/consumer"
+ "go.opentelemetry.io/collector/consumer/consumererror"
"go.opentelemetry.io/collector/receiver"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsfirehosereceiver/internal/unmarshaler"
@@ -80,7 +81,10 @@ func (mc *logsConsumer) Consume(ctx context.Context, records [][]byte, commonAtt
err = mc.consumer.ConsumeLogs(ctx, md)
if err != nil {
- return http.StatusInternalServerError, err
+ if consumererror.IsPermanent(err) {
+ return http.StatusBadRequest, err
+ }
+ return http.StatusServiceUnavailable, err
}
return http.StatusOK, nil
}
diff --git a/receiver/awsfirehosereceiver/logs_receiver_test.go b/receiver/awsfirehosereceiver/logs_receiver_test.go
index da448640ddb4..6739f8137929 100644
--- a/receiver/awsfirehosereceiver/logs_receiver_test.go
+++ b/receiver/awsfirehosereceiver/logs_receiver_test.go
@@ -11,6 +11,7 @@ import (
"github.com/stretchr/testify/require"
"go.opentelemetry.io/collector/consumer"
+ "go.opentelemetry.io/collector/consumer/consumererror"
"go.opentelemetry.io/collector/consumer/consumertest"
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/receiver/receivertest"
@@ -79,9 +80,14 @@ func TestLogsConsumer(t *testing.T) {
wantStatus: http.StatusBadRequest,
wantErr: testErr,
},
+ "WithConsumerErrorPermanent": {
+ consumerErr: consumererror.NewPermanent(testErr),
+ wantStatus: http.StatusBadRequest,
+ wantErr: consumererror.NewPermanent(testErr),
+ },
"WithConsumerError": {
consumerErr: testErr,
- wantStatus: http.StatusInternalServerError,
+ wantStatus: http.StatusServiceUnavailable,
wantErr: testErr,
},
"WithNoError": {
diff --git a/receiver/awsfirehosereceiver/metrics_receiver.go b/receiver/awsfirehosereceiver/metrics_receiver.go
index e1eb841f4c4b..4a5128583ac0 100644
--- a/receiver/awsfirehosereceiver/metrics_receiver.go
+++ b/receiver/awsfirehosereceiver/metrics_receiver.go
@@ -9,6 +9,7 @@ import (
"net/http"
"go.opentelemetry.io/collector/consumer"
+ "go.opentelemetry.io/collector/consumer/consumererror"
"go.opentelemetry.io/collector/receiver"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsfirehosereceiver/internal/unmarshaler"
@@ -82,7 +83,10 @@ func (mc *metricsConsumer) Consume(ctx context.Context, records [][]byte, common
err = mc.consumer.ConsumeMetrics(ctx, md)
if err != nil {
- return http.StatusInternalServerError, err
+ if consumererror.IsPermanent(err) {
+ return http.StatusBadRequest, err
+ }
+ return http.StatusServiceUnavailable, err
}
return http.StatusOK, nil
}
diff --git a/receiver/awsfirehosereceiver/metrics_receiver_test.go b/receiver/awsfirehosereceiver/metrics_receiver_test.go
index efe6bf7ccbd3..d32ec4efc8a5 100644
--- a/receiver/awsfirehosereceiver/metrics_receiver_test.go
+++ b/receiver/awsfirehosereceiver/metrics_receiver_test.go
@@ -12,6 +12,7 @@ import (
"github.com/stretchr/testify/require"
"go.opentelemetry.io/collector/consumer"
+ "go.opentelemetry.io/collector/consumer/consumererror"
"go.opentelemetry.io/collector/consumer/consumertest"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/receiver/receivertest"
@@ -80,9 +81,14 @@ func TestMetricsConsumer(t *testing.T) {
wantStatus: http.StatusBadRequest,
wantErr: testErr,
},
+ "WithConsumerErrorPermanent": {
+ consumerErr: consumererror.NewPermanent(testErr),
+ wantStatus: http.StatusBadRequest,
+ wantErr: consumererror.NewPermanent(testErr),
+ },
"WithConsumerError": {
consumerErr: testErr,
- wantStatus: http.StatusInternalServerError,
+ wantStatus: http.StatusServiceUnavailable,
wantErr: testErr,
},
"WithNoError": {
From 4b452856182b6a301374eb2f57768853d38e3419 Mon Sep 17 00:00:00 2001
From: Alex Kats <56042997+akats7@users.noreply.github.com>
Date: Sat, 30 Nov 2024 09:33:58 -0500
Subject: [PATCH 22/23] Failover connector max retry bug fix (#36605)
---
.chloggen/failover-max-retry-fix.yaml | 27 ++++++++
connector/failoverconnector/failover_test.go | 68 ++++++++++++++++++-
.../internal/state/pipeline_selector.go | 13 ++--
3 files changed, 98 insertions(+), 10 deletions(-)
create mode 100644 .chloggen/failover-max-retry-fix.yaml
diff --git a/.chloggen/failover-max-retry-fix.yaml b/.chloggen/failover-max-retry-fix.yaml
new file mode 100644
index 000000000000..10de74cc0762
--- /dev/null
+++ b/.chloggen/failover-max-retry-fix.yaml
@@ -0,0 +1,27 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: bug_fix
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: failoverconnector
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Resolves a bug that prevents proper recovery when disabling max retries
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [36587]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: []
diff --git a/connector/failoverconnector/failover_test.go b/connector/failoverconnector/failover_test.go
index 603bcde86781..6ef406835612 100644
--- a/connector/failoverconnector/failover_test.go
+++ b/connector/failoverconnector/failover_test.go
@@ -202,7 +202,7 @@ func TestFailoverRecovery_MaxRetries(t *testing.T) {
failoverConnector.failover.ModifyConsumerAtIndex(0, consumertest.NewErr(errTracesConsumer))
failoverConnector.failover.ModifyConsumerAtIndex(1, consumertest.NewErr(errTracesConsumer))
- failoverConnector.failover.pS.SetRetryCountToMax(0)
+ failoverConnector.failover.pS.SetRetryCountToValue(0, cfg.MaxRetries)
require.Eventually(t, func() bool {
return consumeTracesAndCheckStable(failoverConnector, 2, tr)
@@ -211,11 +211,77 @@ func TestFailoverRecovery_MaxRetries(t *testing.T) {
failoverConnector.failover.ModifyConsumerAtIndex(0, &sinkFirst)
failoverConnector.failover.ModifyConsumerAtIndex(1, &sinkSecond)
+ // Check that level 0 is skipped because max retry value is hit
require.Eventually(t, func() bool {
return consumeTracesAndCheckStable(failoverConnector, 1, tr)
}, 3*time.Second, 5*time.Millisecond)
}
+func TestFailoverRecovery_MaxRetriesDisabled(t *testing.T) {
+ var sinkFirst, sinkSecond, sinkThird, sinkFourth consumertest.TracesSink
+ tracesFirst := pipeline.NewIDWithName(pipeline.SignalTraces, "traces/first")
+ tracesSecond := pipeline.NewIDWithName(pipeline.SignalTraces, "traces/second")
+ tracesThird := pipeline.NewIDWithName(pipeline.SignalTraces, "traces/third")
+ tracesFourth := pipeline.NewIDWithName(pipeline.SignalTraces, "traces/fourth")
+
+ cfg := &Config{
+ PipelinePriority: [][]pipeline.ID{{tracesFirst}, {tracesSecond}, {tracesThird}, {tracesFourth}},
+ RetryInterval: 50 * time.Millisecond,
+ RetryGap: 10 * time.Millisecond,
+ MaxRetries: 0,
+ }
+
+ router := connector.NewTracesRouter(map[pipeline.ID]consumer.Traces{
+ tracesFirst: &sinkFirst,
+ tracesSecond: &sinkSecond,
+ tracesThird: &sinkThird,
+ tracesFourth: &sinkFourth,
+ })
+
+ conn, err := NewFactory().CreateTracesToTraces(context.Background(),
+ connectortest.NewNopSettings(), cfg, router.(consumer.Traces))
+
+ require.NoError(t, err)
+
+ failoverConnector := conn.(*tracesFailover)
+
+ tr := sampleTrace()
+
+ defer func() {
+ assert.NoError(t, failoverConnector.Shutdown(context.Background()))
+ }()
+
+ failoverConnector.failover.ModifyConsumerAtIndex(0, consumertest.NewErr(errTracesConsumer))
+ failoverConnector.failover.ModifyConsumerAtIndex(1, consumertest.NewErr(errTracesConsumer))
+
+ require.Eventually(t, func() bool {
+ return consumeTracesAndCheckStable(failoverConnector, 2, tr)
+ }, 3*time.Second, 5*time.Millisecond)
+
+ failoverConnector.failover.ModifyConsumerAtIndex(0, &sinkFirst)
+ failoverConnector.failover.ModifyConsumerAtIndex(1, &sinkSecond)
+
+ require.Eventually(t, func() bool {
+ return consumeTracesAndCheckStable(failoverConnector, 0, tr)
+ }, 3*time.Second, 5*time.Millisecond)
+
+ failoverConnector.failover.ModifyConsumerAtIndex(0, consumertest.NewErr(errTracesConsumer))
+ failoverConnector.failover.ModifyConsumerAtIndex(1, consumertest.NewErr(errTracesConsumer))
+ failoverConnector.failover.pS.SetRetryCountToValue(0, cfg.MaxRetries)
+
+ require.Eventually(t, func() bool {
+ return consumeTracesAndCheckStable(failoverConnector, 2, tr)
+ }, 3*time.Second, 5*time.Millisecond)
+
+ failoverConnector.failover.ModifyConsumerAtIndex(0, &sinkFirst)
+ failoverConnector.failover.ModifyConsumerAtIndex(1, &sinkSecond)
+
+ // Check that still resets to level 0 even though max retry value is hit
+ require.Eventually(t, func() bool {
+ return consumeTracesAndCheckStable(failoverConnector, 0, tr)
+ }, 3*time.Second, 5*time.Millisecond)
+}
+
func resetConsumers(conn *tracesFailover, consumers ...consumer.Traces) {
for i, sink := range consumers {
conn.failover.ModifyConsumerAtIndex(i, sink)
diff --git a/connector/failoverconnector/internal/state/pipeline_selector.go b/connector/failoverconnector/internal/state/pipeline_selector.go
index a0f395513b67..08bcedf9bb5f 100644
--- a/connector/failoverconnector/internal/state/pipeline_selector.go
+++ b/connector/failoverconnector/internal/state/pipeline_selector.go
@@ -92,7 +92,7 @@ func (p *PipelineSelector) retryHighPriorityPipelines(ctx context.Context, retry
defer ticker.Stop()
for i := 0; i < len(p.pipelineRetries); i++ {
- if p.maxRetriesUsed(i) {
+ if p.exceededMaxRetries(i) {
continue
}
select {
@@ -110,7 +110,7 @@ func (p *PipelineSelector) retryHighPriorityPipelines(ctx context.Context, retry
// checkContinueRetry checks if retry should be suspended if all higher priority levels have exceeded their max retries
func (p *PipelineSelector) checkContinueRetry(index int) bool {
for i := 0; i < index; i++ {
- if p.loadRetryCount(i) < p.constants.MaxRetries {
+ if p.constants.MaxRetries == 0 || p.loadRetryCount(i) < p.constants.MaxRetries {
return true
}
}
@@ -127,11 +127,6 @@ func (p *PipelineSelector) setToStableIndex(idx int) {
p.currentIndex.Store(p.stableIndex.Load())
}
-// MaxRetriesUsed exported access to maxRetriesUsed
-func (p *PipelineSelector) maxRetriesUsed(idx int) bool {
- return p.loadRetryCount(idx) >= p.constants.MaxRetries
-}
-
// SetNewStableIndex Update stableIndex to the passed stable index
func (p *PipelineSelector) setNewStableIndex(idx int) {
p.resetRetryCount(idx)
@@ -249,8 +244,8 @@ func (p *PipelineSelector) TestRetryPipelines(ctx context.Context, retryInterval
p.enableRetry(ctx, retryInterval, retryGap)
}
-func (p *PipelineSelector) SetRetryCountToMax(idx int) {
- p.pipelineRetries[idx].Store(int32(p.constants.MaxRetries))
+func (p *PipelineSelector) SetRetryCountToValue(idx int, val int) {
+ p.pipelineRetries[idx].Store(int32(val))
}
func (p *PipelineSelector) ResetRetryCount(idx int) {
From bc7d967ab722d28e7c658d37491e5d8e507c353e Mon Sep 17 00:00:00 2001
From: Daniel Jaglowski
Date: Sun, 1 Dec 2024 21:34:12 -0500
Subject: [PATCH 23/23] [connector/routing] Add ability to route by datapoint
context (#36523)
---
.chloggen/routing-by-datapoints-2.yaml | 27 +
connector/routingconnector/README.md | 4 +-
connector/routingconnector/config.go | 2 +-
connector/routingconnector/config_test.go | 16 +
.../internal/pmetricutil/metrics.go | 193 +++
.../internal/pmetricutil/metrics_test.go | 1360 ++++++++++++++++-
.../internal/pmetricutiltest/metrics.go | 154 +-
.../internal/pmetricutiltest/metrics_test.go | 483 +++++-
connector/routingconnector/metrics.go | 10 +
connector/routingconnector/metrics_test.go | 378 +++--
connector/routingconnector/router.go | 50 +-
11 files changed, 2455 insertions(+), 222 deletions(-)
create mode 100644 .chloggen/routing-by-datapoints-2.yaml
diff --git a/.chloggen/routing-by-datapoints-2.yaml b/.chloggen/routing-by-datapoints-2.yaml
new file mode 100644
index 000000000000..12144704d843
--- /dev/null
+++ b/.chloggen/routing-by-datapoints-2.yaml
@@ -0,0 +1,27 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: enhancement
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: routingconnector
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Add ability to route by 'datapoint' context
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [36523]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: []
diff --git a/connector/routingconnector/README.md b/connector/routingconnector/README.md
index d7a3a7731419..0198da0978ee 100644
--- a/connector/routingconnector/README.md
+++ b/connector/routingconnector/README.md
@@ -33,7 +33,7 @@ If you are not already familiar with connectors, you may find it helpful to firs
The following settings are available:
- `table (required)`: the routing table for this connector.
-- `table.context (optional, default: resource)`: the [OTTL Context] in which the statement will be evaluated. Currently, only `resource`, `span`, `metric`, `log`, and `request` are supported.
+- `table.context (optional, default: resource)`: the [OTTL Context] in which the statement will be evaluated. Currently, only `resource`, `span`, `metric`, `datapoint`, `log`, and `request` are supported.
- `table.statement`: the routing condition provided as the [OTTL] statement. Required if `table.condition` is not provided. May not be used for `request` context.
- `table.condition`: the routing condition provided as the [OTTL] condition. Required if `table.statement` is not provided. Required for `request` context.
- `table.pipelines (required)`: the list of pipelines to use when the routing condition is met.
@@ -43,7 +43,7 @@ The following settings are available:
### Limitations
-- The `match_once` setting is only supported when using the `resource` context. If any routes use `span`, `metric`, `log` or `request` context, `match_once` must be set to `true`.
+- The `match_once` setting is only supported when using the `resource` context. If any routes use `span`, `metric`, `datapoint`, `log` or `request` context, `match_once` must be set to `true`.
- The `request` context requires use of the `condition` setting, and relies on a very limited grammar. Conditions must be in the form of `request["key"] == "value"` or `request["key"] != "value"`. (In the future, this grammar may be expanded to support more complex conditions.)
### Supported [OTTL] functions
diff --git a/connector/routingconnector/config.go b/connector/routingconnector/config.go
index 33a0c702bca9..b3d5add7160e 100644
--- a/connector/routingconnector/config.go
+++ b/connector/routingconnector/config.go
@@ -77,7 +77,7 @@ func (c *Config) Validate() error {
return err
}
fallthrough
- case "span", "metric", "log": // ok
+ case "span", "metric", "datapoint", "log": // ok
if !c.MatchOnce {
return fmt.Errorf(`%q context is not supported with "match_once: false"`, item.Context)
}
diff --git a/connector/routingconnector/config_test.go b/connector/routingconnector/config_test.go
index 4a0ef0d0d5a4..7356804005d8 100644
--- a/connector/routingconnector/config_test.go
+++ b/connector/routingconnector/config_test.go
@@ -250,6 +250,22 @@ func TestValidateConfig(t *testing.T) {
},
error: `"metric" context is not supported with "match_once: false"`,
},
+ {
+ name: "datapoint context with match_once false",
+ config: &Config{
+ MatchOnce: false,
+ Table: []RoutingTableItem{
+ {
+ Context: "datapoint",
+ Statement: `route() where attributes["attr"] == "acme"`,
+ Pipelines: []pipeline.ID{
+ pipeline.NewIDWithName(pipeline.SignalTraces, "otlp"),
+ },
+ },
+ },
+ },
+ error: `"datapoint" context is not supported with "match_once: false"`,
+ },
{
name: "log context with match_once false",
config: &Config{
diff --git a/connector/routingconnector/internal/pmetricutil/metrics.go b/connector/routingconnector/internal/pmetricutil/metrics.go
index 58199dc02fe8..3744506405de 100644
--- a/connector/routingconnector/internal/pmetricutil/metrics.go
+++ b/connector/routingconnector/internal/pmetricutil/metrics.go
@@ -59,3 +59,196 @@ func MoveMetricsWithContextIf(from, to pmetric.Metrics, f func(pmetric.ResourceM
return rm.ScopeMetrics().Len() == 0
})
}
+
+// MoveDataPointsWithContextIf calls f sequentially for each DataPoint present in the first pmetric.Metrics.
+// If f returns true, the element is removed from the first pmetric.Metrics and added to the second pmetric.Metrics.
+// Notably, the Resource, Scope, and Metric associated with the DataPoint are created in the second pmetric.Metrics only once.
+// Resources, Scopes, or Metrics are removed from the original if they become empty. All ordering is preserved.
+func MoveDataPointsWithContextIf(from, to pmetric.Metrics, f func(pmetric.ResourceMetrics, pmetric.ScopeMetrics, pmetric.Metric, any) bool) {
+ rms := from.ResourceMetrics()
+ for i := 0; i < rms.Len(); i++ {
+ rm := rms.At(i)
+ sms := rm.ScopeMetrics()
+ var rmCopy *pmetric.ResourceMetrics
+ for j := 0; j < sms.Len(); j++ {
+ sm := sms.At(j)
+ ms := sm.Metrics()
+ var smCopy *pmetric.ScopeMetrics
+ for k := 0; k < ms.Len(); k++ {
+ m := ms.At(k)
+ var mCopy *pmetric.Metric
+
+ // TODO condense this code
+ switch m.Type() {
+ case pmetric.MetricTypeGauge:
+ dps := m.Gauge().DataPoints()
+ dps.RemoveIf(func(dp pmetric.NumberDataPoint) bool {
+ if !f(rm, sm, m, dp) {
+ return false
+ }
+ if rmCopy == nil {
+ rmc := to.ResourceMetrics().AppendEmpty()
+ rmCopy = &rmc
+ rm.Resource().CopyTo(rmCopy.Resource())
+ rmCopy.SetSchemaUrl(rm.SchemaUrl())
+ }
+ if smCopy == nil {
+ smc := rmCopy.ScopeMetrics().AppendEmpty()
+ smCopy = &smc
+ sm.Scope().CopyTo(smCopy.Scope())
+ smCopy.SetSchemaUrl(sm.SchemaUrl())
+ }
+ if mCopy == nil {
+ mc := smCopy.Metrics().AppendEmpty()
+ mCopy = &mc
+ mCopy.SetName(m.Name())
+ mCopy.SetDescription(m.Description())
+ mCopy.SetUnit(m.Unit())
+ mCopy.SetEmptyGauge()
+ }
+ dp.CopyTo(mCopy.Gauge().DataPoints().AppendEmpty())
+ return true
+ })
+ case pmetric.MetricTypeSum:
+ dps := m.Sum().DataPoints()
+ dps.RemoveIf(func(dp pmetric.NumberDataPoint) bool {
+ if !f(rm, sm, m, dp) {
+ return false
+ }
+ if rmCopy == nil {
+ rmc := to.ResourceMetrics().AppendEmpty()
+ rmCopy = &rmc
+ rm.Resource().CopyTo(rmCopy.Resource())
+ rmCopy.SetSchemaUrl(rm.SchemaUrl())
+ }
+ if smCopy == nil {
+ smc := rmCopy.ScopeMetrics().AppendEmpty()
+ smCopy = &smc
+ sm.Scope().CopyTo(smCopy.Scope())
+ smCopy.SetSchemaUrl(sm.SchemaUrl())
+ }
+ if mCopy == nil {
+ mc := smCopy.Metrics().AppendEmpty()
+ mCopy = &mc
+ mCopy.SetName(m.Name())
+ mCopy.SetDescription(m.Description())
+ mCopy.SetUnit(m.Unit())
+ mCopy.SetEmptySum()
+ }
+ dp.CopyTo(mCopy.Sum().DataPoints().AppendEmpty())
+ return true
+ })
+ case pmetric.MetricTypeHistogram:
+ dps := m.Histogram().DataPoints()
+ dps.RemoveIf(func(dp pmetric.HistogramDataPoint) bool {
+ if !f(rm, sm, m, dp) {
+ return false
+ }
+ if rmCopy == nil {
+ rmc := to.ResourceMetrics().AppendEmpty()
+ rmCopy = &rmc
+ rm.Resource().CopyTo(rmCopy.Resource())
+ rmCopy.SetSchemaUrl(rm.SchemaUrl())
+ }
+ if smCopy == nil {
+ smc := rmCopy.ScopeMetrics().AppendEmpty()
+ smCopy = &smc
+ sm.Scope().CopyTo(smCopy.Scope())
+ smCopy.SetSchemaUrl(sm.SchemaUrl())
+ }
+ if mCopy == nil {
+ mc := smCopy.Metrics().AppendEmpty()
+ mCopy = &mc
+ mCopy.SetName(m.Name())
+ mCopy.SetDescription(m.Description())
+ mCopy.SetUnit(m.Unit())
+ mCopy.SetEmptyHistogram()
+ }
+ dp.CopyTo(mCopy.Histogram().DataPoints().AppendEmpty())
+ return true
+ })
+ case pmetric.MetricTypeExponentialHistogram:
+ dps := m.ExponentialHistogram().DataPoints()
+ dps.RemoveIf(func(dp pmetric.ExponentialHistogramDataPoint) bool {
+ if !f(rm, sm, m, dp) {
+ return false
+ }
+ if rmCopy == nil {
+ rmc := to.ResourceMetrics().AppendEmpty()
+ rmCopy = &rmc
+ rm.Resource().CopyTo(rmCopy.Resource())
+ rmCopy.SetSchemaUrl(rm.SchemaUrl())
+ }
+ if smCopy == nil {
+ smc := rmCopy.ScopeMetrics().AppendEmpty()
+ smCopy = &smc
+ sm.Scope().CopyTo(smCopy.Scope())
+ smCopy.SetSchemaUrl(sm.SchemaUrl())
+ }
+ if mCopy == nil {
+ mc := smCopy.Metrics().AppendEmpty()
+ mCopy = &mc
+ mCopy.SetName(m.Name())
+ mCopy.SetDescription(m.Description())
+ mCopy.SetUnit(m.Unit())
+ mCopy.SetEmptyExponentialHistogram()
+ }
+ dp.CopyTo(mCopy.ExponentialHistogram().DataPoints().AppendEmpty())
+ return true
+ })
+ case pmetric.MetricTypeSummary:
+ dps := m.Summary().DataPoints()
+ dps.RemoveIf(func(dp pmetric.SummaryDataPoint) bool {
+ if !f(rm, sm, m, dp) {
+ return false
+ }
+ if rmCopy == nil {
+ rmc := to.ResourceMetrics().AppendEmpty()
+ rmCopy = &rmc
+ rm.Resource().CopyTo(rmCopy.Resource())
+ rmCopy.SetSchemaUrl(rm.SchemaUrl())
+ }
+ if smCopy == nil {
+ smc := rmCopy.ScopeMetrics().AppendEmpty()
+ smCopy = &smc
+ sm.Scope().CopyTo(smCopy.Scope())
+ smCopy.SetSchemaUrl(sm.SchemaUrl())
+ }
+ if mCopy == nil {
+ mc := smCopy.Metrics().AppendEmpty()
+ mCopy = &mc
+ mCopy.SetName(m.Name())
+ mCopy.SetDescription(m.Description())
+ mCopy.SetUnit(m.Unit())
+ mCopy.SetEmptySummary()
+ }
+ dp.CopyTo(mCopy.Summary().DataPoints().AppendEmpty())
+ return true
+ })
+ }
+ }
+ ms.RemoveIf(func(m pmetric.Metric) bool {
+ var numDPs int
+ switch m.Type() {
+ case pmetric.MetricTypeGauge:
+ numDPs = m.Gauge().DataPoints().Len()
+ case pmetric.MetricTypeSum:
+ numDPs = m.Sum().DataPoints().Len()
+ case pmetric.MetricTypeHistogram:
+ numDPs = m.Histogram().DataPoints().Len()
+ case pmetric.MetricTypeExponentialHistogram:
+ numDPs = m.ExponentialHistogram().DataPoints().Len()
+ case pmetric.MetricTypeSummary:
+ numDPs = m.Summary().DataPoints().Len()
+ }
+ return numDPs == 0
+ })
+ }
+ sms.RemoveIf(func(sm pmetric.ScopeMetrics) bool {
+ return sm.Metrics().Len() == 0
+ })
+ }
+ rms.RemoveIf(func(rm pmetric.ResourceMetrics) bool {
+ return rm.ScopeMetrics().Len() == 0
+ })
+}
diff --git a/connector/routingconnector/internal/pmetricutil/metrics_test.go b/connector/routingconnector/internal/pmetricutil/metrics_test.go
index 1eff9bc21201..371377cf33bb 100644
--- a/connector/routingconnector/internal/pmetricutil/metrics_test.go
+++ b/connector/routingconnector/internal/pmetricutil/metrics_test.go
@@ -28,9 +28,9 @@ func TestMoveResourcesIf(t *testing.T) {
moveIf: func(pmetric.ResourceMetrics) bool {
return false
},
- from: pmetricutiltest.NewMetrics("AB", "CD", "EF", "FG"),
+ from: pmetricutiltest.NewGauges("AB", "CD", "EF", "FG"),
to: pmetric.NewMetrics(),
- expectFrom: pmetricutiltest.NewMetrics("AB", "CD", "EF", "FG"),
+ expectFrom: pmetricutiltest.NewGauges("AB", "CD", "EF", "FG"),
expectTo: pmetric.NewMetrics(),
},
{
@@ -38,10 +38,10 @@ func TestMoveResourcesIf(t *testing.T) {
moveIf: func(pmetric.ResourceMetrics) bool {
return true
},
- from: pmetricutiltest.NewMetrics("AB", "CD", "EF", "FG"),
+ from: pmetricutiltest.NewGauges("AB", "CD", "EF", "FG"),
to: pmetric.NewMetrics(),
expectFrom: pmetric.NewMetrics(),
- expectTo: pmetricutiltest.NewMetrics("AB", "CD", "EF", "FG"),
+ expectTo: pmetricutiltest.NewGauges("AB", "CD", "EF", "FG"),
},
{
name: "move_one",
@@ -49,10 +49,10 @@ func TestMoveResourcesIf(t *testing.T) {
rname, ok := rl.Resource().Attributes().Get("resourceName")
return ok && rname.AsString() == "resourceA"
},
- from: pmetricutiltest.NewMetrics("AB", "CD", "EF", "FG"),
+ from: pmetricutiltest.NewGauges("AB", "CD", "EF", "FG"),
to: pmetric.NewMetrics(),
- expectFrom: pmetricutiltest.NewMetrics("B", "CD", "EF", "FG"),
- expectTo: pmetricutiltest.NewMetrics("A", "CD", "EF", "FG"),
+ expectFrom: pmetricutiltest.NewGauges("B", "CD", "EF", "FG"),
+ expectTo: pmetricutiltest.NewGauges("A", "CD", "EF", "FG"),
},
{
name: "move_to_preexisting",
@@ -60,12 +60,12 @@ func TestMoveResourcesIf(t *testing.T) {
rname, ok := rl.Resource().Attributes().Get("resourceName")
return ok && rname.AsString() == "resourceB"
},
- from: pmetricutiltest.NewMetrics("AB", "CD", "EF", "FG"),
- to: pmetricutiltest.NewMetrics("1", "2", "3", "4"),
- expectFrom: pmetricutiltest.NewMetrics("A", "CD", "EF", "FG"),
+ from: pmetricutiltest.NewGauges("AB", "CD", "EF", "FG"),
+ to: pmetricutiltest.NewGauges("1", "2", "3", "4"),
+ expectFrom: pmetricutiltest.NewGauges("A", "CD", "EF", "FG"),
expectTo: func() pmetric.Metrics {
- move := pmetricutiltest.NewMetrics("B", "CD", "EF", "FG")
- moveTo := pmetricutiltest.NewMetrics("1", "2", "3", "4")
+ move := pmetricutiltest.NewGauges("B", "CD", "EF", "FG")
+ moveTo := pmetricutiltest.NewGauges("1", "2", "3", "4")
move.ResourceMetrics().MoveAndAppendTo(moveTo.ResourceMetrics())
return moveTo
}(),
@@ -95,9 +95,9 @@ func TestMoveMetricsWithContextIf(t *testing.T) {
moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, _ pmetric.Metric) bool {
return false
},
- from: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
+ from: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
to: pmetric.NewMetrics(),
- expectFrom: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
+ expectFrom: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
expectTo: pmetric.NewMetrics(),
},
{
@@ -105,10 +105,10 @@ func TestMoveMetricsWithContextIf(t *testing.T) {
moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, _ pmetric.Metric) bool {
return true
},
- from: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
+ from: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
to: pmetric.NewMetrics(),
expectFrom: pmetric.NewMetrics(),
- expectTo: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
+ expectTo: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
},
{
name: "move_all_from_one_resource",
@@ -116,10 +116,10 @@ func TestMoveMetricsWithContextIf(t *testing.T) {
rname, ok := rl.Resource().Attributes().Get("resourceName")
return ok && rname.AsString() == "resourceB"
},
- from: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
+ from: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
to: pmetric.NewMetrics(),
- expectFrom: pmetricutiltest.NewMetrics("A", "CD", "EF", "GH"),
- expectTo: pmetricutiltest.NewMetrics("B", "CD", "EF", "GH"),
+ expectFrom: pmetricutiltest.NewGauges("A", "CD", "EF", "GH"),
+ expectTo: pmetricutiltest.NewGauges("B", "CD", "EF", "GH"),
},
{
name: "move_all_from_one_scope",
@@ -127,37 +127,37 @@ func TestMoveMetricsWithContextIf(t *testing.T) {
rname, ok := rl.Resource().Attributes().Get("resourceName")
return ok && rname.AsString() == "resourceB" && sl.Scope().Name() == "scopeC"
},
- from: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
+ from: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
to: pmetric.NewMetrics(),
expectFrom: pmetricutiltest.NewMetricsFromOpts(
pmetricutiltest.Resource("A",
pmetricutiltest.Scope("C",
- pmetricutiltest.Metric("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
- pmetricutiltest.Metric("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
),
pmetricutiltest.Scope("D",
- pmetricutiltest.Metric("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
- pmetricutiltest.Metric("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
),
),
pmetricutiltest.Resource("B",
pmetricutiltest.Scope("D",
- pmetricutiltest.Metric("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
- pmetricutiltest.Metric("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
),
),
),
- expectTo: pmetricutiltest.NewMetrics("B", "C", "EF", "GH"),
+ expectTo: pmetricutiltest.NewGauges("B", "C", "EF", "GH"),
},
{
name: "move_all_from_one_scope_in_each_resource",
moveIf: func(_ pmetric.ResourceMetrics, sl pmetric.ScopeMetrics, _ pmetric.Metric) bool {
return sl.Scope().Name() == "scopeD"
},
- from: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
+ from: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
to: pmetric.NewMetrics(),
- expectFrom: pmetricutiltest.NewMetrics("AB", "C", "EF", "GH"),
- expectTo: pmetricutiltest.NewMetrics("AB", "D", "EF", "GH"),
+ expectFrom: pmetricutiltest.NewGauges("AB", "C", "EF", "GH"),
+ expectTo: pmetricutiltest.NewGauges("AB", "D", "EF", "GH"),
},
{
name: "move_one",
@@ -165,40 +165,40 @@ func TestMoveMetricsWithContextIf(t *testing.T) {
rname, ok := rl.Resource().Attributes().Get("resourceName")
return ok && rname.AsString() == "resourceA" && sl.Scope().Name() == "scopeD" && m.Name() == "metricF"
},
- from: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
+ from: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
to: pmetric.NewMetrics(),
expectFrom: pmetricutiltest.NewMetricsFromOpts(
pmetricutiltest.Resource("A",
pmetricutiltest.Scope("C",
- pmetricutiltest.Metric("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
- pmetricutiltest.Metric("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
),
pmetricutiltest.Scope("D",
- pmetricutiltest.Metric("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
),
),
pmetricutiltest.Resource("B",
pmetricutiltest.Scope("C",
- pmetricutiltest.Metric("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
- pmetricutiltest.Metric("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
),
pmetricutiltest.Scope("D",
- pmetricutiltest.Metric("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
- pmetricutiltest.Metric("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
),
),
),
- expectTo: pmetricutiltest.NewMetrics("A", "D", "F", "GH"),
+ expectTo: pmetricutiltest.NewGauges("A", "D", "F", "GH"),
},
{
name: "move_one_from_each_scope",
moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, m pmetric.Metric) bool {
return m.Name() == "metricE"
},
- from: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
+ from: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
to: pmetric.NewMetrics(),
- expectFrom: pmetricutiltest.NewMetrics("AB", "CD", "F", "GH"),
- expectTo: pmetricutiltest.NewMetrics("AB", "CD", "E", "GH"),
+ expectFrom: pmetricutiltest.NewGauges("AB", "CD", "F", "GH"),
+ expectTo: pmetricutiltest.NewGauges("AB", "CD", "E", "GH"),
},
{
name: "move_one_from_each_scope_in_one_resource",
@@ -206,49 +206,49 @@ func TestMoveMetricsWithContextIf(t *testing.T) {
rname, ok := rl.Resource().Attributes().Get("resourceName")
return ok && rname.AsString() == "resourceB" && m.Name() == "metricE"
},
- from: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
+ from: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
to: pmetric.NewMetrics(),
expectFrom: pmetricutiltest.NewMetricsFromOpts(
pmetricutiltest.Resource("A",
pmetricutiltest.Scope("C",
- pmetricutiltest.Metric("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
- pmetricutiltest.Metric("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
),
pmetricutiltest.Scope("D",
- pmetricutiltest.Metric("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
- pmetricutiltest.Metric("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
),
),
pmetricutiltest.Resource("B",
pmetricutiltest.Scope("C",
- pmetricutiltest.Metric("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
),
pmetricutiltest.Scope("D",
- pmetricutiltest.Metric("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
),
),
),
- expectTo: pmetricutiltest.NewMetrics("B", "CD", "E", "GH"),
+ expectTo: pmetricutiltest.NewGauges("B", "CD", "E", "GH"),
},
{
name: "move_some_to_preexisting",
moveIf: func(_ pmetric.ResourceMetrics, sl pmetric.ScopeMetrics, _ pmetric.Metric) bool {
return sl.Scope().Name() == "scopeD"
},
- from: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
- to: pmetricutiltest.NewMetrics("1", "2", "3", "4"),
- expectFrom: pmetricutiltest.NewMetrics("AB", "C", "EF", "GH"),
+ from: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ to: pmetricutiltest.NewGauges("1", "2", "3", "4"),
+ expectFrom: pmetricutiltest.NewGauges("AB", "C", "EF", "GH"),
expectTo: pmetricutiltest.NewMetricsFromOpts(
pmetricutiltest.Resource("1", pmetricutiltest.Scope("2",
- pmetricutiltest.Metric("3", pmetricutiltest.NumberDataPoint("4")),
+ pmetricutiltest.Gauge("3", pmetricutiltest.NumberDataPoint("4")),
)),
pmetricutiltest.Resource("A", pmetricutiltest.Scope("D",
- pmetricutiltest.Metric("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
- pmetricutiltest.Metric("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
)),
pmetricutiltest.Resource("B", pmetricutiltest.Scope("D",
- pmetricutiltest.Metric("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
- pmetricutiltest.Metric("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
)),
),
},
@@ -262,3 +262,1247 @@ func TestMoveMetricsWithContextIf(t *testing.T) {
})
}
}
+
+func TestMoveDataPointsWithContextIf(t *testing.T) {
+ testCases := []struct {
+ name string
+ moveIf func(pmetric.ResourceMetrics, pmetric.ScopeMetrics, pmetric.Metric, any) bool
+ from pmetric.Metrics
+ to pmetric.Metrics
+ expectFrom pmetric.Metrics
+ expectTo pmetric.Metrics
+ }{
+ // gauge
+ {
+ name: "gauge/move_none",
+ moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, _ pmetric.Metric, _ any) bool {
+ return false
+ },
+ from: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectTo: pmetric.NewMetrics(),
+ },
+ {
+ name: "gauge/move_all",
+ moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, _ pmetric.Metric, _ any) bool {
+ return true
+ },
+ from: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetric.NewMetrics(),
+ expectTo: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ },
+ {
+ name: "gauge/move_all_from_one_resource",
+ moveIf: func(rl pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, _ pmetric.Metric, _ any) bool {
+ rname, ok := rl.Resource().Attributes().Get("resourceName")
+ return ok && rname.AsString() == "resourceB"
+ },
+ from: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewGauges("A", "CD", "EF", "GH"),
+ expectTo: pmetricutiltest.NewGauges("B", "CD", "EF", "GH"),
+ },
+ {
+ name: "gauge/move_all_from_one_scope",
+ moveIf: func(rl pmetric.ResourceMetrics, sl pmetric.ScopeMetrics, _ pmetric.Metric, _ any) bool {
+ rname, ok := rl.Resource().Attributes().Get("resourceName")
+ return ok && rname.AsString() == "resourceB" && sl.Scope().Name() == "scopeC"
+ },
+ from: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.Gauge("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Gauge("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ ),
+ ),
+ pmetricutiltest.Resource("B",
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Gauge("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ ),
+ ),
+ ),
+ expectTo: pmetricutiltest.NewGauges("B", "C", "EF", "GH"),
+ },
+ {
+ name: "gauge/move_all_from_one_metric",
+ moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, m pmetric.Metric, _ any) bool {
+ return m.Name() == "metricE"
+ },
+ from: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewGauges("AB", "CD", "F", "GH"),
+ expectTo: pmetricutiltest.NewGauges("AB", "CD", "E", "GH"),
+ },
+ {
+ name: "gauge/move_all_from_one_scope_in_each_resource",
+ moveIf: func(_ pmetric.ResourceMetrics, sl pmetric.ScopeMetrics, _ pmetric.Metric, _ any) bool {
+ return sl.Scope().Name() == "scopeD"
+ },
+ from: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewGauges("AB", "C", "EF", "GH"),
+ expectTo: pmetricutiltest.NewGauges("AB", "D", "EF", "GH"),
+ },
+ {
+ name: "gauge/move_all_from_one_metric_in_each_scope",
+ moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, m pmetric.Metric, _ any) bool {
+ return m.Name() == "metricF"
+ },
+ from: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewGauges("AB", "CD", "E", "GH"),
+ expectTo: pmetricutiltest.NewGauges("AB", "CD", "F", "GH"),
+ },
+ {
+ name: "gauge/move_one",
+ moveIf: func(rl pmetric.ResourceMetrics, sl pmetric.ScopeMetrics, m pmetric.Metric, dp any) bool {
+ rname, ok1 := rl.Resource().Attributes().Get("resourceName")
+ dpname, ok2 := dp.(pmetric.NumberDataPoint).Attributes().Get("dpName")
+ return ok1 && ok2 && rname.AsString() == "resourceA" && sl.Scope().Name() == "scopeD" && m.Name() == "metricF" && dpname.AsString() == "dpG"
+ },
+ from: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.Gauge("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Gauge("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("F", pmetricutiltest.NumberDataPoint("H")),
+ ),
+ ),
+ pmetricutiltest.Resource("B",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.Gauge("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Gauge("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ ),
+ ),
+ ),
+ expectTo: pmetricutiltest.NewGauges("A", "D", "F", "G"),
+ },
+ {
+ name: "gauge/move_one_from_each_resource",
+ moveIf: func(_ pmetric.ResourceMetrics, sl pmetric.ScopeMetrics, m pmetric.Metric, dp any) bool {
+ dpname, ok := dp.(pmetric.NumberDataPoint).Attributes().Get("dpName")
+ return ok && sl.Scope().Name() == "scopeD" && m.Name() == "metricE" && dpname.AsString() == "dpG"
+ },
+ from: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.Gauge("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Gauge("E", pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ ),
+ ),
+ pmetricutiltest.Resource("B",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.Gauge("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Gauge("E", pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ ),
+ ),
+ ),
+ expectTo: pmetricutiltest.NewGauges("AB", "D", "E", "G"),
+ },
+ {
+ name: "gauge/move_one_from_each_scope",
+ moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, m pmetric.Metric, dp any) bool {
+ dpname, ok := dp.(pmetric.NumberDataPoint).Attributes().Get("dpName")
+ return ok && m.Name() == "metricE" && dpname.AsString() == "dpG"
+ },
+ from: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.Gauge("E", pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Gauge("E", pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ ),
+ ),
+ pmetricutiltest.Resource("B",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.Gauge("E", pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Gauge("E", pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ ),
+ ),
+ ),
+ expectTo: pmetricutiltest.NewGauges("AB", "CD", "E", "G"),
+ },
+ {
+ name: "gauge/move_one_from_each_metric",
+ moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, _ pmetric.Metric, dp any) bool {
+ dpname, ok := dp.(pmetric.NumberDataPoint).Attributes().Get("dpName")
+ return ok && dpname.AsString() == "dpG"
+ },
+ from: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewGauges("AB", "CD", "EF", "H"),
+ expectTo: pmetricutiltest.NewGauges("AB", "CD", "EF", "G"),
+ },
+ {
+ name: "gauge/move_one_from_each_scope_in_one_resource",
+ moveIf: func(rl pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, m pmetric.Metric, _ any) bool {
+ rname, ok := rl.Resource().Attributes().Get("resourceName")
+ return ok && rname.AsString() == "resourceB" && m.Name() == "metricE"
+ },
+ from: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.Gauge("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Gauge("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ ),
+ ),
+ pmetricutiltest.Resource("B",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.Gauge("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Gauge("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ ),
+ ),
+ ),
+ expectTo: pmetricutiltest.NewGauges("B", "CD", "E", "GH"),
+ },
+ {
+ name: "gauge/move_some_to_preexisting",
+ moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, _ pmetric.Metric, dp any) bool {
+ dpname, ok := dp.(pmetric.NumberDataPoint).Attributes().Get("dpName")
+ return ok && dpname.AsString() == "dpG"
+ },
+ from: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ to: pmetricutiltest.NewGauges("1", "2", "3", "4"),
+ expectFrom: pmetricutiltest.NewGauges("AB", "CD", "EF", "H"),
+ expectTo: func() pmetric.Metrics {
+ orig := pmetricutiltest.NewGauges("1", "2", "3", "4")
+ extra := pmetricutiltest.NewGauges("AB", "CD", "EF", "G")
+ extra.ResourceMetrics().MoveAndAppendTo(orig.ResourceMetrics())
+ return orig
+ }(),
+ },
+
+ // sum
+ {
+ name: "sum/move_none",
+ moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, _ pmetric.Metric, _ any) bool {
+ return false
+ },
+ from: pmetricutiltest.NewSums("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewSums("AB", "CD", "EF", "GH"),
+ expectTo: pmetric.NewMetrics(),
+ },
+ {
+ name: "sum/move_all",
+ moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, _ pmetric.Metric, _ any) bool {
+ return true
+ },
+ from: pmetricutiltest.NewSums("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetric.NewMetrics(),
+ expectTo: pmetricutiltest.NewSums("AB", "CD", "EF", "GH"),
+ },
+ {
+ name: "sum/move_all_from_one_resource",
+ moveIf: func(rl pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, _ pmetric.Metric, _ any) bool {
+ rname, ok := rl.Resource().Attributes().Get("resourceName")
+ return ok && rname.AsString() == "resourceB"
+ },
+ from: pmetricutiltest.NewSums("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewSums("A", "CD", "EF", "GH"),
+ expectTo: pmetricutiltest.NewSums("B", "CD", "EF", "GH"),
+ },
+ {
+ name: "sum/move_all_from_one_scope",
+ moveIf: func(rl pmetric.ResourceMetrics, sl pmetric.ScopeMetrics, _ pmetric.Metric, _ any) bool {
+ rname, ok := rl.Resource().Attributes().Get("resourceName")
+ return ok && rname.AsString() == "resourceB" && sl.Scope().Name() == "scopeC"
+ },
+ from: pmetricutiltest.NewSums("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.Sum("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Sum("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Sum("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Sum("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ ),
+ ),
+ pmetricutiltest.Resource("B",
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Sum("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Sum("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ ),
+ ),
+ ),
+ expectTo: pmetricutiltest.NewSums("B", "C", "EF", "GH"),
+ },
+ {
+ name: "sum/move_all_from_one_metric",
+ moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, m pmetric.Metric, _ any) bool {
+ return m.Name() == "metricE"
+ },
+ from: pmetricutiltest.NewSums("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewSums("AB", "CD", "F", "GH"),
+ expectTo: pmetricutiltest.NewSums("AB", "CD", "E", "GH"),
+ },
+ {
+ name: "sum/move_all_from_one_scope_in_each_resource",
+ moveIf: func(_ pmetric.ResourceMetrics, sl pmetric.ScopeMetrics, _ pmetric.Metric, _ any) bool {
+ return sl.Scope().Name() == "scopeD"
+ },
+ from: pmetricutiltest.NewSums("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewSums("AB", "C", "EF", "GH"),
+ expectTo: pmetricutiltest.NewSums("AB", "D", "EF", "GH"),
+ },
+ {
+ name: "sum/move_all_from_one_metric_in_each_scope",
+ moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, m pmetric.Metric, _ any) bool {
+ return m.Name() == "metricF"
+ },
+ from: pmetricutiltest.NewSums("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewSums("AB", "CD", "E", "GH"),
+ expectTo: pmetricutiltest.NewSums("AB", "CD", "F", "GH"),
+ },
+ {
+ name: "sum/move_one",
+ moveIf: func(rl pmetric.ResourceMetrics, sl pmetric.ScopeMetrics, m pmetric.Metric, dp any) bool {
+ rname, ok1 := rl.Resource().Attributes().Get("resourceName")
+ dpname, ok2 := dp.(pmetric.NumberDataPoint).Attributes().Get("dpName")
+ return ok1 && ok2 && rname.AsString() == "resourceA" && sl.Scope().Name() == "scopeD" && m.Name() == "metricF" && dpname.AsString() == "dpG"
+ },
+ from: pmetricutiltest.NewSums("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.Sum("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Sum("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Sum("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Sum("F", pmetricutiltest.NumberDataPoint("H")),
+ ),
+ ),
+ pmetricutiltest.Resource("B",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.Sum("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Sum("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Sum("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Sum("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ ),
+ ),
+ ),
+ expectTo: pmetricutiltest.NewSums("A", "D", "F", "G"),
+ },
+ {
+ name: "sum/move_one_from_each_resource",
+ moveIf: func(_ pmetric.ResourceMetrics, sl pmetric.ScopeMetrics, m pmetric.Metric, dp any) bool {
+ dpname, ok := dp.(pmetric.NumberDataPoint).Attributes().Get("dpName")
+ return ok && sl.Scope().Name() == "scopeD" && m.Name() == "metricE" && dpname.AsString() == "dpG"
+ },
+ from: pmetricutiltest.NewSums("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.Sum("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Sum("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Sum("E", pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Sum("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ ),
+ ),
+ pmetricutiltest.Resource("B",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.Sum("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Sum("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Sum("E", pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Sum("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ ),
+ ),
+ ),
+ expectTo: pmetricutiltest.NewSums("AB", "D", "E", "G"),
+ },
+ {
+ name: "sum/move_one_from_each_scope",
+ moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, m pmetric.Metric, dp any) bool {
+ dpname, ok := dp.(pmetric.NumberDataPoint).Attributes().Get("dpName")
+ return ok && m.Name() == "metricE" && dpname.AsString() == "dpG"
+ },
+ from: pmetricutiltest.NewSums("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.Sum("E", pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Sum("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Sum("E", pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Sum("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ ),
+ ),
+ pmetricutiltest.Resource("B",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.Sum("E", pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Sum("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Sum("E", pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Sum("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ ),
+ ),
+ ),
+ expectTo: pmetricutiltest.NewSums("AB", "CD", "E", "G"),
+ },
+ {
+ name: "sum/move_one_from_each_metric",
+ moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, _ pmetric.Metric, dp any) bool {
+ dpname, ok := dp.(pmetric.NumberDataPoint).Attributes().Get("dpName")
+ return ok && dpname.AsString() == "dpG"
+ },
+ from: pmetricutiltest.NewSums("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewSums("AB", "CD", "EF", "H"),
+ expectTo: pmetricutiltest.NewSums("AB", "CD", "EF", "G"),
+ },
+ {
+ name: "sum/move_one_from_each_scope_in_one_resource",
+ moveIf: func(rl pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, m pmetric.Metric, _ any) bool {
+ rname, ok := rl.Resource().Attributes().Get("resourceName")
+ return ok && rname.AsString() == "resourceB" && m.Name() == "metricE"
+ },
+ from: pmetricutiltest.NewSums("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.Sum("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Sum("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Sum("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Sum("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ ),
+ ),
+ pmetricutiltest.Resource("B",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.Sum("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Sum("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ ),
+ ),
+ ),
+ expectTo: pmetricutiltest.NewSums("B", "CD", "E", "GH"),
+ },
+ {
+ name: "sum/move_some_to_preexisting",
+ moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, _ pmetric.Metric, dp any) bool {
+ dpname, ok := dp.(pmetric.NumberDataPoint).Attributes().Get("dpName")
+ return ok && dpname.AsString() == "dpG"
+ },
+ from: pmetricutiltest.NewSums("AB", "CD", "EF", "GH"),
+ to: pmetricutiltest.NewSums("1", "2", "3", "4"),
+ expectFrom: pmetricutiltest.NewSums("AB", "CD", "EF", "H"),
+ expectTo: func() pmetric.Metrics {
+ orig := pmetricutiltest.NewSums("1", "2", "3", "4")
+ extra := pmetricutiltest.NewSums("AB", "CD", "EF", "G")
+ extra.ResourceMetrics().MoveAndAppendTo(orig.ResourceMetrics())
+ return orig
+ }(),
+ },
+
+ // histogram
+ {
+ name: "histogram/move_none",
+ moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, _ pmetric.Metric, _ any) bool {
+ return false
+ },
+ from: pmetricutiltest.NewHistograms("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewHistograms("AB", "CD", "EF", "GH"),
+ expectTo: pmetric.NewMetrics(),
+ },
+ {
+ name: "histogram/move_all",
+ moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, _ pmetric.Metric, _ any) bool {
+ return true
+ },
+ from: pmetricutiltest.NewHistograms("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetric.NewMetrics(),
+ expectTo: pmetricutiltest.NewHistograms("AB", "CD", "EF", "GH"),
+ },
+ {
+ name: "histogram/move_all_from_one_resource",
+ moveIf: func(rl pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, _ pmetric.Metric, _ any) bool {
+ rname, ok := rl.Resource().Attributes().Get("resourceName")
+ return ok && rname.AsString() == "resourceB"
+ },
+ from: pmetricutiltest.NewHistograms("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewHistograms("A", "CD", "EF", "GH"),
+ expectTo: pmetricutiltest.NewHistograms("B", "CD", "EF", "GH"),
+ },
+ {
+ name: "histogram/move_all_from_one_scope",
+ moveIf: func(rl pmetric.ResourceMetrics, sl pmetric.ScopeMetrics, _ pmetric.Metric, _ any) bool {
+ rname, ok := rl.Resource().Attributes().Get("resourceName")
+ return ok && rname.AsString() == "resourceB" && sl.Scope().Name() == "scopeC"
+ },
+ from: pmetricutiltest.NewHistograms("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.Histogram("E", pmetricutiltest.HistogramDataPoint("G"), pmetricutiltest.HistogramDataPoint("H")),
+ pmetricutiltest.Histogram("F", pmetricutiltest.HistogramDataPoint("G"), pmetricutiltest.HistogramDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Histogram("E", pmetricutiltest.HistogramDataPoint("G"), pmetricutiltest.HistogramDataPoint("H")),
+ pmetricutiltest.Histogram("F", pmetricutiltest.HistogramDataPoint("G"), pmetricutiltest.HistogramDataPoint("H")),
+ ),
+ ),
+ pmetricutiltest.Resource("B",
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Histogram("E", pmetricutiltest.HistogramDataPoint("G"), pmetricutiltest.HistogramDataPoint("H")),
+ pmetricutiltest.Histogram("F", pmetricutiltest.HistogramDataPoint("G"), pmetricutiltest.HistogramDataPoint("H")),
+ ),
+ ),
+ ),
+ expectTo: pmetricutiltest.NewHistograms("B", "C", "EF", "GH"),
+ },
+ {
+ name: "histogram/move_all_from_one_metric",
+ moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, m pmetric.Metric, _ any) bool {
+ return m.Name() == "metricE"
+ },
+ from: pmetricutiltest.NewHistograms("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewHistograms("AB", "CD", "F", "GH"),
+ expectTo: pmetricutiltest.NewHistograms("AB", "CD", "E", "GH"),
+ },
+ {
+ name: "histogram/move_all_from_one_scope_in_each_resource",
+ moveIf: func(_ pmetric.ResourceMetrics, sl pmetric.ScopeMetrics, _ pmetric.Metric, _ any) bool {
+ return sl.Scope().Name() == "scopeD"
+ },
+ from: pmetricutiltest.NewHistograms("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewHistograms("AB", "C", "EF", "GH"),
+ expectTo: pmetricutiltest.NewHistograms("AB", "D", "EF", "GH"),
+ },
+ {
+ name: "histogram/move_all_from_one_metric_in_each_scope",
+ moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, m pmetric.Metric, _ any) bool {
+ return m.Name() == "metricF"
+ },
+ from: pmetricutiltest.NewHistograms("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewHistograms("AB", "CD", "E", "GH"),
+ expectTo: pmetricutiltest.NewHistograms("AB", "CD", "F", "GH"),
+ },
+ {
+ name: "histogram/move_one",
+ moveIf: func(rl pmetric.ResourceMetrics, sl pmetric.ScopeMetrics, m pmetric.Metric, dp any) bool {
+ rname, ok1 := rl.Resource().Attributes().Get("resourceName")
+ dpname, ok2 := dp.(pmetric.HistogramDataPoint).Attributes().Get("dpName")
+ return ok1 && ok2 && rname.AsString() == "resourceA" && sl.Scope().Name() == "scopeD" && m.Name() == "metricF" && dpname.AsString() == "dpG"
+ },
+ from: pmetricutiltest.NewHistograms("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.Histogram("E", pmetricutiltest.HistogramDataPoint("G"), pmetricutiltest.HistogramDataPoint("H")),
+ pmetricutiltest.Histogram("F", pmetricutiltest.HistogramDataPoint("G"), pmetricutiltest.HistogramDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Histogram("E", pmetricutiltest.HistogramDataPoint("G"), pmetricutiltest.HistogramDataPoint("H")),
+ pmetricutiltest.Histogram("F", pmetricutiltest.HistogramDataPoint("H")),
+ ),
+ ),
+ pmetricutiltest.Resource("B",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.Histogram("E", pmetricutiltest.HistogramDataPoint("G"), pmetricutiltest.HistogramDataPoint("H")),
+ pmetricutiltest.Histogram("F", pmetricutiltest.HistogramDataPoint("G"), pmetricutiltest.HistogramDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Histogram("E", pmetricutiltest.HistogramDataPoint("G"), pmetricutiltest.HistogramDataPoint("H")),
+ pmetricutiltest.Histogram("F", pmetricutiltest.HistogramDataPoint("G"), pmetricutiltest.HistogramDataPoint("H")),
+ ),
+ ),
+ ),
+ expectTo: pmetricutiltest.NewHistograms("A", "D", "F", "G"),
+ },
+ {
+ name: "histogram/move_one_from_each_resource",
+ moveIf: func(_ pmetric.ResourceMetrics, sl pmetric.ScopeMetrics, m pmetric.Metric, dp any) bool {
+ dpname, ok := dp.(pmetric.HistogramDataPoint).Attributes().Get("dpName")
+ return ok && sl.Scope().Name() == "scopeD" && m.Name() == "metricE" && dpname.AsString() == "dpG"
+ },
+ from: pmetricutiltest.NewHistograms("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.Histogram("E", pmetricutiltest.HistogramDataPoint("G"), pmetricutiltest.HistogramDataPoint("H")),
+ pmetricutiltest.Histogram("F", pmetricutiltest.HistogramDataPoint("G"), pmetricutiltest.HistogramDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Histogram("E", pmetricutiltest.HistogramDataPoint("H")),
+ pmetricutiltest.Histogram("F", pmetricutiltest.HistogramDataPoint("G"), pmetricutiltest.HistogramDataPoint("H")),
+ ),
+ ),
+ pmetricutiltest.Resource("B",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.Histogram("E", pmetricutiltest.HistogramDataPoint("G"), pmetricutiltest.HistogramDataPoint("H")),
+ pmetricutiltest.Histogram("F", pmetricutiltest.HistogramDataPoint("G"), pmetricutiltest.HistogramDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Histogram("E", pmetricutiltest.HistogramDataPoint("H")),
+ pmetricutiltest.Histogram("F", pmetricutiltest.HistogramDataPoint("G"), pmetricutiltest.HistogramDataPoint("H")),
+ ),
+ ),
+ ),
+ expectTo: pmetricutiltest.NewHistograms("AB", "D", "E", "G"),
+ },
+ {
+ name: "histogram/move_one_from_each_scope",
+ moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, m pmetric.Metric, dp any) bool {
+ dpname, ok := dp.(pmetric.HistogramDataPoint).Attributes().Get("dpName")
+ return ok && m.Name() == "metricE" && dpname.AsString() == "dpG"
+ },
+ from: pmetricutiltest.NewHistograms("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.Histogram("E", pmetricutiltest.HistogramDataPoint("H")),
+ pmetricutiltest.Histogram("F", pmetricutiltest.HistogramDataPoint("G"), pmetricutiltest.HistogramDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Histogram("E", pmetricutiltest.HistogramDataPoint("H")),
+ pmetricutiltest.Histogram("F", pmetricutiltest.HistogramDataPoint("G"), pmetricutiltest.HistogramDataPoint("H")),
+ ),
+ ),
+ pmetricutiltest.Resource("B",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.Histogram("E", pmetricutiltest.HistogramDataPoint("H")),
+ pmetricutiltest.Histogram("F", pmetricutiltest.HistogramDataPoint("G"), pmetricutiltest.HistogramDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Histogram("E", pmetricutiltest.HistogramDataPoint("H")),
+ pmetricutiltest.Histogram("F", pmetricutiltest.HistogramDataPoint("G"), pmetricutiltest.HistogramDataPoint("H")),
+ ),
+ ),
+ ),
+ expectTo: pmetricutiltest.NewHistograms("AB", "CD", "E", "G"),
+ },
+ {
+ name: "histogram/move_one_from_each_metric",
+ moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, _ pmetric.Metric, dp any) bool {
+ dpname, ok := dp.(pmetric.HistogramDataPoint).Attributes().Get("dpName")
+ return ok && dpname.AsString() == "dpG"
+ },
+ from: pmetricutiltest.NewHistograms("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewHistograms("AB", "CD", "EF", "H"),
+ expectTo: pmetricutiltest.NewHistograms("AB", "CD", "EF", "G"),
+ },
+ {
+ name: "histogram/move_one_from_each_scope_in_one_resource",
+ moveIf: func(rl pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, m pmetric.Metric, _ any) bool {
+ rname, ok := rl.Resource().Attributes().Get("resourceName")
+ return ok && rname.AsString() == "resourceB" && m.Name() == "metricE"
+ },
+ from: pmetricutiltest.NewHistograms("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.Histogram("E", pmetricutiltest.HistogramDataPoint("G"), pmetricutiltest.HistogramDataPoint("H")),
+ pmetricutiltest.Histogram("F", pmetricutiltest.HistogramDataPoint("G"), pmetricutiltest.HistogramDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Histogram("E", pmetricutiltest.HistogramDataPoint("G"), pmetricutiltest.HistogramDataPoint("H")),
+ pmetricutiltest.Histogram("F", pmetricutiltest.HistogramDataPoint("G"), pmetricutiltest.HistogramDataPoint("H")),
+ ),
+ ),
+ pmetricutiltest.Resource("B",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.Histogram("F", pmetricutiltest.HistogramDataPoint("G"), pmetricutiltest.HistogramDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Histogram("F", pmetricutiltest.HistogramDataPoint("G"), pmetricutiltest.HistogramDataPoint("H")),
+ ),
+ ),
+ ),
+ expectTo: pmetricutiltest.NewHistograms("B", "CD", "E", "GH"),
+ },
+ {
+ name: "histogram/move_some_to_preexisting",
+ moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, _ pmetric.Metric, dp any) bool {
+ dpname, ok := dp.(pmetric.HistogramDataPoint).Attributes().Get("dpName")
+ return ok && dpname.AsString() == "dpG"
+ },
+ from: pmetricutiltest.NewHistograms("AB", "CD", "EF", "GH"),
+ to: pmetricutiltest.NewHistograms("1", "2", "3", "4"),
+ expectFrom: pmetricutiltest.NewHistograms("AB", "CD", "EF", "H"),
+ expectTo: func() pmetric.Metrics {
+ orig := pmetricutiltest.NewHistograms("1", "2", "3", "4")
+ extra := pmetricutiltest.NewHistograms("AB", "CD", "EF", "G")
+ extra.ResourceMetrics().MoveAndAppendTo(orig.ResourceMetrics())
+ return orig
+ }(),
+ },
+
+ // exponential_histogram
+ {
+ name: "exponential_histogram/move_none",
+ moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, _ pmetric.Metric, _ any) bool {
+ return false
+ },
+ from: pmetricutiltest.NewExponentialHistograms("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewExponentialHistograms("AB", "CD", "EF", "GH"),
+ expectTo: pmetric.NewMetrics(),
+ },
+ {
+ name: "exponential_histogram/move_all",
+ moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, _ pmetric.Metric, _ any) bool {
+ return true
+ },
+ from: pmetricutiltest.NewExponentialHistograms("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetric.NewMetrics(),
+ expectTo: pmetricutiltest.NewExponentialHistograms("AB", "CD", "EF", "GH"),
+ },
+ {
+ name: "exponential_histogram/move_all_from_one_resource",
+ moveIf: func(rl pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, _ pmetric.Metric, _ any) bool {
+ rname, ok := rl.Resource().Attributes().Get("resourceName")
+ return ok && rname.AsString() == "resourceB"
+ },
+ from: pmetricutiltest.NewExponentialHistograms("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewExponentialHistograms("A", "CD", "EF", "GH"),
+ expectTo: pmetricutiltest.NewExponentialHistograms("B", "CD", "EF", "GH"),
+ },
+ {
+ name: "exponential_histogram/move_all_from_one_scope",
+ moveIf: func(rl pmetric.ResourceMetrics, sl pmetric.ScopeMetrics, _ pmetric.Metric, _ any) bool {
+ rname, ok := rl.Resource().Attributes().Get("resourceName")
+ return ok && rname.AsString() == "resourceB" && sl.Scope().Name() == "scopeC"
+ },
+ from: pmetricutiltest.NewExponentialHistograms("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.ExponentialHistogram("E", pmetricutiltest.ExponentialHistogramDataPoint("G"), pmetricutiltest.ExponentialHistogramDataPoint("H")),
+ pmetricutiltest.ExponentialHistogram("F", pmetricutiltest.ExponentialHistogramDataPoint("G"), pmetricutiltest.ExponentialHistogramDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.ExponentialHistogram("E", pmetricutiltest.ExponentialHistogramDataPoint("G"), pmetricutiltest.ExponentialHistogramDataPoint("H")),
+ pmetricutiltest.ExponentialHistogram("F", pmetricutiltest.ExponentialHistogramDataPoint("G"), pmetricutiltest.ExponentialHistogramDataPoint("H")),
+ ),
+ ),
+ pmetricutiltest.Resource("B",
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.ExponentialHistogram("E", pmetricutiltest.ExponentialHistogramDataPoint("G"), pmetricutiltest.ExponentialHistogramDataPoint("H")),
+ pmetricutiltest.ExponentialHistogram("F", pmetricutiltest.ExponentialHistogramDataPoint("G"), pmetricutiltest.ExponentialHistogramDataPoint("H")),
+ ),
+ ),
+ ),
+ expectTo: pmetricutiltest.NewExponentialHistograms("B", "C", "EF", "GH"),
+ },
+ {
+ name: "exponential_histogram/move_all_from_one_metric",
+ moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, m pmetric.Metric, _ any) bool {
+ return m.Name() == "metricE"
+ },
+ from: pmetricutiltest.NewExponentialHistograms("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewExponentialHistograms("AB", "CD", "F", "GH"),
+ expectTo: pmetricutiltest.NewExponentialHistograms("AB", "CD", "E", "GH"),
+ },
+ {
+ name: "exponential_histogram/move_all_from_one_scope_in_each_resource",
+ moveIf: func(_ pmetric.ResourceMetrics, sl pmetric.ScopeMetrics, _ pmetric.Metric, _ any) bool {
+ return sl.Scope().Name() == "scopeD"
+ },
+ from: pmetricutiltest.NewExponentialHistograms("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewExponentialHistograms("AB", "C", "EF", "GH"),
+ expectTo: pmetricutiltest.NewExponentialHistograms("AB", "D", "EF", "GH"),
+ },
+ {
+ name: "exponential_histogram/move_all_from_one_metric_in_each_scope",
+ moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, m pmetric.Metric, _ any) bool {
+ return m.Name() == "metricF"
+ },
+ from: pmetricutiltest.NewExponentialHistograms("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewExponentialHistograms("AB", "CD", "E", "GH"),
+ expectTo: pmetricutiltest.NewExponentialHistograms("AB", "CD", "F", "GH"),
+ },
+ {
+ name: "exponential_histogram/move_one",
+ moveIf: func(rl pmetric.ResourceMetrics, sl pmetric.ScopeMetrics, m pmetric.Metric, dp any) bool {
+ rname, ok1 := rl.Resource().Attributes().Get("resourceName")
+ dpname, ok2 := dp.(pmetric.ExponentialHistogramDataPoint).Attributes().Get("dpName")
+ return ok1 && ok2 && rname.AsString() == "resourceA" && sl.Scope().Name() == "scopeD" && m.Name() == "metricF" && dpname.AsString() == "dpG"
+ },
+ from: pmetricutiltest.NewExponentialHistograms("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.ExponentialHistogram("E", pmetricutiltest.ExponentialHistogramDataPoint("G"), pmetricutiltest.ExponentialHistogramDataPoint("H")),
+ pmetricutiltest.ExponentialHistogram("F", pmetricutiltest.ExponentialHistogramDataPoint("G"), pmetricutiltest.ExponentialHistogramDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.ExponentialHistogram("E", pmetricutiltest.ExponentialHistogramDataPoint("G"), pmetricutiltest.ExponentialHistogramDataPoint("H")),
+ pmetricutiltest.ExponentialHistogram("F", pmetricutiltest.ExponentialHistogramDataPoint("H")),
+ ),
+ ),
+ pmetricutiltest.Resource("B",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.ExponentialHistogram("E", pmetricutiltest.ExponentialHistogramDataPoint("G"), pmetricutiltest.ExponentialHistogramDataPoint("H")),
+ pmetricutiltest.ExponentialHistogram("F", pmetricutiltest.ExponentialHistogramDataPoint("G"), pmetricutiltest.ExponentialHistogramDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.ExponentialHistogram("E", pmetricutiltest.ExponentialHistogramDataPoint("G"), pmetricutiltest.ExponentialHistogramDataPoint("H")),
+ pmetricutiltest.ExponentialHistogram("F", pmetricutiltest.ExponentialHistogramDataPoint("G"), pmetricutiltest.ExponentialHistogramDataPoint("H")),
+ ),
+ ),
+ ),
+ expectTo: pmetricutiltest.NewExponentialHistograms("A", "D", "F", "G"),
+ },
+ {
+ name: "exponential_histogram/move_one_from_each_resource",
+ moveIf: func(_ pmetric.ResourceMetrics, sl pmetric.ScopeMetrics, m pmetric.Metric, dp any) bool {
+ dpname, ok := dp.(pmetric.ExponentialHistogramDataPoint).Attributes().Get("dpName")
+ return ok && sl.Scope().Name() == "scopeD" && m.Name() == "metricE" && dpname.AsString() == "dpG"
+ },
+ from: pmetricutiltest.NewExponentialHistograms("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.ExponentialHistogram("E", pmetricutiltest.ExponentialHistogramDataPoint("G"), pmetricutiltest.ExponentialHistogramDataPoint("H")),
+ pmetricutiltest.ExponentialHistogram("F", pmetricutiltest.ExponentialHistogramDataPoint("G"), pmetricutiltest.ExponentialHistogramDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.ExponentialHistogram("E", pmetricutiltest.ExponentialHistogramDataPoint("H")),
+ pmetricutiltest.ExponentialHistogram("F", pmetricutiltest.ExponentialHistogramDataPoint("G"), pmetricutiltest.ExponentialHistogramDataPoint("H")),
+ ),
+ ),
+ pmetricutiltest.Resource("B",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.ExponentialHistogram("E", pmetricutiltest.ExponentialHistogramDataPoint("G"), pmetricutiltest.ExponentialHistogramDataPoint("H")),
+ pmetricutiltest.ExponentialHistogram("F", pmetricutiltest.ExponentialHistogramDataPoint("G"), pmetricutiltest.ExponentialHistogramDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.ExponentialHistogram("E", pmetricutiltest.ExponentialHistogramDataPoint("H")),
+ pmetricutiltest.ExponentialHistogram("F", pmetricutiltest.ExponentialHistogramDataPoint("G"), pmetricutiltest.ExponentialHistogramDataPoint("H")),
+ ),
+ ),
+ ),
+ expectTo: pmetricutiltest.NewExponentialHistograms("AB", "D", "E", "G"),
+ },
+ {
+ name: "exponential_histogram/move_one_from_each_scope",
+ moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, m pmetric.Metric, dp any) bool {
+ dpname, ok := dp.(pmetric.ExponentialHistogramDataPoint).Attributes().Get("dpName")
+ return ok && m.Name() == "metricE" && dpname.AsString() == "dpG"
+ },
+ from: pmetricutiltest.NewExponentialHistograms("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.ExponentialHistogram("E", pmetricutiltest.ExponentialHistogramDataPoint("H")),
+ pmetricutiltest.ExponentialHistogram("F", pmetricutiltest.ExponentialHistogramDataPoint("G"), pmetricutiltest.ExponentialHistogramDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.ExponentialHistogram("E", pmetricutiltest.ExponentialHistogramDataPoint("H")),
+ pmetricutiltest.ExponentialHistogram("F", pmetricutiltest.ExponentialHistogramDataPoint("G"), pmetricutiltest.ExponentialHistogramDataPoint("H")),
+ ),
+ ),
+ pmetricutiltest.Resource("B",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.ExponentialHistogram("E", pmetricutiltest.ExponentialHistogramDataPoint("H")),
+ pmetricutiltest.ExponentialHistogram("F", pmetricutiltest.ExponentialHistogramDataPoint("G"), pmetricutiltest.ExponentialHistogramDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.ExponentialHistogram("E", pmetricutiltest.ExponentialHistogramDataPoint("H")),
+ pmetricutiltest.ExponentialHistogram("F", pmetricutiltest.ExponentialHistogramDataPoint("G"), pmetricutiltest.ExponentialHistogramDataPoint("H")),
+ ),
+ ),
+ ),
+ expectTo: pmetricutiltest.NewExponentialHistograms("AB", "CD", "E", "G"),
+ },
+ {
+ name: "exponential_histogram/move_one_from_each_metric",
+ moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, _ pmetric.Metric, dp any) bool {
+ dpname, ok := dp.(pmetric.ExponentialHistogramDataPoint).Attributes().Get("dpName")
+ return ok && dpname.AsString() == "dpG"
+ },
+ from: pmetricutiltest.NewExponentialHistograms("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewExponentialHistograms("AB", "CD", "EF", "H"),
+ expectTo: pmetricutiltest.NewExponentialHistograms("AB", "CD", "EF", "G"),
+ },
+ {
+ name: "exponential_histogram/move_one_from_each_scope_in_one_resource",
+ moveIf: func(rl pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, m pmetric.Metric, _ any) bool {
+ rname, ok := rl.Resource().Attributes().Get("resourceName")
+ return ok && rname.AsString() == "resourceB" && m.Name() == "metricE"
+ },
+ from: pmetricutiltest.NewExponentialHistograms("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.ExponentialHistogram("E", pmetricutiltest.ExponentialHistogramDataPoint("G"), pmetricutiltest.ExponentialHistogramDataPoint("H")),
+ pmetricutiltest.ExponentialHistogram("F", pmetricutiltest.ExponentialHistogramDataPoint("G"), pmetricutiltest.ExponentialHistogramDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.ExponentialHistogram("E", pmetricutiltest.ExponentialHistogramDataPoint("G"), pmetricutiltest.ExponentialHistogramDataPoint("H")),
+ pmetricutiltest.ExponentialHistogram("F", pmetricutiltest.ExponentialHistogramDataPoint("G"), pmetricutiltest.ExponentialHistogramDataPoint("H")),
+ ),
+ ),
+ pmetricutiltest.Resource("B",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.ExponentialHistogram("F", pmetricutiltest.ExponentialHistogramDataPoint("G"), pmetricutiltest.ExponentialHistogramDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.ExponentialHistogram("F", pmetricutiltest.ExponentialHistogramDataPoint("G"), pmetricutiltest.ExponentialHistogramDataPoint("H")),
+ ),
+ ),
+ ),
+ expectTo: pmetricutiltest.NewExponentialHistograms("B", "CD", "E", "GH"),
+ },
+ {
+ name: "exponential_histogram/move_some_to_preexisting",
+ moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, _ pmetric.Metric, dp any) bool {
+ dpname, ok := dp.(pmetric.ExponentialHistogramDataPoint).Attributes().Get("dpName")
+ return ok && dpname.AsString() == "dpG"
+ },
+ from: pmetricutiltest.NewExponentialHistograms("AB", "CD", "EF", "GH"),
+ to: pmetricutiltest.NewExponentialHistograms("1", "2", "3", "4"),
+ expectFrom: pmetricutiltest.NewExponentialHistograms("AB", "CD", "EF", "H"),
+ expectTo: func() pmetric.Metrics {
+ orig := pmetricutiltest.NewExponentialHistograms("1", "2", "3", "4")
+ extra := pmetricutiltest.NewExponentialHistograms("AB", "CD", "EF", "G")
+ extra.ResourceMetrics().MoveAndAppendTo(orig.ResourceMetrics())
+ return orig
+ }(),
+ },
+
+ // summary
+ {
+ name: "summary/move_none",
+ moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, _ pmetric.Metric, _ any) bool {
+ return false
+ },
+ from: pmetricutiltest.NewSummaries("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewSummaries("AB", "CD", "EF", "GH"),
+ expectTo: pmetric.NewMetrics(),
+ },
+ {
+ name: "summary/move_all",
+ moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, _ pmetric.Metric, _ any) bool {
+ return true
+ },
+ from: pmetricutiltest.NewSummaries("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetric.NewMetrics(),
+ expectTo: pmetricutiltest.NewSummaries("AB", "CD", "EF", "GH"),
+ },
+ {
+ name: "summary/move_all_from_one_resource",
+ moveIf: func(rl pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, _ pmetric.Metric, _ any) bool {
+ rname, ok := rl.Resource().Attributes().Get("resourceName")
+ return ok && rname.AsString() == "resourceB"
+ },
+ from: pmetricutiltest.NewSummaries("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewSummaries("A", "CD", "EF", "GH"),
+ expectTo: pmetricutiltest.NewSummaries("B", "CD", "EF", "GH"),
+ },
+ {
+ name: "summary/move_all_from_one_scope",
+ moveIf: func(rl pmetric.ResourceMetrics, sl pmetric.ScopeMetrics, _ pmetric.Metric, _ any) bool {
+ rname, ok := rl.Resource().Attributes().Get("resourceName")
+ return ok && rname.AsString() == "resourceB" && sl.Scope().Name() == "scopeC"
+ },
+ from: pmetricutiltest.NewSummaries("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.Summary("E", pmetricutiltest.SummaryDataPoint("G"), pmetricutiltest.SummaryDataPoint("H")),
+ pmetricutiltest.Summary("F", pmetricutiltest.SummaryDataPoint("G"), pmetricutiltest.SummaryDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Summary("E", pmetricutiltest.SummaryDataPoint("G"), pmetricutiltest.SummaryDataPoint("H")),
+ pmetricutiltest.Summary("F", pmetricutiltest.SummaryDataPoint("G"), pmetricutiltest.SummaryDataPoint("H")),
+ ),
+ ),
+ pmetricutiltest.Resource("B",
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Summary("E", pmetricutiltest.SummaryDataPoint("G"), pmetricutiltest.SummaryDataPoint("H")),
+ pmetricutiltest.Summary("F", pmetricutiltest.SummaryDataPoint("G"), pmetricutiltest.SummaryDataPoint("H")),
+ ),
+ ),
+ ),
+ expectTo: pmetricutiltest.NewSummaries("B", "C", "EF", "GH"),
+ },
+ {
+ name: "summary/move_all_from_one_metric",
+ moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, m pmetric.Metric, _ any) bool {
+ return m.Name() == "metricE"
+ },
+ from: pmetricutiltest.NewSummaries("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewSummaries("AB", "CD", "F", "GH"),
+ expectTo: pmetricutiltest.NewSummaries("AB", "CD", "E", "GH"),
+ },
+ {
+ name: "summary/move_all_from_one_scope_in_each_resource",
+ moveIf: func(_ pmetric.ResourceMetrics, sl pmetric.ScopeMetrics, _ pmetric.Metric, _ any) bool {
+ return sl.Scope().Name() == "scopeD"
+ },
+ from: pmetricutiltest.NewSummaries("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewSummaries("AB", "C", "EF", "GH"),
+ expectTo: pmetricutiltest.NewSummaries("AB", "D", "EF", "GH"),
+ },
+ {
+ name: "summary/move_all_from_one_metric_in_each_scope",
+ moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, m pmetric.Metric, _ any) bool {
+ return m.Name() == "metricF"
+ },
+ from: pmetricutiltest.NewSummaries("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewSummaries("AB", "CD", "E", "GH"),
+ expectTo: pmetricutiltest.NewSummaries("AB", "CD", "F", "GH"),
+ },
+ {
+ name: "summary/move_one",
+ moveIf: func(rl pmetric.ResourceMetrics, sl pmetric.ScopeMetrics, m pmetric.Metric, dp any) bool {
+ rname, ok1 := rl.Resource().Attributes().Get("resourceName")
+ dpname, ok2 := dp.(pmetric.SummaryDataPoint).Attributes().Get("dpName")
+ return ok1 && ok2 && rname.AsString() == "resourceA" && sl.Scope().Name() == "scopeD" && m.Name() == "metricF" && dpname.AsString() == "dpG"
+ },
+ from: pmetricutiltest.NewSummaries("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.Summary("E", pmetricutiltest.SummaryDataPoint("G"), pmetricutiltest.SummaryDataPoint("H")),
+ pmetricutiltest.Summary("F", pmetricutiltest.SummaryDataPoint("G"), pmetricutiltest.SummaryDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Summary("E", pmetricutiltest.SummaryDataPoint("G"), pmetricutiltest.SummaryDataPoint("H")),
+ pmetricutiltest.Summary("F", pmetricutiltest.SummaryDataPoint("H")),
+ ),
+ ),
+ pmetricutiltest.Resource("B",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.Summary("E", pmetricutiltest.SummaryDataPoint("G"), pmetricutiltest.SummaryDataPoint("H")),
+ pmetricutiltest.Summary("F", pmetricutiltest.SummaryDataPoint("G"), pmetricutiltest.SummaryDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Summary("E", pmetricutiltest.SummaryDataPoint("G"), pmetricutiltest.SummaryDataPoint("H")),
+ pmetricutiltest.Summary("F", pmetricutiltest.SummaryDataPoint("G"), pmetricutiltest.SummaryDataPoint("H")),
+ ),
+ ),
+ ),
+ expectTo: pmetricutiltest.NewSummaries("A", "D", "F", "G"),
+ },
+ {
+ name: "summary/move_one_from_each_resource",
+ moveIf: func(_ pmetric.ResourceMetrics, sl pmetric.ScopeMetrics, m pmetric.Metric, dp any) bool {
+ dpname, ok := dp.(pmetric.SummaryDataPoint).Attributes().Get("dpName")
+ return ok && sl.Scope().Name() == "scopeD" && m.Name() == "metricE" && dpname.AsString() == "dpG"
+ },
+ from: pmetricutiltest.NewSummaries("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.Summary("E", pmetricutiltest.SummaryDataPoint("G"), pmetricutiltest.SummaryDataPoint("H")),
+ pmetricutiltest.Summary("F", pmetricutiltest.SummaryDataPoint("G"), pmetricutiltest.SummaryDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Summary("E", pmetricutiltest.SummaryDataPoint("H")),
+ pmetricutiltest.Summary("F", pmetricutiltest.SummaryDataPoint("G"), pmetricutiltest.SummaryDataPoint("H")),
+ ),
+ ),
+ pmetricutiltest.Resource("B",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.Summary("E", pmetricutiltest.SummaryDataPoint("G"), pmetricutiltest.SummaryDataPoint("H")),
+ pmetricutiltest.Summary("F", pmetricutiltest.SummaryDataPoint("G"), pmetricutiltest.SummaryDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Summary("E", pmetricutiltest.SummaryDataPoint("H")),
+ pmetricutiltest.Summary("F", pmetricutiltest.SummaryDataPoint("G"), pmetricutiltest.SummaryDataPoint("H")),
+ ),
+ ),
+ ),
+ expectTo: pmetricutiltest.NewSummaries("AB", "D", "E", "G"),
+ },
+ {
+ name: "summary/move_one_from_each_scope",
+ moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, m pmetric.Metric, dp any) bool {
+ dpname, ok := dp.(pmetric.SummaryDataPoint).Attributes().Get("dpName")
+ return ok && m.Name() == "metricE" && dpname.AsString() == "dpG"
+ },
+ from: pmetricutiltest.NewSummaries("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.Summary("E", pmetricutiltest.SummaryDataPoint("H")),
+ pmetricutiltest.Summary("F", pmetricutiltest.SummaryDataPoint("G"), pmetricutiltest.SummaryDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Summary("E", pmetricutiltest.SummaryDataPoint("H")),
+ pmetricutiltest.Summary("F", pmetricutiltest.SummaryDataPoint("G"), pmetricutiltest.SummaryDataPoint("H")),
+ ),
+ ),
+ pmetricutiltest.Resource("B",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.Summary("E", pmetricutiltest.SummaryDataPoint("H")),
+ pmetricutiltest.Summary("F", pmetricutiltest.SummaryDataPoint("G"), pmetricutiltest.SummaryDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Summary("E", pmetricutiltest.SummaryDataPoint("H")),
+ pmetricutiltest.Summary("F", pmetricutiltest.SummaryDataPoint("G"), pmetricutiltest.SummaryDataPoint("H")),
+ ),
+ ),
+ ),
+ expectTo: pmetricutiltest.NewSummaries("AB", "CD", "E", "G"),
+ },
+ {
+ name: "summary/move_one_from_each_metric",
+ moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, _ pmetric.Metric, dp any) bool {
+ dpname, ok := dp.(pmetric.SummaryDataPoint).Attributes().Get("dpName")
+ return ok && dpname.AsString() == "dpG"
+ },
+ from: pmetricutiltest.NewSummaries("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewSummaries("AB", "CD", "EF", "H"),
+ expectTo: pmetricutiltest.NewSummaries("AB", "CD", "EF", "G"),
+ },
+ {
+ name: "summary/move_one_from_each_scope_in_one_resource",
+ moveIf: func(rl pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, m pmetric.Metric, _ any) bool {
+ rname, ok := rl.Resource().Attributes().Get("resourceName")
+ return ok && rname.AsString() == "resourceB" && m.Name() == "metricE"
+ },
+ from: pmetricutiltest.NewSummaries("AB", "CD", "EF", "GH"),
+ to: pmetric.NewMetrics(),
+ expectFrom: pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.Summary("E", pmetricutiltest.SummaryDataPoint("G"), pmetricutiltest.SummaryDataPoint("H")),
+ pmetricutiltest.Summary("F", pmetricutiltest.SummaryDataPoint("G"), pmetricutiltest.SummaryDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Summary("E", pmetricutiltest.SummaryDataPoint("G"), pmetricutiltest.SummaryDataPoint("H")),
+ pmetricutiltest.Summary("F", pmetricutiltest.SummaryDataPoint("G"), pmetricutiltest.SummaryDataPoint("H")),
+ ),
+ ),
+ pmetricutiltest.Resource("B",
+ pmetricutiltest.Scope("C",
+ pmetricutiltest.Summary("F", pmetricutiltest.SummaryDataPoint("G"), pmetricutiltest.SummaryDataPoint("H")),
+ ),
+ pmetricutiltest.Scope("D",
+ pmetricutiltest.Summary("F", pmetricutiltest.SummaryDataPoint("G"), pmetricutiltest.SummaryDataPoint("H")),
+ ),
+ ),
+ ),
+ expectTo: pmetricutiltest.NewSummaries("B", "CD", "E", "GH"),
+ },
+ {
+ name: "summary/move_some_to_preexisting",
+ moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, _ pmetric.Metric, dp any) bool {
+ dpname, ok := dp.(pmetric.SummaryDataPoint).Attributes().Get("dpName")
+ return ok && dpname.AsString() == "dpG"
+ },
+ from: pmetricutiltest.NewSummaries("AB", "CD", "EF", "GH"),
+ to: pmetricutiltest.NewSummaries("1", "2", "3", "4"),
+ expectFrom: pmetricutiltest.NewSummaries("AB", "CD", "EF", "H"),
+ expectTo: func() pmetric.Metrics {
+ orig := pmetricutiltest.NewSummaries("1", "2", "3", "4")
+ extra := pmetricutiltest.NewSummaries("AB", "CD", "EF", "G")
+ extra.ResourceMetrics().MoveAndAppendTo(orig.ResourceMetrics())
+ return orig
+ }(),
+ },
+ }
+
+ for _, tt := range testCases {
+ t.Run(tt.name, func(t *testing.T) {
+ pmetricutil.MoveDataPointsWithContextIf(tt.from, tt.to, tt.moveIf)
+ assert.NoError(t, pmetrictest.CompareMetrics(tt.expectFrom, tt.from), "from not modified as expected")
+ assert.NoError(t, pmetrictest.CompareMetrics(tt.expectTo, tt.to), "to not as expected")
+ })
+ }
+}
diff --git a/connector/routingconnector/internal/pmetricutiltest/metrics.go b/connector/routingconnector/internal/pmetricutiltest/metrics.go
index fb1902759e70..e041f53ecdd5 100644
--- a/connector/routingconnector/internal/pmetricutiltest/metrics.go
+++ b/connector/routingconnector/internal/pmetricutiltest/metrics.go
@@ -5,7 +5,7 @@ package pmetricutiltest // import "github.com/open-telemetry/opentelemetry-colle
import "go.opentelemetry.io/collector/pdata/pmetric"
-// NewMetrics returns a pmetric.Metrics with a uniform structure where resources, scopes, metrics,
+// NewGauges returns a pmetric.Metrics with a uniform structure where resources, scopes, metrics,
// and datapoints are identical across all instances, except for one identifying field.
//
// Identifying fields:
@@ -14,7 +14,7 @@ import "go.opentelemetry.io/collector/pdata/pmetric"
// - Metrics have a name with a value of "metricN" and a single time series of data points.
// - DataPoints have an attribute "dpName" with a value of "dpN".
//
-// Example: NewMetrics("AB", "XYZ", "MN", "1234") returns:
+// Example: NewGauges("AB", "XYZ", "MN", "1234") returns:
//
// resourceA, resourceB
// each with scopeX, scopeY, scopeZ
@@ -22,7 +22,7 @@ import "go.opentelemetry.io/collector/pdata/pmetric"
// each with dp1, dp2, dp3, dp4
//
// Each byte in the input string is a unique ID for the corresponding element.
-func NewMetrics(resourceIDs, scopeIDs, metricIDs, dataPointIDs string) pmetric.Metrics {
+func NewGauges(resourceIDs, scopeIDs, metricIDs, dataPointIDs string) pmetric.Metrics {
md := pmetric.NewMetrics()
for resourceN := 0; resourceN < len(resourceIDs); resourceN++ {
rm := md.ResourceMetrics().AppendEmpty()
@@ -44,6 +44,94 @@ func NewMetrics(resourceIDs, scopeIDs, metricIDs, dataPointIDs string) pmetric.M
return md
}
+func NewSums(resourceIDs, scopeIDs, metricIDs, dataPointIDs string) pmetric.Metrics {
+ md := pmetric.NewMetrics()
+ for resourceN := 0; resourceN < len(resourceIDs); resourceN++ {
+ rm := md.ResourceMetrics().AppendEmpty()
+ rm.Resource().Attributes().PutStr("resourceName", "resource"+string(resourceIDs[resourceN]))
+ for scopeN := 0; scopeN < len(scopeIDs); scopeN++ {
+ sm := rm.ScopeMetrics().AppendEmpty()
+ sm.Scope().SetName("scope" + string(scopeIDs[scopeN]))
+ for metricN := 0; metricN < len(metricIDs); metricN++ {
+ m := sm.Metrics().AppendEmpty()
+ m.SetName("metric" + string(metricIDs[metricN]))
+ dps := m.SetEmptySum()
+ for dataPointN := 0; dataPointN < len(dataPointIDs); dataPointN++ {
+ dp := dps.DataPoints().AppendEmpty()
+ dp.Attributes().PutStr("dpName", "dp"+string(dataPointIDs[dataPointN]))
+ }
+ }
+ }
+ }
+ return md
+}
+
+func NewHistograms(resourceIDs, scopeIDs, metricIDs, dataPointIDs string) pmetric.Metrics {
+ md := pmetric.NewMetrics()
+ for resourceN := 0; resourceN < len(resourceIDs); resourceN++ {
+ rm := md.ResourceMetrics().AppendEmpty()
+ rm.Resource().Attributes().PutStr("resourceName", "resource"+string(resourceIDs[resourceN]))
+ for scopeN := 0; scopeN < len(scopeIDs); scopeN++ {
+ sm := rm.ScopeMetrics().AppendEmpty()
+ sm.Scope().SetName("scope" + string(scopeIDs[scopeN]))
+ for metricN := 0; metricN < len(metricIDs); metricN++ {
+ m := sm.Metrics().AppendEmpty()
+ m.SetName("metric" + string(metricIDs[metricN]))
+ dps := m.SetEmptyHistogram()
+ for dataPointN := 0; dataPointN < len(dataPointIDs); dataPointN++ {
+ dp := dps.DataPoints().AppendEmpty()
+ dp.Attributes().PutStr("dpName", "dp"+string(dataPointIDs[dataPointN]))
+ }
+ }
+ }
+ }
+ return md
+}
+
+func NewExponentialHistograms(resourceIDs, scopeIDs, metricIDs, dataPointIDs string) pmetric.Metrics {
+ md := pmetric.NewMetrics()
+ for resourceN := 0; resourceN < len(resourceIDs); resourceN++ {
+ rm := md.ResourceMetrics().AppendEmpty()
+ rm.Resource().Attributes().PutStr("resourceName", "resource"+string(resourceIDs[resourceN]))
+ for scopeN := 0; scopeN < len(scopeIDs); scopeN++ {
+ sm := rm.ScopeMetrics().AppendEmpty()
+ sm.Scope().SetName("scope" + string(scopeIDs[scopeN]))
+ for metricN := 0; metricN < len(metricIDs); metricN++ {
+ m := sm.Metrics().AppendEmpty()
+ m.SetName("metric" + string(metricIDs[metricN]))
+ dps := m.SetEmptyExponentialHistogram()
+ for dataPointN := 0; dataPointN < len(dataPointIDs); dataPointN++ {
+ dp := dps.DataPoints().AppendEmpty()
+ dp.Attributes().PutStr("dpName", "dp"+string(dataPointIDs[dataPointN]))
+ }
+ }
+ }
+ }
+ return md
+}
+
+func NewSummaries(resourceIDs, scopeIDs, metricIDs, dataPointIDs string) pmetric.Metrics {
+ md := pmetric.NewMetrics()
+ for resourceN := 0; resourceN < len(resourceIDs); resourceN++ {
+ rm := md.ResourceMetrics().AppendEmpty()
+ rm.Resource().Attributes().PutStr("resourceName", "resource"+string(resourceIDs[resourceN]))
+ for scopeN := 0; scopeN < len(scopeIDs); scopeN++ {
+ sm := rm.ScopeMetrics().AppendEmpty()
+ sm.Scope().SetName("scope" + string(scopeIDs[scopeN]))
+ for metricN := 0; metricN < len(metricIDs); metricN++ {
+ m := sm.Metrics().AppendEmpty()
+ m.SetName("metric" + string(metricIDs[metricN]))
+ dps := m.SetEmptySummary()
+ for dataPointN := 0; dataPointN < len(dataPointIDs); dataPointN++ {
+ dp := dps.DataPoints().AppendEmpty()
+ dp.Attributes().PutStr("dpName", "dp"+string(dataPointIDs[dataPointN]))
+ }
+ }
+ }
+ }
+ return md
+}
+
func NewMetricsFromOpts(resources ...pmetric.ResourceMetrics) pmetric.Metrics {
md := pmetric.NewMetrics()
for _, resource := range resources {
@@ -70,7 +158,7 @@ func Scope(id string, metrics ...pmetric.Metric) pmetric.ScopeMetrics {
return s
}
-func Metric(id string, dps ...pmetric.NumberDataPoint) pmetric.Metric {
+func Gauge(id string, dps ...pmetric.NumberDataPoint) pmetric.Metric {
m := pmetric.NewMetric()
m.SetName("metric" + id)
g := m.SetEmptyGauge()
@@ -80,8 +168,66 @@ func Metric(id string, dps ...pmetric.NumberDataPoint) pmetric.Metric {
return m
}
+func Sum(id string, dps ...pmetric.NumberDataPoint) pmetric.Metric {
+ m := pmetric.NewMetric()
+ m.SetName("metric" + id)
+ g := m.SetEmptySum()
+ for _, dp := range dps {
+ dp.CopyTo(g.DataPoints().AppendEmpty())
+ }
+ return m
+}
+
func NumberDataPoint(id string) pmetric.NumberDataPoint {
dp := pmetric.NewNumberDataPoint()
dp.Attributes().PutStr("dpName", "dp"+id)
return dp
}
+
+func Histogram(id string, dps ...pmetric.HistogramDataPoint) pmetric.Metric {
+ m := pmetric.NewMetric()
+ m.SetName("metric" + id)
+ g := m.SetEmptyHistogram()
+ for _, dp := range dps {
+ dp.CopyTo(g.DataPoints().AppendEmpty())
+ }
+ return m
+}
+
+func HistogramDataPoint(id string) pmetric.HistogramDataPoint {
+ dp := pmetric.NewHistogramDataPoint()
+ dp.Attributes().PutStr("dpName", "dp"+id)
+ return dp
+}
+
+func ExponentialHistogram(id string, dps ...pmetric.ExponentialHistogramDataPoint) pmetric.Metric {
+ m := pmetric.NewMetric()
+ m.SetName("metric" + id)
+ g := m.SetEmptyExponentialHistogram()
+ for _, dp := range dps {
+ dp.CopyTo(g.DataPoints().AppendEmpty())
+ }
+ return m
+}
+
+func ExponentialHistogramDataPoint(id string) pmetric.ExponentialHistogramDataPoint {
+ dp := pmetric.NewExponentialHistogramDataPoint()
+ dp.Attributes().PutStr("dpName", "dp"+id)
+ return dp
+}
+
+func Summary(id string, dps ...pmetric.SummaryDataPoint) pmetric.Metric {
+ m := pmetric.NewMetric()
+ m.SetName("metric" + id)
+ g := m.SetEmptySummary()
+ for _, dp := range dps {
+ dp.CopyTo(g.DataPoints().AppendEmpty())
+ }
+ return m
+}
+
+func SummaryDataPoint(id string) pmetric.SummaryDataPoint {
+ dp := pmetric.NewSummaryDataPoint()
+ dp.Attributes().PutStr("dpName", "dp"+id)
+ return dp
+}
diff --git a/connector/routingconnector/internal/pmetricutiltest/metrics_test.go b/connector/routingconnector/internal/pmetricutiltest/metrics_test.go
index 9fcd2edebef6..47a73b5afc37 100644
--- a/connector/routingconnector/internal/pmetricutiltest/metrics_test.go
+++ b/connector/routingconnector/internal/pmetricutiltest/metrics_test.go
@@ -16,30 +16,120 @@ import (
func TestNewMetrics(t *testing.T) {
t.Run("empty", func(t *testing.T) {
expected := pmetric.NewMetrics()
- assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetrics("", "", "", "")))
+ assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewGauges("", "", "", "")))
+ assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewSums("", "", "", "")))
+ assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewHistograms("", "", "", "")))
+ assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewExponentialHistograms("", "", "", "")))
+ assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewSummaries("", "", "", "")))
assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetricsFromOpts()))
})
t.Run("simple", func(t *testing.T) {
- expected := func() pmetric.Metrics {
- md := pmetric.NewMetrics()
- r := md.ResourceMetrics().AppendEmpty()
- r.Resource().Attributes().PutStr("resourceName", "resourceA") // resourceA
- s := r.ScopeMetrics().AppendEmpty()
- s.Scope().SetName("scopeB") // resourceA.scopeB
- m := s.Metrics().AppendEmpty()
- m.SetName("metricC") // resourceA.scopeB.metricC
- dps := m.SetEmptyGauge()
- dp := dps.DataPoints().AppendEmpty()
- dp.Attributes().PutStr("dpName", "dpD") // resourceA.scopeB.metricC.dpD
- return md
- }()
- assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetrics("A", "B", "C", "D")))
- assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetricsFromOpts(
- pmetricutiltest.Resource("A",
- pmetricutiltest.Scope("B", pmetricutiltest.Metric("C", pmetricutiltest.NumberDataPoint("D"))),
- ),
- )))
+ t.Run("gauge", func(t *testing.T) {
+ expected := func() pmetric.Metrics {
+ md := pmetric.NewMetrics()
+ r := md.ResourceMetrics().AppendEmpty()
+ r.Resource().Attributes().PutStr("resourceName", "resourceA") // resourceA
+ s := r.ScopeMetrics().AppendEmpty()
+ s.Scope().SetName("scopeB") // resourceA.scopeB
+ m := s.Metrics().AppendEmpty()
+ m.SetName("metricC") // resourceA.scopeB.metricC
+ dps := m.SetEmptyGauge()
+ dp := dps.DataPoints().AppendEmpty()
+ dp.Attributes().PutStr("dpName", "dpD") // resourceA.scopeB.metricC.dpD
+ return md
+ }()
+ assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewGauges("A", "B", "C", "D")))
+ assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("B", pmetricutiltest.Gauge("C", pmetricutiltest.NumberDataPoint("D"))),
+ ),
+ )))
+ })
+ t.Run("sum", func(t *testing.T) {
+ expected := func() pmetric.Metrics {
+ md := pmetric.NewMetrics()
+ r := md.ResourceMetrics().AppendEmpty()
+ r.Resource().Attributes().PutStr("resourceName", "resourceA") // resourceA
+ s := r.ScopeMetrics().AppendEmpty()
+ s.Scope().SetName("scopeB") // resourceA.scopeB
+ m := s.Metrics().AppendEmpty()
+ m.SetName("metricC") // resourceA.scopeB.metricC
+ dps := m.SetEmptySum()
+ dp := dps.DataPoints().AppendEmpty()
+ dp.Attributes().PutStr("dpName", "dpD") // resourceA.scopeB.metricC.dpD
+ return md
+ }()
+ assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewSums("A", "B", "C", "D")))
+ assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("B", pmetricutiltest.Sum("C", pmetricutiltest.NumberDataPoint("D"))),
+ ),
+ )))
+ })
+ t.Run("histogram", func(t *testing.T) {
+ expected := func() pmetric.Metrics {
+ md := pmetric.NewMetrics()
+ r := md.ResourceMetrics().AppendEmpty()
+ r.Resource().Attributes().PutStr("resourceName", "resourceA") // resourceA
+ s := r.ScopeMetrics().AppendEmpty()
+ s.Scope().SetName("scopeB") // resourceA.scopeB
+ m := s.Metrics().AppendEmpty()
+ m.SetName("metricC") // resourceA.scopeB.metricC
+ dps := m.SetEmptyHistogram()
+ dp := dps.DataPoints().AppendEmpty()
+ dp.Attributes().PutStr("dpName", "dpD") // resourceA.scopeB.metricC.dpD
+ return md
+ }()
+ assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewHistograms("A", "B", "C", "D")))
+ assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("B", pmetricutiltest.Histogram("C", pmetricutiltest.HistogramDataPoint("D"))),
+ ),
+ )))
+ })
+ t.Run("exponential_histogram", func(t *testing.T) {
+ expected := func() pmetric.Metrics {
+ md := pmetric.NewMetrics()
+ r := md.ResourceMetrics().AppendEmpty()
+ r.Resource().Attributes().PutStr("resourceName", "resourceA") // resourceA
+ s := r.ScopeMetrics().AppendEmpty()
+ s.Scope().SetName("scopeB") // resourceA.scopeB
+ m := s.Metrics().AppendEmpty()
+ m.SetName("metricC") // resourceA.scopeB.metricC
+ dps := m.SetEmptyExponentialHistogram()
+ dp := dps.DataPoints().AppendEmpty()
+ dp.Attributes().PutStr("dpName", "dpD") // resourceA.scopeB.metricC.dpD
+ return md
+ }()
+ assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewExponentialHistograms("A", "B", "C", "D")))
+ assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("B", pmetricutiltest.ExponentialHistogram("C", pmetricutiltest.ExponentialHistogramDataPoint("D"))),
+ ),
+ )))
+ })
+ t.Run("summary", func(t *testing.T) {
+ expected := func() pmetric.Metrics {
+ md := pmetric.NewMetrics()
+ r := md.ResourceMetrics().AppendEmpty()
+ r.Resource().Attributes().PutStr("resourceName", "resourceA") // resourceA
+ s := r.ScopeMetrics().AppendEmpty()
+ s.Scope().SetName("scopeB") // resourceA.scopeB
+ m := s.Metrics().AppendEmpty()
+ m.SetName("metricC") // resourceA.scopeB.metricC
+ dps := m.SetEmptySummary()
+ dp := dps.DataPoints().AppendEmpty()
+ dp.Attributes().PutStr("dpName", "dpD") // resourceA.scopeB.metricC.dpD
+ return md
+ }()
+ assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewSummaries("A", "B", "C", "D")))
+ assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("B", pmetricutiltest.Summary("C", pmetricutiltest.SummaryDataPoint("D"))),
+ ),
+ )))
+ })
})
t.Run("two_resources", func(t *testing.T) {
@@ -65,13 +155,13 @@ func TestNewMetrics(t *testing.T) {
dp.Attributes().PutStr("dpName", "dpE") // resourceB.scopeC.metricD.dpE
return md
}()
- assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetrics("AB", "C", "D", "E")))
+ assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewGauges("AB", "C", "D", "E")))
assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetricsFromOpts(
pmetricutiltest.Resource("A",
- pmetricutiltest.Scope("C", pmetricutiltest.Metric("D", pmetricutiltest.NumberDataPoint("E"))),
+ pmetricutiltest.Scope("C", pmetricutiltest.Gauge("D", pmetricutiltest.NumberDataPoint("E"))),
),
pmetricutiltest.Resource("B",
- pmetricutiltest.Scope("C", pmetricutiltest.Metric("D", pmetricutiltest.NumberDataPoint("E"))),
+ pmetricutiltest.Scope("C", pmetricutiltest.Gauge("D", pmetricutiltest.NumberDataPoint("E"))),
),
)))
})
@@ -97,65 +187,340 @@ func TestNewMetrics(t *testing.T) {
dp.Attributes().PutStr("dpName", "dpE") // resourceA.scopeC.metricD.dpE
return md
}()
- assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetrics("A", "BC", "D", "E")))
+ assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewGauges("A", "BC", "D", "E")))
assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetricsFromOpts(
pmetricutiltest.Resource("A",
- pmetricutiltest.Scope("B", pmetricutiltest.Metric("D", pmetricutiltest.NumberDataPoint("E"))),
- pmetricutiltest.Scope("C", pmetricutiltest.Metric("D", pmetricutiltest.NumberDataPoint("E"))),
+ pmetricutiltest.Scope("B", pmetricutiltest.Gauge("D", pmetricutiltest.NumberDataPoint("E"))),
+ pmetricutiltest.Scope("C", pmetricutiltest.Gauge("D", pmetricutiltest.NumberDataPoint("E"))),
),
)))
})
t.Run("two_metrics", func(t *testing.T) {
- expected := func() pmetric.Metrics {
- md := pmetric.NewMetrics()
- r := md.ResourceMetrics().AppendEmpty()
- r.Resource().Attributes().PutStr("resourceName", "resourceA") // resourceA
- s := r.ScopeMetrics().AppendEmpty()
- s.Scope().SetName("scopeB") // resourceA.scopeB
- m := s.Metrics().AppendEmpty()
- m.SetName("metricC") // resourceA.scopeB.metricC
- dps := m.SetEmptyGauge()
- dp := dps.DataPoints().AppendEmpty()
- dp.Attributes().PutStr("dpName", "dpE") // resourceA.scopeB.metricC.dpE
- m = s.Metrics().AppendEmpty()
- m.SetName("metricD") // resourceA.scopeB.metricD
- dps = m.SetEmptyGauge()
- dp = dps.DataPoints().AppendEmpty()
- dp.Attributes().PutStr("dpName", "dpE") // resourceA.scopeB.metricD.dpE
- return md
- }()
- assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetrics("A", "B", "CD", "E")))
- assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetricsFromOpts(
- pmetricutiltest.Resource("A",
- pmetricutiltest.Scope("B",
- pmetricutiltest.Metric("C", pmetricutiltest.NumberDataPoint("E")),
- pmetricutiltest.Metric("D", pmetricutiltest.NumberDataPoint("E")),
+ t.Run("gauges", func(t *testing.T) {
+ expected := func() pmetric.Metrics {
+ md := pmetric.NewMetrics()
+ r := md.ResourceMetrics().AppendEmpty()
+ r.Resource().Attributes().PutStr("resourceName", "resourceA") // resourceA
+ s := r.ScopeMetrics().AppendEmpty()
+ s.Scope().SetName("scopeB") // resourceA.scopeB
+ m := s.Metrics().AppendEmpty()
+ m.SetName("metricC") // resourceA.scopeB.metricC
+ dps := m.SetEmptyGauge()
+ dp := dps.DataPoints().AppendEmpty()
+ dp.Attributes().PutStr("dpName", "dpE") // resourceA.scopeB.metricC.dpE
+ m = s.Metrics().AppendEmpty()
+ m.SetName("metricD") // resourceA.scopeB.metricD
+ dps = m.SetEmptyGauge()
+ dp = dps.DataPoints().AppendEmpty()
+ dp.Attributes().PutStr("dpName", "dpE") // resourceA.scopeB.metricD.dpE
+ return md
+ }()
+ assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewGauges("A", "B", "CD", "E")))
+ assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("B",
+ pmetricutiltest.Gauge("C", pmetricutiltest.NumberDataPoint("E")),
+ pmetricutiltest.Gauge("D", pmetricutiltest.NumberDataPoint("E")),
+ ),
),
- ),
- )))
+ )))
+ })
+ t.Run("sums", func(t *testing.T) {
+ expected := func() pmetric.Metrics {
+ md := pmetric.NewMetrics()
+ r := md.ResourceMetrics().AppendEmpty()
+ r.Resource().Attributes().PutStr("resourceName", "resourceA") // resourceA
+ s := r.ScopeMetrics().AppendEmpty()
+ s.Scope().SetName("scopeB") // resourceA.scopeB
+ m := s.Metrics().AppendEmpty()
+ m.SetName("metricC") // resourceA.scopeB.metricC
+ dps := m.SetEmptySum()
+ dp := dps.DataPoints().AppendEmpty()
+ dp.Attributes().PutStr("dpName", "dpE") // resourceA.scopeB.metricC.dpE
+ m = s.Metrics().AppendEmpty()
+ m.SetName("metricD") // resourceA.scopeB.metricD
+ dps = m.SetEmptySum()
+ dp = dps.DataPoints().AppendEmpty()
+ dp.Attributes().PutStr("dpName", "dpE") // resourceA.scopeB.metricD.dpE
+ return md
+ }()
+ assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewSums("A", "B", "CD", "E")))
+ assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("B",
+ pmetricutiltest.Sum("C", pmetricutiltest.NumberDataPoint("E")),
+ pmetricutiltest.Sum("D", pmetricutiltest.NumberDataPoint("E")),
+ ),
+ ),
+ )))
+ })
+ t.Run("histograms", func(t *testing.T) {
+ expected := func() pmetric.Metrics {
+ md := pmetric.NewMetrics()
+ r := md.ResourceMetrics().AppendEmpty()
+ r.Resource().Attributes().PutStr("resourceName", "resourceA") // resourceA
+ s := r.ScopeMetrics().AppendEmpty()
+ s.Scope().SetName("scopeB") // resourceA.scopeB
+ m := s.Metrics().AppendEmpty()
+ m.SetName("metricC") // resourceA.scopeB.metricC
+ dps := m.SetEmptyHistogram()
+ dp := dps.DataPoints().AppendEmpty()
+ dp.Attributes().PutStr("dpName", "dpE") // resourceA.scopeB.metricC.dpE
+ m = s.Metrics().AppendEmpty()
+ m.SetName("metricD") // resourceA.scopeB.metricD
+ dps = m.SetEmptyHistogram()
+ dp = dps.DataPoints().AppendEmpty()
+ dp.Attributes().PutStr("dpName", "dpE") // resourceA.scopeB.metricD.dpE
+ return md
+ }()
+ assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewHistograms("A", "B", "CD", "E")))
+ assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("B",
+ pmetricutiltest.Histogram("C", pmetricutiltest.HistogramDataPoint("E")),
+ pmetricutiltest.Histogram("D", pmetricutiltest.HistogramDataPoint("E")),
+ ),
+ ),
+ )))
+ })
+ t.Run("exponential_histograms", func(t *testing.T) {
+ expected := func() pmetric.Metrics {
+ md := pmetric.NewMetrics()
+ r := md.ResourceMetrics().AppendEmpty()
+ r.Resource().Attributes().PutStr("resourceName", "resourceA") // resourceA
+ s := r.ScopeMetrics().AppendEmpty()
+ s.Scope().SetName("scopeB") // resourceA.scopeB
+ m := s.Metrics().AppendEmpty()
+ m.SetName("metricC") // resourceA.scopeB.metricC
+ dps := m.SetEmptyExponentialHistogram()
+ dp := dps.DataPoints().AppendEmpty()
+ dp.Attributes().PutStr("dpName", "dpE") // resourceA.scopeB.metricC.dpE
+ m = s.Metrics().AppendEmpty()
+ m.SetName("metricD") // resourceA.scopeB.metricD
+ dps = m.SetEmptyExponentialHistogram()
+ dp = dps.DataPoints().AppendEmpty()
+ dp.Attributes().PutStr("dpName", "dpE") // resourceA.scopeB.metricD.dpE
+ return md
+ }()
+ assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewExponentialHistograms("A", "B", "CD", "E")))
+ assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("B",
+ pmetricutiltest.ExponentialHistogram("C", pmetricutiltest.ExponentialHistogramDataPoint("E")),
+ pmetricutiltest.ExponentialHistogram("D", pmetricutiltest.ExponentialHistogramDataPoint("E")),
+ ),
+ ),
+ )))
+ })
+ t.Run("summaries", func(t *testing.T) {
+ expected := func() pmetric.Metrics {
+ md := pmetric.NewMetrics()
+ r := md.ResourceMetrics().AppendEmpty()
+ r.Resource().Attributes().PutStr("resourceName", "resourceA") // resourceA
+ s := r.ScopeMetrics().AppendEmpty()
+ s.Scope().SetName("scopeB") // resourceA.scopeB
+ m := s.Metrics().AppendEmpty()
+ m.SetName("metricC") // resourceA.scopeB.metricC
+ dps := m.SetEmptySummary()
+ dp := dps.DataPoints().AppendEmpty()
+ dp.Attributes().PutStr("dpName", "dpE") // resourceA.scopeB.metricC.dpE
+ m = s.Metrics().AppendEmpty()
+ m.SetName("metricD") // resourceA.scopeB.metricD
+ dps = m.SetEmptySummary()
+ dp = dps.DataPoints().AppendEmpty()
+ dp.Attributes().PutStr("dpName", "dpE") // resourceA.scopeB.metricD.dpE
+ return md
+ }()
+ assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewSummaries("A", "B", "CD", "E")))
+ assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("B",
+ pmetricutiltest.Summary("C", pmetricutiltest.SummaryDataPoint("E")),
+ pmetricutiltest.Summary("D", pmetricutiltest.SummaryDataPoint("E")),
+ ),
+ ),
+ )))
+ })
})
t.Run("two_datapoints", func(t *testing.T) {
+ t.Run("gauge", func(t *testing.T) {
+ expected := func() pmetric.Metrics {
+ md := pmetric.NewMetrics()
+ r := md.ResourceMetrics().AppendEmpty()
+ r.Resource().Attributes().PutStr("resourceName", "resourceA") // resourceA
+ s := r.ScopeMetrics().AppendEmpty()
+ s.Scope().SetName("scopeB") // resourceA.scopeB
+ m := s.Metrics().AppendEmpty()
+ m.SetName("metricC") // resourceA.scopeB.metricC
+ dps := m.SetEmptyGauge()
+ dp := dps.DataPoints().AppendEmpty()
+ dp.Attributes().PutStr("dpName", "dpD") // resourceA.scopeB.metricC.dpD
+ dp = dps.DataPoints().AppendEmpty()
+ dp.Attributes().PutStr("dpName", "dpE") // resourceA.scopeB.metricC.dpE
+ return md
+ }()
+ assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewGauges("A", "B", "C", "DE")))
+ assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("B", pmetricutiltest.Gauge("C", pmetricutiltest.NumberDataPoint("D"), pmetricutiltest.NumberDataPoint("E"))),
+ ),
+ )))
+ })
+ t.Run("sum", func(t *testing.T) {
+ expected := func() pmetric.Metrics {
+ md := pmetric.NewMetrics()
+ r := md.ResourceMetrics().AppendEmpty()
+ r.Resource().Attributes().PutStr("resourceName", "resourceA") // resourceA
+ s := r.ScopeMetrics().AppendEmpty()
+ s.Scope().SetName("scopeB") // resourceA.scopeB
+ m := s.Metrics().AppendEmpty()
+ m.SetName("metricC") // resourceA.scopeB.metricC
+ dps := m.SetEmptySum()
+ dp := dps.DataPoints().AppendEmpty()
+ dp.Attributes().PutStr("dpName", "dpD") // resourceA.scopeB.metricC.dpD
+ dp = dps.DataPoints().AppendEmpty()
+ dp.Attributes().PutStr("dpName", "dpE") // resourceA.scopeB.metricC.dpE
+ return md
+ }()
+ assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewSums("A", "B", "C", "DE")))
+ assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("B", pmetricutiltest.Sum("C", pmetricutiltest.NumberDataPoint("D"), pmetricutiltest.NumberDataPoint("E"))),
+ ),
+ )))
+ })
+ t.Run("histogram", func(t *testing.T) {
+ expected := func() pmetric.Metrics {
+ md := pmetric.NewMetrics()
+ r := md.ResourceMetrics().AppendEmpty()
+ r.Resource().Attributes().PutStr("resourceName", "resourceA") // resourceA
+ s := r.ScopeMetrics().AppendEmpty()
+ s.Scope().SetName("scopeB") // resourceA.scopeB
+ m := s.Metrics().AppendEmpty()
+ m.SetName("metricC") // resourceA.scopeB.metricC
+ dps := m.SetEmptyHistogram()
+ dp := dps.DataPoints().AppendEmpty()
+ dp.Attributes().PutStr("dpName", "dpD") // resourceA.scopeB.metricC.dpD
+ dp = dps.DataPoints().AppendEmpty()
+ dp.Attributes().PutStr("dpName", "dpE") // resourceA.scopeB.metricC.dpE
+ return md
+ }()
+ assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewHistograms("A", "B", "C", "DE")))
+ assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("B", pmetricutiltest.Histogram("C", pmetricutiltest.HistogramDataPoint("D"), pmetricutiltest.HistogramDataPoint("E"))),
+ ),
+ )))
+ })
+ t.Run("exponential_histogram", func(t *testing.T) {
+ expected := func() pmetric.Metrics {
+ md := pmetric.NewMetrics()
+ r := md.ResourceMetrics().AppendEmpty()
+ r.Resource().Attributes().PutStr("resourceName", "resourceA") // resourceA
+ s := r.ScopeMetrics().AppendEmpty()
+ s.Scope().SetName("scopeB") // resourceA.scopeB
+ m := s.Metrics().AppendEmpty()
+ m.SetName("metricC") // resourceA.scopeB.metricC
+ dps := m.SetEmptyExponentialHistogram()
+ dp := dps.DataPoints().AppendEmpty()
+ dp.Attributes().PutStr("dpName", "dpD") // resourceA.scopeB.metricC.dpD
+ dp = dps.DataPoints().AppendEmpty()
+ dp.Attributes().PutStr("dpName", "dpE") // resourceA.scopeB.metricC.dpE
+ return md
+ }()
+ assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewExponentialHistograms("A", "B", "C", "DE")))
+ assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("B", pmetricutiltest.ExponentialHistogram("C", pmetricutiltest.ExponentialHistogramDataPoint("D"), pmetricutiltest.ExponentialHistogramDataPoint("E"))),
+ ),
+ )))
+ })
+ t.Run("summary", func(t *testing.T) {
+ expected := func() pmetric.Metrics {
+ md := pmetric.NewMetrics()
+ r := md.ResourceMetrics().AppendEmpty()
+ r.Resource().Attributes().PutStr("resourceName", "resourceA") // resourceA
+ s := r.ScopeMetrics().AppendEmpty()
+ s.Scope().SetName("scopeB") // resourceA.scopeB
+ m := s.Metrics().AppendEmpty()
+ m.SetName("metricC") // resourceA.scopeB.metricC
+ dps := m.SetEmptySummary()
+ dp := dps.DataPoints().AppendEmpty()
+ dp.Attributes().PutStr("dpName", "dpD") // resourceA.scopeB.metricC.dpD
+ dp = dps.DataPoints().AppendEmpty()
+ dp.Attributes().PutStr("dpName", "dpE") // resourceA.scopeB.metricC.dpE
+ return md
+ }()
+ assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewSummaries("A", "B", "C", "DE")))
+ assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetricsFromOpts(
+ pmetricutiltest.Resource("A",
+ pmetricutiltest.Scope("B", pmetricutiltest.Summary("C", pmetricutiltest.SummaryDataPoint("D"), pmetricutiltest.SummaryDataPoint("E"))),
+ ),
+ )))
+ })
+ })
+
+ t.Run("all_metric_types", func(t *testing.T) {
expected := func() pmetric.Metrics {
md := pmetric.NewMetrics()
r := md.ResourceMetrics().AppendEmpty()
r.Resource().Attributes().PutStr("resourceName", "resourceA") // resourceA
s := r.ScopeMetrics().AppendEmpty()
s.Scope().SetName("scopeB") // resourceA.scopeB
+
m := s.Metrics().AppendEmpty()
m.SetName("metricC") // resourceA.scopeB.metricC
- dps := m.SetEmptyGauge()
- dp := dps.DataPoints().AppendEmpty()
- dp.Attributes().PutStr("dpName", "dpD") // resourceA.scopeB.metricC.dpD
- dp = dps.DataPoints().AppendEmpty()
- dp.Attributes().PutStr("dpName", "dpE") // resourceA.scopeB.metricC.dpE
+ gauge := m.SetEmptyGauge()
+ ndp := gauge.DataPoints().AppendEmpty()
+ ndp.Attributes().PutStr("dpName", "dpX") // resourceA.scopeB.metricC.dpX
+ ndp = gauge.DataPoints().AppendEmpty()
+ ndp.Attributes().PutStr("dpName", "dpY") // resourceA.scopeB.metricC.dpY
+
+ m = s.Metrics().AppendEmpty()
+ m.SetName("metricD") // resourceA.scopeB.metricD
+ sum := m.SetEmptySum()
+ ndp = sum.DataPoints().AppendEmpty()
+ ndp.Attributes().PutStr("dpName", "dpX") // resourceA.scopeB.metricD.dpX
+ ndp = sum.DataPoints().AppendEmpty()
+ ndp.Attributes().PutStr("dpName", "dpY") // resourceA.scopeB.metricD.dpY
+
+ m = s.Metrics().AppendEmpty()
+ m.SetName("metricE") // resourceA.scopeB.metricE
+ hist := m.SetEmptyHistogram()
+ hdp := hist.DataPoints().AppendEmpty()
+ hdp.Attributes().PutStr("dpName", "dpX") // resourceA.scopeB.metricE.dpX
+ hdp = hist.DataPoints().AppendEmpty()
+ hdp.Attributes().PutStr("dpName", "dpY") // resourceA.scopeB.metricE.dpY
+
+ m = s.Metrics().AppendEmpty()
+ m.SetName("metricF") // resourceA.scopeB.metricF
+ expHist := m.SetEmptyExponentialHistogram()
+ edp := expHist.DataPoints().AppendEmpty()
+ edp.Attributes().PutStr("dpName", "dpX") // resourceA.scopeB.metricF.dpX
+ edp = expHist.DataPoints().AppendEmpty()
+ edp.Attributes().PutStr("dpName", "dpY") // resourceA.scopeB.metricF.dpY
+
+ m = s.Metrics().AppendEmpty()
+ m.SetName("metricG") // resourceA.scopeB.metricG
+ smry := m.SetEmptySummary()
+ sdp := smry.DataPoints().AppendEmpty()
+ sdp.Attributes().PutStr("dpName", "dpX") // resourceA.scopeB.metricG.dpX
+ sdp = smry.DataPoints().AppendEmpty()
+ sdp.Attributes().PutStr("dpName", "dpY") // resourceA.scopeB.metricG.dpY
+
return md
}()
- assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetrics("A", "B", "C", "DE")))
assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetricsFromOpts(
pmetricutiltest.Resource("A",
- pmetricutiltest.Scope("B", pmetricutiltest.Metric("C", pmetricutiltest.NumberDataPoint("D"), pmetricutiltest.NumberDataPoint("E"))),
+ pmetricutiltest.Scope("B",
+ pmetricutiltest.Gauge("C", pmetricutiltest.NumberDataPoint("X"), pmetricutiltest.NumberDataPoint("Y")),
+ pmetricutiltest.Sum("D", pmetricutiltest.NumberDataPoint("X"), pmetricutiltest.NumberDataPoint("Y")),
+ pmetricutiltest.Histogram("E", pmetricutiltest.HistogramDataPoint("X"), pmetricutiltest.HistogramDataPoint("Y")),
+ pmetricutiltest.ExponentialHistogram("F", pmetricutiltest.ExponentialHistogramDataPoint("X"), pmetricutiltest.ExponentialHistogramDataPoint("Y")),
+ pmetricutiltest.Summary("G", pmetricutiltest.SummaryDataPoint("X"), pmetricutiltest.SummaryDataPoint("Y")),
+ ),
),
)))
})
diff --git a/connector/routingconnector/metrics.go b/connector/routingconnector/metrics.go
index 7bdc81519b4b..92bd654caa47 100644
--- a/connector/routingconnector/metrics.go
+++ b/connector/routingconnector/metrics.go
@@ -15,6 +15,7 @@ import (
"github.com/open-telemetry/opentelemetry-collector-contrib/connector/routingconnector/internal/pmetricutil"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource"
)
@@ -97,6 +98,15 @@ func (c *metricsConnector) switchMetrics(ctx context.Context, md pmetric.Metrics
return isMatch
},
)
+ case "datapoint":
+ pmetricutil.MoveDataPointsWithContextIf(md, matchedMetrics,
+ func(rm pmetric.ResourceMetrics, sm pmetric.ScopeMetrics, m pmetric.Metric, dp any) bool {
+ dptx := ottldatapoint.NewTransformContext(dp, m, sm.Metrics(), sm.Scope(), rm.Resource(), sm, rm)
+ _, isMatch, err := route.dataPointStatement.Execute(ctx, dptx)
+ errs = errors.Join(errs, err)
+ return isMatch
+ },
+ )
}
if errs != nil {
if c.config.ErrorMode == ottl.PropagateError {
diff --git a/connector/routingconnector/metrics_test.go b/connector/routingconnector/metrics_test.go
index fbbf7b383381..2f8335980940 100644
--- a/connector/routingconnector/metrics_test.go
+++ b/connector/routingconnector/metrics_test.go
@@ -515,9 +515,13 @@ func TestMetricsConnectorDetailed(t *testing.T) {
isMetricX := `name == "metricX"`
isMetricY := `name == "metricY"`
- isScopeCFromLowerContext := `instrumentation_scope.name == "scopeC"`
- isScopeDFromLowerContext := `instrumentation_scope.name == "scopeD"`
+ isDataPointG := `attributes["dpName"] == "dpG"`
+ isDataPointH := `attributes["dpName"] == "dpH"`
+ isDataPointX := `attributes["dpName"] == "dpX"`
+ isDataPointY := `attributes["dpName"] == "dpY"`
+ isMetricFFromLowerContext := `metric.name == "metricF"`
+ isScopeDFromLowerContext := `instrumentation_scope.name == "scopeD"`
isResourceBFromLowerContext := `resource.attributes["resourceName"] == "resourceB"`
testCases := []struct {
@@ -536,10 +540,10 @@ func TestMetricsConnectorDetailed(t *testing.T) {
withDefault(idSinkD),
),
ctx: context.Background(),
- input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
expectSink0: pmetric.Metrics{},
expectSink1: pmetric.Metrics{},
- expectSinkD: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
+ expectSinkD: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
},
{
name: "request/match_any_value",
@@ -554,8 +558,8 @@ func TestMetricsConnectorDetailed(t *testing.T) {
),
map[string]string{"X-Tenant": "notacme"},
),
- input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
- expectSink0: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSink0: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
expectSink1: pmetric.Metrics{},
expectSinkD: pmetric.Metrics{},
},
@@ -566,8 +570,8 @@ func TestMetricsConnectorDetailed(t *testing.T) {
withDefault(idSinkD),
),
ctx: withGRPCMetadata(context.Background(), map[string]string{"X-Tenant": "acme"}),
- input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
- expectSink0: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSink0: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
expectSink1: pmetric.Metrics{},
expectSinkD: pmetric.Metrics{},
},
@@ -578,10 +582,10 @@ func TestMetricsConnectorDetailed(t *testing.T) {
withDefault(idSinkD),
),
ctx: withGRPCMetadata(context.Background(), map[string]string{"X-Tenant": "notacme"}),
- input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
expectSink0: pmetric.Metrics{},
expectSink1: pmetric.Metrics{},
- expectSinkD: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
+ expectSinkD: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
},
{
name: "request/match_http_value",
@@ -590,8 +594,8 @@ func TestMetricsConnectorDetailed(t *testing.T) {
withDefault(idSinkD),
),
ctx: withHTTPMetadata(context.Background(), map[string][]string{"X-Tenant": {"acme"}}),
- input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
- expectSink0: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSink0: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
expectSink1: pmetric.Metrics{},
expectSinkD: pmetric.Metrics{},
},
@@ -602,8 +606,8 @@ func TestMetricsConnectorDetailed(t *testing.T) {
withDefault(idSinkD),
),
ctx: withHTTPMetadata(context.Background(), map[string][]string{"X-Tenant": {"notacme", "acme"}}),
- input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
- expectSink0: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSink0: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
expectSink1: pmetric.Metrics{},
expectSinkD: pmetric.Metrics{},
},
@@ -614,10 +618,10 @@ func TestMetricsConnectorDetailed(t *testing.T) {
withDefault(idSinkD),
),
ctx: withHTTPMetadata(context.Background(), map[string][]string{"X-Tenant": {"notacme"}}),
- input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
expectSink0: pmetric.Metrics{},
expectSink1: pmetric.Metrics{},
- expectSinkD: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
+ expectSinkD: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
},
{
name: "resource/all_match_first_only",
@@ -626,8 +630,8 @@ func TestMetricsConnectorDetailed(t *testing.T) {
withRoute("resource", isResourceY, idSink1),
withDefault(idSinkD),
),
- input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
- expectSink0: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSink0: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
expectSink1: pmetric.Metrics{},
expectSinkD: pmetric.Metrics{},
},
@@ -638,9 +642,9 @@ func TestMetricsConnectorDetailed(t *testing.T) {
withRoute("resource", "true", idSink1),
withDefault(idSinkD),
),
- input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
expectSink0: pmetric.Metrics{},
- expectSink1: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
+ expectSink1: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
expectSinkD: pmetric.Metrics{},
},
{
@@ -650,8 +654,8 @@ func TestMetricsConnectorDetailed(t *testing.T) {
withRoute("resource", isResourceA+" or "+isResourceB, idSink1),
withDefault(idSinkD),
),
- input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
- expectSink0: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSink0: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
expectSink1: pmetric.Metrics{},
expectSinkD: pmetric.Metrics{},
},
@@ -662,9 +666,9 @@ func TestMetricsConnectorDetailed(t *testing.T) {
withRoute("resource", isResourceB, idSink1),
withDefault(idSinkD),
),
- input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
- expectSink0: pmetricutiltest.NewMetrics("A", "CD", "EF", "GH"),
- expectSink1: pmetricutiltest.NewMetrics("B", "CD", "EF", "GH"),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSink0: pmetricutiltest.NewGauges("A", "CD", "EF", "GH"),
+ expectSink1: pmetricutiltest.NewGauges("B", "CD", "EF", "GH"),
expectSinkD: pmetric.Metrics{},
},
{
@@ -674,10 +678,10 @@ func TestMetricsConnectorDetailed(t *testing.T) {
withRoute("resource", isResourceB, idSink1),
withDefault(idSinkD),
),
- input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
expectSink0: pmetric.Metrics{},
- expectSink1: pmetricutiltest.NewMetrics("B", "CD", "EF", "GH"),
- expectSinkD: pmetricutiltest.NewMetrics("A", "CD", "EF", "GH"),
+ expectSink1: pmetricutiltest.NewGauges("B", "CD", "EF", "GH"),
+ expectSinkD: pmetricutiltest.NewGauges("A", "CD", "EF", "GH"),
},
{
name: "resource/some_match_without_default",
@@ -685,9 +689,9 @@ func TestMetricsConnectorDetailed(t *testing.T) {
withRoute("resource", isResourceX, idSink0),
withRoute("resource", isResourceB, idSink1),
),
- input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
expectSink0: pmetric.Metrics{},
- expectSink1: pmetricutiltest.NewMetrics("B", "CD", "EF", "GH"),
+ expectSink1: pmetricutiltest.NewGauges("B", "CD", "EF", "GH"),
expectSinkD: pmetric.Metrics{},
},
{
@@ -697,10 +701,10 @@ func TestMetricsConnectorDetailed(t *testing.T) {
withRoute("resource", isResourceY, idSink1),
withDefault(idSinkD),
),
- input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
expectSink0: pmetric.Metrics{},
expectSink1: pmetric.Metrics{},
- expectSinkD: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
+ expectSinkD: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
},
{
name: "resource/match_none_without_default",
@@ -708,7 +712,7 @@ func TestMetricsConnectorDetailed(t *testing.T) {
withRoute("resource", isResourceX, idSink0),
withRoute("resource", isResourceY, idSink1),
),
- input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
expectSink0: pmetric.Metrics{},
expectSink1: pmetric.Metrics{},
expectSinkD: pmetric.Metrics{},
@@ -720,8 +724,8 @@ func TestMetricsConnectorDetailed(t *testing.T) {
withRoute("metric", isMetricY, idSink1),
withDefault(idSinkD),
),
- input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
- expectSink0: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSink0: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
expectSink1: pmetric.Metrics{},
expectSinkD: pmetric.Metrics{},
},
@@ -732,9 +736,9 @@ func TestMetricsConnectorDetailed(t *testing.T) {
withRoute("metric", "true", idSink1),
withDefault(idSinkD),
),
- input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
expectSink0: pmetric.Metrics{},
- expectSink1: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
+ expectSink1: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
expectSinkD: pmetric.Metrics{},
},
{
@@ -744,8 +748,8 @@ func TestMetricsConnectorDetailed(t *testing.T) {
withRoute("metric", isMetricE+" or "+isMetricF, idSink1),
withDefault(idSinkD),
),
- input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
- expectSink0: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSink0: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
expectSink1: pmetric.Metrics{},
expectSinkD: pmetric.Metrics{},
},
@@ -756,9 +760,9 @@ func TestMetricsConnectorDetailed(t *testing.T) {
withRoute("metric", isMetricF, idSink1),
withDefault(idSinkD),
),
- input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
- expectSink0: pmetricutiltest.NewMetrics("AB", "CD", "E", "GH"),
- expectSink1: pmetricutiltest.NewMetrics("AB", "CD", "F", "GH"),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSink0: pmetricutiltest.NewGauges("AB", "CD", "E", "GH"),
+ expectSink1: pmetricutiltest.NewGauges("AB", "CD", "F", "GH"),
expectSinkD: pmetric.Metrics{},
},
{
@@ -768,10 +772,10 @@ func TestMetricsConnectorDetailed(t *testing.T) {
withRoute("metric", isMetricF, idSink1),
withDefault(idSinkD),
),
- input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
expectSink0: pmetric.Metrics{},
- expectSink1: pmetricutiltest.NewMetrics("AB", "CD", "F", "GH"),
- expectSinkD: pmetricutiltest.NewMetrics("AB", "CD", "E", "GH"),
+ expectSink1: pmetricutiltest.NewGauges("AB", "CD", "F", "GH"),
+ expectSinkD: pmetricutiltest.NewGauges("AB", "CD", "E", "GH"),
},
{
name: "metric/some_match_without_default",
@@ -779,9 +783,9 @@ func TestMetricsConnectorDetailed(t *testing.T) {
withRoute("metric", isMetricX, idSink0),
withRoute("metric", isMetricF, idSink1),
),
- input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
expectSink0: pmetric.Metrics{},
- expectSink1: pmetricutiltest.NewMetrics("AB", "CD", "F", "GH"),
+ expectSink1: pmetricutiltest.NewGauges("AB", "CD", "F", "GH"),
expectSinkD: pmetric.Metrics{},
},
{
@@ -791,10 +795,10 @@ func TestMetricsConnectorDetailed(t *testing.T) {
withRoute("metric", isMetricY, idSink1),
withDefault(idSinkD),
),
- input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
expectSink0: pmetric.Metrics{},
expectSink1: pmetric.Metrics{},
- expectSinkD: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
+ expectSinkD: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
},
{
name: "metric/match_none_without_default",
@@ -802,7 +806,7 @@ func TestMetricsConnectorDetailed(t *testing.T) {
withRoute("metric", isMetricX, idSink0),
withRoute("metric", isMetricY, idSink1),
),
- input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
expectSink0: pmetric.Metrics{},
expectSink1: pmetric.Metrics{},
expectSinkD: pmetric.Metrics{},
@@ -814,22 +818,22 @@ func TestMetricsConnectorDetailed(t *testing.T) {
withRoute("metric", isMetricY, idSink1),
withDefault(idSinkD),
),
- input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
- expectSink0: pmetricutiltest.NewMetrics("B", "CD", "EF", "GH"),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSink0: pmetricutiltest.NewGauges("B", "CD", "EF", "GH"),
expectSink1: pmetric.Metrics{},
- expectSinkD: pmetricutiltest.NewMetrics("A", "CD", "EF", "GH"),
+ expectSinkD: pmetricutiltest.NewGauges("A", "CD", "EF", "GH"),
},
{
name: "metric/with_scope_condition",
cfg: testConfig(
- withRoute("metric", isScopeCFromLowerContext, idSink0),
+ withRoute("metric", isScopeDFromLowerContext, idSink0),
withRoute("metric", isMetricY, idSink1),
withDefault(idSinkD),
),
- input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
- expectSink0: pmetricutiltest.NewMetrics("AB", "C", "EF", "GH"),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSink0: pmetricutiltest.NewGauges("AB", "D", "EF", "GH"),
expectSink1: pmetric.Metrics{},
- expectSinkD: pmetricutiltest.NewMetrics("AB", "D", "EF", "GH"),
+ expectSinkD: pmetricutiltest.NewGauges("AB", "C", "EF", "GH"),
},
{
name: "metric/with_resource_and_scope_conditions",
@@ -838,28 +842,158 @@ func TestMetricsConnectorDetailed(t *testing.T) {
withRoute("metric", isMetricY, idSink1),
withDefault(idSinkD),
),
- input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
- expectSink0: pmetricutiltest.NewMetrics("B", "D", "EF", "GH"),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSink0: pmetricutiltest.NewGauges("B", "D", "EF", "GH"),
expectSink1: pmetric.Metrics{},
expectSinkD: pmetricutiltest.NewMetricsFromOpts(
pmetricutiltest.Resource("A",
pmetricutiltest.Scope("C",
- pmetricutiltest.Metric("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
- pmetricutiltest.Metric("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
),
pmetricutiltest.Scope("D",
- pmetricutiltest.Metric("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
- pmetricutiltest.Metric("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
),
),
pmetricutiltest.Resource("B",
pmetricutiltest.Scope("C",
- pmetricutiltest.Metric("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
- pmetricutiltest.Metric("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
+ pmetricutiltest.Gauge("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")),
),
),
),
},
+ {
+ name: "datapoint/all_match_first_only",
+ cfg: testConfig(
+ withRoute("datapoint", "true", idSink0),
+ withRoute("datapoint", isDataPointY, idSink1),
+ withDefault(idSinkD),
+ ),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSink0: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSink1: pmetric.Metrics{},
+ expectSinkD: pmetric.Metrics{},
+ },
+ {
+ name: "datapoint/all_match_last_only",
+ cfg: testConfig(
+ withRoute("datapoint", isDataPointX, idSink0),
+ withRoute("datapoint", "true", idSink1),
+ withDefault(idSinkD),
+ ),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSink0: pmetric.Metrics{},
+ expectSink1: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSinkD: pmetric.Metrics{},
+ },
+ {
+ name: "datapoint/all_match_only_once",
+ cfg: testConfig(
+ withRoute("datapoint", "true", idSink0),
+ withRoute("datapoint", isDataPointG+" or "+isDataPointH, idSink1),
+ withDefault(idSinkD),
+ ),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSink0: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSink1: pmetric.Metrics{},
+ expectSinkD: pmetric.Metrics{},
+ },
+ {
+ name: "datapoint/each_matches_one",
+ cfg: testConfig(
+ withRoute("datapoint", isDataPointG, idSink0),
+ withRoute("datapoint", isDataPointH, idSink1),
+ withDefault(idSinkD),
+ ),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSink0: pmetricutiltest.NewGauges("AB", "CD", "EF", "G"),
+ expectSink1: pmetricutiltest.NewGauges("AB", "CD", "EF", "H"),
+ expectSinkD: pmetric.Metrics{},
+ },
+ {
+ name: "datapoint/some_match_with_default",
+ cfg: testConfig(
+ withRoute("datapoint", isDataPointX, idSink0),
+ withRoute("datapoint", isDataPointH, idSink1),
+ withDefault(idSinkD),
+ ),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSink0: pmetric.Metrics{},
+ expectSink1: pmetricutiltest.NewGauges("AB", "CD", "EF", "H"),
+ expectSinkD: pmetricutiltest.NewGauges("AB", "CD", "EF", "G"),
+ },
+ {
+ name: "datapoint/some_match_without_default",
+ cfg: testConfig(
+ withRoute("datapoint", isDataPointX, idSink0),
+ withRoute("datapoint", isDataPointH, idSink1),
+ ),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSink0: pmetric.Metrics{},
+ expectSink1: pmetricutiltest.NewGauges("AB", "CD", "EF", "H"),
+ expectSinkD: pmetric.Metrics{},
+ },
+ {
+ name: "datapoint/match_none_with_default",
+ cfg: testConfig(
+ withRoute("datapoint", isDataPointX, idSink0),
+ withRoute("datapoint", isDataPointY, idSink1),
+ withDefault(idSinkD),
+ ),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSink0: pmetric.Metrics{},
+ expectSink1: pmetric.Metrics{},
+ expectSinkD: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ },
+ {
+ name: "datapoint/match_none_without_default",
+ cfg: testConfig(
+ withRoute("datapoint", isDataPointX, idSink0),
+ withRoute("datapoint", isDataPointY, idSink1),
+ ),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSink0: pmetric.Metrics{},
+ expectSink1: pmetric.Metrics{},
+ expectSinkD: pmetric.Metrics{},
+ },
+ {
+ name: "datapoint/with_resource_condition",
+ cfg: testConfig(
+ withRoute("datapoint", isResourceBFromLowerContext, idSink0),
+ withRoute("datapoint", isDataPointY, idSink1),
+ withDefault(idSinkD),
+ ),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSink0: pmetricutiltest.NewGauges("B", "CD", "EF", "GH"),
+ expectSink1: pmetric.Metrics{},
+ expectSinkD: pmetricutiltest.NewGauges("A", "CD", "EF", "GH"),
+ },
+ {
+ name: "datapoint/with_scope_condition",
+ cfg: testConfig(
+ withRoute("datapoint", isScopeDFromLowerContext, idSink0),
+ withRoute("datapoint", isDataPointY, idSink1),
+ withDefault(idSinkD),
+ ),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSink0: pmetricutiltest.NewGauges("AB", "D", "EF", "GH"),
+ expectSink1: pmetric.Metrics{},
+ expectSinkD: pmetricutiltest.NewGauges("AB", "C", "EF", "GH"),
+ },
+ {
+ name: "datapoint/with_metric_condition",
+ cfg: testConfig(
+ withRoute("datapoint", isMetricFFromLowerContext, idSink0),
+ withRoute("datapoint", isDataPointY, idSink1),
+ withDefault(idSinkD),
+ ),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSink0: pmetricutiltest.NewGauges("AB", "CD", "F", "GH"),
+ expectSink1: pmetric.Metrics{},
+ expectSinkD: pmetricutiltest.NewGauges("AB", "CD", "E", "GH"),
+ },
{
name: "mixed/match_resource_then_metrics",
cfg: testConfig(
@@ -867,10 +1001,10 @@ func TestMetricsConnectorDetailed(t *testing.T) {
withRoute("metric", isMetricE, idSink1),
withDefault(idSinkD),
),
- input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
- expectSink0: pmetricutiltest.NewMetrics("A", "CD", "EF", "GH"),
- expectSink1: pmetricutiltest.NewMetrics("B", "CD", "E", "GH"),
- expectSinkD: pmetricutiltest.NewMetrics("B", "CD", "F", "GH"),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSink0: pmetricutiltest.NewGauges("A", "CD", "EF", "GH"),
+ expectSink1: pmetricutiltest.NewGauges("B", "CD", "E", "GH"),
+ expectSinkD: pmetricutiltest.NewGauges("B", "CD", "F", "GH"),
},
{
name: "mixed/match_metrics_then_resource",
@@ -879,10 +1013,58 @@ func TestMetricsConnectorDetailed(t *testing.T) {
withRoute("resource", isResourceB, idSink1),
withDefault(idSinkD),
),
- input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
- expectSink0: pmetricutiltest.NewMetrics("AB", "CD", "E", "GH"),
- expectSink1: pmetricutiltest.NewMetrics("B", "CD", "F", "GH"),
- expectSinkD: pmetricutiltest.NewMetrics("A", "CD", "F", "GH"),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSink0: pmetricutiltest.NewGauges("AB", "CD", "E", "GH"),
+ expectSink1: pmetricutiltest.NewGauges("B", "CD", "F", "GH"),
+ expectSinkD: pmetricutiltest.NewGauges("A", "CD", "F", "GH"),
+ },
+ {
+ name: "mixed/match_resource_then_datapoint",
+ cfg: testConfig(
+ withRoute("resource", isResourceA, idSink0),
+ withRoute("datapoint", isDataPointG, idSink1),
+ withDefault(idSinkD),
+ ),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSink0: pmetricutiltest.NewGauges("A", "CD", "EF", "GH"),
+ expectSink1: pmetricutiltest.NewGauges("B", "CD", "EF", "G"),
+ expectSinkD: pmetricutiltest.NewGauges("B", "CD", "EF", "H"),
+ },
+ {
+ name: "mixed/match_datapoint_then_resource",
+ cfg: testConfig(
+ withRoute("datapoint", isDataPointG, idSink0),
+ withRoute("resource", isResourceB, idSink1),
+ withDefault(idSinkD),
+ ),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSink0: pmetricutiltest.NewGauges("AB", "CD", "EF", "G"),
+ expectSink1: pmetricutiltest.NewGauges("B", "CD", "EF", "H"),
+ expectSinkD: pmetricutiltest.NewGauges("A", "CD", "EF", "H"),
+ },
+ {
+ name: "mixed/match_metric_then_datapoint",
+ cfg: testConfig(
+ withRoute("metric", isMetricE, idSink0),
+ withRoute("datapoint", isDataPointG, idSink1),
+ withDefault(idSinkD),
+ ),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSink0: pmetricutiltest.NewGauges("AB", "CD", "E", "GH"),
+ expectSink1: pmetricutiltest.NewGauges("AB", "CD", "F", "G"),
+ expectSinkD: pmetricutiltest.NewGauges("AB", "CD", "F", "H"),
+ },
+ {
+ name: "mixed/match_datapoint_then_metric",
+ cfg: testConfig(
+ withRoute("datapoint", isDataPointG, idSink0),
+ withRoute("metric", isMetricE, idSink1),
+ withDefault(idSinkD),
+ ),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSink0: pmetricutiltest.NewGauges("AB", "CD", "EF", "G"),
+ expectSink1: pmetricutiltest.NewGauges("AB", "CD", "E", "H"),
+ expectSinkD: pmetricutiltest.NewGauges("AB", "CD", "F", "H"),
},
{
name: "mixed/match_resource_then_grpc_request",
@@ -892,9 +1074,9 @@ func TestMetricsConnectorDetailed(t *testing.T) {
withDefault(idSinkD),
),
ctx: withGRPCMetadata(context.Background(), map[string]string{"X-Tenant": "acme"}),
- input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
- expectSink0: pmetricutiltest.NewMetrics("A", "CD", "EF", "GH"),
- expectSink1: pmetricutiltest.NewMetrics("B", "CD", "EF", "GH"),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSink0: pmetricutiltest.NewGauges("A", "CD", "EF", "GH"),
+ expectSink1: pmetricutiltest.NewGauges("B", "CD", "EF", "GH"),
expectSinkD: pmetric.Metrics{},
},
{
@@ -905,9 +1087,22 @@ func TestMetricsConnectorDetailed(t *testing.T) {
withDefault(idSinkD),
),
ctx: withGRPCMetadata(context.Background(), map[string]string{"X-Tenant": "acme"}),
- input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
- expectSink0: pmetricutiltest.NewMetrics("AB", "CD", "F", "GH"),
- expectSink1: pmetricutiltest.NewMetrics("AB", "CD", "E", "GH"),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSink0: pmetricutiltest.NewGauges("AB", "CD", "F", "GH"),
+ expectSink1: pmetricutiltest.NewGauges("AB", "CD", "E", "GH"),
+ expectSinkD: pmetric.Metrics{},
+ },
+ {
+ name: "mixed/match_datapoint_then_grpc_request",
+ cfg: testConfig(
+ withRoute("datapoint", isDataPointG, idSink0),
+ withRoute("request", isAcme, idSink1),
+ withDefault(idSinkD),
+ ),
+ ctx: withGRPCMetadata(context.Background(), map[string]string{"X-Tenant": "acme"}),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSink0: pmetricutiltest.NewGauges("AB", "CD", "EF", "G"),
+ expectSink1: pmetricutiltest.NewGauges("AB", "CD", "EF", "H"),
expectSinkD: pmetric.Metrics{},
},
{
@@ -918,9 +1113,9 @@ func TestMetricsConnectorDetailed(t *testing.T) {
withDefault(idSinkD),
),
ctx: withHTTPMetadata(context.Background(), map[string][]string{"X-Tenant": {"acme"}}),
- input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
- expectSink0: pmetricutiltest.NewMetrics("A", "CD", "EF", "GH"),
- expectSink1: pmetricutiltest.NewMetrics("B", "CD", "EF", "GH"),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSink0: pmetricutiltest.NewGauges("A", "CD", "EF", "GH"),
+ expectSink1: pmetricutiltest.NewGauges("B", "CD", "EF", "GH"),
expectSinkD: pmetric.Metrics{},
},
{
@@ -931,9 +1126,22 @@ func TestMetricsConnectorDetailed(t *testing.T) {
withDefault(idSinkD),
),
ctx: withHTTPMetadata(context.Background(), map[string][]string{"X-Tenant": {"acme"}}),
- input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"),
- expectSink0: pmetricutiltest.NewMetrics("AB", "CD", "F", "GH"),
- expectSink1: pmetricutiltest.NewMetrics("AB", "CD", "E", "GH"),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSink0: pmetricutiltest.NewGauges("AB", "CD", "F", "GH"),
+ expectSink1: pmetricutiltest.NewGauges("AB", "CD", "E", "GH"),
+ expectSinkD: pmetric.Metrics{},
+ },
+ {
+ name: "mixed/match_datapoint_then_http_request",
+ cfg: testConfig(
+ withRoute("datapoint", isDataPointG, idSink0),
+ withRoute("request", isAcme, idSink1),
+ withDefault(idSinkD),
+ ),
+ ctx: withHTTPMetadata(context.Background(), map[string][]string{"X-Tenant": {"acme"}}),
+ input: pmetricutiltest.NewGauges("AB", "CD", "EF", "GH"),
+ expectSink0: pmetricutiltest.NewGauges("AB", "CD", "EF", "G"),
+ expectSink1: pmetricutiltest.NewGauges("AB", "CD", "EF", "H"),
expectSinkD: pmetric.Metrics{},
},
}
diff --git a/connector/routingconnector/router.go b/connector/routingconnector/router.go
index 98f05bc92287..665bc59fa821 100644
--- a/connector/routingconnector/router.go
+++ b/connector/routingconnector/router.go
@@ -14,6 +14,7 @@ import (
"github.com/open-telemetry/opentelemetry-collector-contrib/connector/routingconnector/internal/common"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource"
@@ -31,11 +32,12 @@ type consumerProvider[C any] func(...pipeline.ID) (C, error)
// parameter C is expected to be one of: consumer.Traces, consumer.Metrics, or
// consumer.Logs.
type router[C any] struct {
- logger *zap.Logger
- resourceParser ottl.Parser[ottlresource.TransformContext]
- spanParser ottl.Parser[ottlspan.TransformContext]
- metricParser ottl.Parser[ottlmetric.TransformContext]
- logParser ottl.Parser[ottllog.TransformContext]
+ logger *zap.Logger
+ resourceParser ottl.Parser[ottlresource.TransformContext]
+ spanParser ottl.Parser[ottlspan.TransformContext]
+ metricParser ottl.Parser[ottlmetric.TransformContext]
+ dataPointParser ottl.Parser[ottldatapoint.TransformContext]
+ logParser ottl.Parser[ottllog.TransformContext]
table []RoutingTableItem
routes map[string]routingItem[C]
@@ -72,17 +74,18 @@ func newRouter[C any](
}
type routingItem[C any] struct {
- consumer C
- statementContext string
- requestCondition *requestCondition
- resourceStatement *ottl.Statement[ottlresource.TransformContext]
- spanStatement *ottl.Statement[ottlspan.TransformContext]
- metricStatement *ottl.Statement[ottlmetric.TransformContext]
- logStatement *ottl.Statement[ottllog.TransformContext]
+ consumer C
+ statementContext string
+ requestCondition *requestCondition
+ resourceStatement *ottl.Statement[ottlresource.TransformContext]
+ spanStatement *ottl.Statement[ottlspan.TransformContext]
+ metricStatement *ottl.Statement[ottlmetric.TransformContext]
+ dataPointStatement *ottl.Statement[ottldatapoint.TransformContext]
+ logStatement *ottl.Statement[ottllog.TransformContext]
}
func (r *router[C]) buildParsers(table []RoutingTableItem, settings component.TelemetrySettings) error {
- var buildResource, buildSpan, buildMetric, buildLog bool
+ var buildResource, buildSpan, buildMetric, buildDataPoint, buildLog bool
for _, item := range table {
switch item.Context {
case "", "resource":
@@ -91,6 +94,8 @@ func (r *router[C]) buildParsers(table []RoutingTableItem, settings component.Te
buildSpan = true
case "metric":
buildMetric = true
+ case "datapoint":
+ buildDataPoint = true
case "log":
buildLog = true
}
@@ -126,6 +131,19 @@ func (r *router[C]) buildParsers(table []RoutingTableItem, settings component.Te
)
if err == nil {
r.metricParser = parser
+ } else {
+ errs = errors.Join(errs, err)
+ }
+ }
+ if buildDataPoint {
+ parser, err := ottldatapoint.NewParser(
+ common.Functions[ottldatapoint.TransformContext](),
+ settings,
+ )
+ if err == nil {
+ r.dataPointParser = parser
+ } else {
+ errs = errors.Join(errs, err)
}
}
if buildLog {
@@ -216,6 +234,12 @@ func (r *router[C]) registerRouteConsumers() (err error) {
return err
}
route.metricStatement = statement
+ case "datapoint":
+ statement, err := r.dataPointParser.ParseStatement(item.Statement)
+ if err != nil {
+ return err
+ }
+ route.dataPointStatement = statement
case "log":
statement, err := r.logParser.ParseStatement(item.Statement)
if err != nil {