diff --git a/.chloggen/add_doris_to_build.yaml b/.chloggen/add_doris_to_build.yaml new file mode 100644 index 000000000000..9581aa1a17c9 --- /dev/null +++ b/.chloggen/add_doris_to_build.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: new_component + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: dorisexporter + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Mark the Doris exporter as Alpha. + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [33479] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] diff --git a/.chloggen/exporter_prometheusremotewriteexporter_exemplar-int.yaml b/.chloggen/exporter_prometheusremotewriteexporter_exemplar-int.yaml new file mode 100644 index 000000000000..40ef4e29a8a0 --- /dev/null +++ b/.chloggen/exporter_prometheusremotewriteexporter_exemplar-int.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. 
+ +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: bug_fix + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: exporter/prometheusremotewrite + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Fix exemplar handling when the exemplar is an integer value. + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [36657] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: Metrics sent with exemplars as integer values are now correctly handled. + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] diff --git a/.chloggen/opamp-extension-health-reporting.yaml b/.chloggen/opamp-extension-health-reporting.yaml new file mode 100644 index 000000000000..b0a0e7f209dc --- /dev/null +++ b/.chloggen/opamp-extension-health-reporting.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: opampextension + +# A brief description of the change. 
Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Use status subscription for fine granular component health reporting + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [35856] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/.chloggen/split-emf-log-when-buckets-larger-than-100.yaml b/.chloggen/split-emf-log-when-buckets-larger-than-100.yaml new file mode 100644 index 000000000000..da57f8624ea9 --- /dev/null +++ b/.chloggen/split-emf-log-when-buckets-larger-than-100.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: awsemfexporter + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Split EMF log to multiple log splits when buckets larger than 100. + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [242] + +# (Optional) One or more lines of additional information to render under the primary note. 
+# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/.chloggen/test_testbed-batcher.yaml b/.chloggen/test_testbed-batcher.yaml new file mode 100644 index 000000000000..7d3c989362cd --- /dev/null +++ b/.chloggen/test_testbed-batcher.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: testbed + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Add batcher performance tests + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [36206] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. 
+# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/.github/workflows/add-codeowners-to-pr.yml b/.github/workflows/add-codeowners-to-pr.yml index 12d3fced3674..4c63c8eb0029 100644 --- a/.github/workflows/add-codeowners-to-pr.yml +++ b/.github/workflows/add-codeowners-to-pr.yml @@ -7,7 +7,7 @@ on: jobs: add-owners-to-pr: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 if: ${{ github.actor != 'dependabot[bot]' && github.repository_owner == 'open-telemetry' }} steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/add-labels.yml b/.github/workflows/add-labels.yml index f9742654b507..ec5f20667a32 100644 --- a/.github/workflows/add-labels.yml +++ b/.github/workflows/add-labels.yml @@ -7,7 +7,7 @@ jobs: add-labels: if: ${{ !github.event.issue.pull_request && startsWith(github.event.comment.body, '/label') && github.repository_owner == 'open-telemetry' }} - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/auto-assign-owners.yml b/.github/workflows/auto-assign-owners.yml index b869cfb421b2..427e16ab5b4d 100644 --- a/.github/workflows/auto-assign-owners.yml +++ b/.github/workflows/auto-assign-owners.yml @@ -9,7 +9,7 @@ concurrency: jobs: add-owner: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 if: ${{ github.actor != 'dependabot[bot]' }} steps: - name: run diff --git a/.github/workflows/auto-update-jmx-metrics-component.yml b/.github/workflows/auto-update-jmx-metrics-component.yml index 8c16408f9778..eebac4d891b8 100644 --- a/.github/workflows/auto-update-jmx-metrics-component.yml +++ b/.github/workflows/auto-update-jmx-metrics-component.yml @@ -8,7 +8,7 @@ on: jobs: check-versions: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 outputs: latest-version: ${{ steps.check-versions.outputs.latest-version }} already-added: ${{ steps.check-versions.outputs.already-added }} @@ -48,7 +48,7 @@ jobs: echo "already-opened=$already_opened" >> 
$GITHUB_OUTPUT update-jmx-metrics-component: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 if: | needs.check-versions.outputs.already-added != 'true' && needs.check-versions.outputs.already-opened != 'true' diff --git a/.github/workflows/build-and-test-arm.yml b/.github/workflows/build-and-test-arm.yml index 285a79e222aa..b038cc225e54 100644 --- a/.github/workflows/build-and-test-arm.yml +++ b/.github/workflows/build-and-test-arm.yml @@ -71,7 +71,7 @@ jobs: run: make -j2 gotest GROUP=${{ matrix.group }} arm-unittest: if: ${{ github.actor != 'dependabot[bot]' && (contains(github.event.pull_request.labels.*.name, 'Run ARM') || github.event_name == 'push' || github.event_name == 'merge_group') }} - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 needs: [arm-unittest-matrix] steps: - name: Print result diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index 858868bd789c..a53f3c70b68a 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -20,7 +20,7 @@ concurrency: jobs: setup-environment: timeout-minutes: 30 - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 if: ${{ github.actor != 'dependabot[bot]' }} steps: - uses: actions/checkout@v4 @@ -45,7 +45,7 @@ jobs: if: steps.go-cache.outputs.cache-hit != 'true' run: make install-tools check-collector-module-version: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 needs: [setup-environment] steps: - uses: actions/checkout@v4 @@ -53,7 +53,7 @@ jobs: - name: Check Collector Module Version run: ./.github/workflows/scripts/check-collector-module-version.sh check-codeowners: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 needs: [setup-environment] steps: - uses: actions/checkout@v4 @@ -88,7 +88,7 @@ jobs: - cmd-0 - cmd-1 - other - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 needs: [setup-environment] steps: - uses: actions/checkout@v4 @@ -121,7 +121,7 @@ jobs: run: GOOS=${{ matrix.goos }} GOARCH=amd64 make -j2 golint GROUP=${{ matrix.group }} 
lint: if: ${{ github.actor != 'dependabot[bot]' && always() }} - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 needs: [setup-environment, lint-matrix] steps: - name: Print result @@ -156,7 +156,7 @@ jobs: - pkg - cmd-0 - cmd-1 - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 timeout-minutes: 30 steps: - name: Checkout Repo @@ -182,7 +182,7 @@ jobs: - name: Run `govulncheck` run: make -j2 gogovulncheck GROUP=${{ matrix.group }} checks: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 needs: [setup-environment] steps: - uses: actions/checkout@v4 @@ -229,11 +229,6 @@ jobs: run: | make gendistributions git diff -s --exit-code || (echo 'Generated code is out of date, please run "make gendistributions" and commit the changes in this PR.' && exit 1) - - name: Gen CODEOWNERS - if: (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/v')) && github.repository == 'open-telemetry/opentelemetry-collector-contrib' - run: | - GITHUB_TOKEN=${{ secrets.READ_ORG_AND_USER_TOKEN }} make gengithub - git diff -s --exit-code || (echo 'Generated code is out of date, please apply this diff and commit the changes in this PR.' 
&& git diff && exit 1) - name: CodeGen run: | make -j2 generate @@ -252,7 +247,7 @@ jobs: fail-fast: false matrix: go-version: ["1.23.0", "1.22.8"] # 1.20 is interpreted as 1.2 without quotes - runner: [ubuntu-latest] + runner: [ubuntu-24.04] group: - receiver-0 - receiver-1 @@ -313,7 +308,7 @@ jobs: path: ${{ matrix.group }}-coverage.txt unittest: if: ${{ github.actor != 'dependabot[bot]' && always() }} - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 needs: [setup-environment, unittest-matrix] steps: - name: Print result @@ -328,7 +323,7 @@ jobs: false fi coverage: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 needs: [unittest] steps: - uses: actions/checkout@v4 @@ -362,7 +357,7 @@ jobs: - pkg - cmd-0 - cmd-1 - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 needs: [setup-environment] steps: - uses: actions/checkout@v4 @@ -385,7 +380,7 @@ jobs: integration-tests: if: ${{ github.actor != 'dependabot[bot]' && always() }} - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 needs: [setup-environment, integration-tests-matrix] steps: - name: Print result @@ -401,7 +396,7 @@ jobs: fi correctness-traces: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 needs: [setup-environment] steps: - uses: actions/checkout@v4 @@ -428,7 +423,7 @@ jobs: - name: Correctness run: make -C testbed run-correctness-traces-tests correctness-metrics: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 needs: [setup-environment] steps: - uses: actions/checkout@v4 @@ -456,7 +451,7 @@ jobs: run: make -C testbed run-correctness-metrics-tests build-examples: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 needs: [setup-environment] steps: - uses: actions/checkout@v4 @@ -465,7 +460,7 @@ jobs: run: make build-examples cross-compile: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 needs: [setup-environment] strategy: fail-fast: false @@ -535,7 +530,7 @@ jobs: path: ./bin/* publish-check: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 needs: [lint, unittest, integration-tests] steps: - uses: 
actions/checkout@v4 @@ -549,7 +544,7 @@ jobs: id: check run: ./.github/workflows/scripts/verify-dist-files-exist.sh publish-dev: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 needs: [lint, unittest, integration-tests] if: (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/v')) && github.repository == 'open-telemetry/opentelemetry-collector-contrib' steps: @@ -612,7 +607,7 @@ jobs: docker push otel/opentelemetry-collector-contrib-dev:$GITHUB_SHA docker push otel/opentelemetry-collector-contrib-dev:latest publish-stable: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 needs: [lint, unittest, integration-tests] if: startsWith(github.ref, 'refs/tags/v') && github.repository == 'open-telemetry/opentelemetry-collector-contrib' steps: @@ -630,7 +625,7 @@ jobs: # This job updates the "next release" milestone # to the latest released version and creates a new milestone # named "next release" in its place - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 needs: [publish-stable] if: startsWith(github.ref, 'refs/tags/v') && github.repository == 'open-telemetry/opentelemetry-collector-contrib' steps: diff --git a/.github/workflows/changelog.yml b/.github/workflows/changelog.yml index 97a9d78c2b3d..f2966c9065ff 100644 --- a/.github/workflows/changelog.yml +++ b/.github/workflows/changelog.yml @@ -24,7 +24,7 @@ concurrency: jobs: changelog: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 if: ${{ github.actor != 'dependabot[bot]' }} env: PR_HEAD: ${{ github.event.pull_request.head.sha }} diff --git a/.github/workflows/check-codeowners.yaml b/.github/workflows/check-codeowners.yaml new file mode 100644 index 000000000000..ec61d9aca6ff --- /dev/null +++ b/.github/workflows/check-codeowners.yaml @@ -0,0 +1,60 @@ +name: codeowners +on: + push: + branches: [main] + tags: + - "v[0-9]+.[0-9]+.[0-9]+*" + pull_request_target: + types: + - opened + - synchronize + - edited + - reopened +env: + # Make sure to exit early if cache segment download times out after 2 
minutes. + # We limit cache download as a whole to 5 minutes. + SEGMENT_DOWNLOAD_TIMEOUT_MINS: 2 + +# Do not cancel this workflow on main. See https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/16616 +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + setup-environment: + timeout-minutes: 30 + runs-on: ubuntu-latest + if: ${{ github.actor != 'dependabot[bot]' }} + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.22.8" + cache: false + - name: Cache Go + id: go-cache + timeout-minutes: 5 + uses: actions/cache@v4 + with: + path: | + ~/go/bin + ~/go/pkg/mod + ./.tools + key: go-cache-${{ runner.os }}-${{ hashFiles('**/go.sum') }} + check-codeowners: + runs-on: ubuntu-latest + needs: [setup-environment] + steps: + - uses: actions/checkout@v4 + - uses: actions/checkout@v4 + with: + ref: ${{github.event.pull_request.head.ref}} + repository: ${{github.event.pull_request.head.repo.full_name}} + path: pr + - name: Gen CODEOWNERS + if: github.repository == 'open-telemetry/opentelemetry-collector-contrib' + run: | + make githubgen-install + cd pr + GITHUB_TOKEN=${{ secrets.READ_ORG_AND_USER_TOKEN }} githubgen + git diff -s --exit-code || (echo 'Generated code is out of date, please apply this diff and commit the changes in this PR.' 
&& git diff && exit 1) diff --git a/.github/workflows/check-links.yaml b/.github/workflows/check-links.yaml index bbfd0bb2ed49..7b0dccb45d4a 100644 --- a/.github/workflows/check-links.yaml +++ b/.github/workflows/check-links.yaml @@ -16,7 +16,7 @@ env: jobs: changedfiles: name: changed files - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 env: PR_HEAD: ${{ github.event.pull_request.head.sha }} if: ${{ github.actor != 'dependabot[bot]' }} @@ -31,7 +31,7 @@ jobs: run: | echo "md=$(git diff --name-only --diff-filter=ACMRTUXB $(git merge-base origin/main $PR_HEAD) $PR_HEAD | grep .md$ | xargs)" >> $GITHUB_OUTPUT check-links: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 needs: changedfiles if: ${{needs.changedfiles.outputs.md}} steps: diff --git a/.github/workflows/close-stale.yaml b/.github/workflows/close-stale.yaml index cb178607eda5..c50fc3b0a0fe 100644 --- a/.github/workflows/close-stale.yaml +++ b/.github/workflows/close-stale.yaml @@ -6,7 +6,7 @@ on: jobs: stale: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 env: GH_TOKEN: ${{ github.token }} steps: diff --git a/.github/workflows/e2e-tests.yml b/.github/workflows/e2e-tests.yml index 865b45366147..deeec0d8373e 100644 --- a/.github/workflows/e2e-tests.yml +++ b/.github/workflows/e2e-tests.yml @@ -19,7 +19,7 @@ env: SEGMENT_DOWNLOAD_TIMEOUT_MINS: 2 jobs: collector-build: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 if: ${{ github.actor != 'dependabot[bot]' }} steps: - name: Checkout @@ -51,7 +51,7 @@ jobs: path: ./bin/* supervisor-test: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 needs: collector-build steps: - uses: actions/checkout@v4 @@ -83,7 +83,7 @@ jobs: go test -v --tags=e2e docker-build: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - name: Checkout uses: actions/checkout@v4 @@ -131,7 +131,7 @@ jobs: - processor/k8sattributesprocessor - receiver/kubeletstatsreceiver - receiver/k8sobjectsreceiver - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 needs: docker-build steps: - uses: 
actions/checkout@v4 @@ -179,7 +179,7 @@ jobs: kubernetes-test: if: ${{ github.actor != 'dependabot[bot]' && always() }} - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 needs: [kubernetes-test-matrix] steps: - name: Print result diff --git a/.github/workflows/generate-component-labels.yml b/.github/workflows/generate-component-labels.yml index c7c194677d28..5938aa5621aa 100644 --- a/.github/workflows/generate-component-labels.yml +++ b/.github/workflows/generate-component-labels.yml @@ -10,7 +10,7 @@ on: jobs: generate-component-labels: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 if: ${{ github.repository_owner == 'open-telemetry' }} steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/generate-weekly-report.yml b/.github/workflows/generate-weekly-report.yml index d58b25c651eb..2ff2648f5e74 100644 --- a/.github/workflows/generate-weekly-report.yml +++ b/.github/workflows/generate-weekly-report.yml @@ -10,7 +10,7 @@ on: jobs: get_issues: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 if: ${{ github.repository_owner == 'open-telemetry' }} steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/load-tests.yml b/.github/workflows/load-tests.yml index d6e50cec4f92..03ddfe36bfed 100644 --- a/.github/workflows/load-tests.yml +++ b/.github/workflows/load-tests.yml @@ -119,7 +119,7 @@ jobs: path: testbed/tests/results/${{steps.filename.outputs.name}}.json update-benchmarks: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 needs: [loadtest] if: github.event_name != 'pull_request' steps: diff --git a/.github/workflows/mark-issues-as-stale.yml b/.github/workflows/mark-issues-as-stale.yml index 8cb4c88d35f7..4beaa9afe4a0 100644 --- a/.github/workflows/mark-issues-as-stale.yml +++ b/.github/workflows/mark-issues-as-stale.yml @@ -5,7 +5,7 @@ on: jobs: mark-issues-as-stale: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 if: ${{ github.repository_owner == 'open-telemetry' }} steps: - uses: actions/checkout@v4 diff --git 
a/.github/workflows/milestone-add-to-pr.yml b/.github/workflows/milestone-add-to-pr.yml index eba56e603175..ec808bbf849c 100644 --- a/.github/workflows/milestone-add-to-pr.yml +++ b/.github/workflows/milestone-add-to-pr.yml @@ -11,7 +11,7 @@ on: jobs: update-pr: if: github.event.pull_request.merged - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - uses: actions/github-script@v7 with: diff --git a/.github/workflows/ping-codeowners-issues.yml b/.github/workflows/ping-codeowners-issues.yml index 9a58d23f1f69..aa7e2090e338 100644 --- a/.github/workflows/ping-codeowners-issues.yml +++ b/.github/workflows/ping-codeowners-issues.yml @@ -5,7 +5,7 @@ on: jobs: ping-owners: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 if: ${{ github.repository_owner == 'open-telemetry' }} steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/ping-codeowners-on-new-issue.yml b/.github/workflows/ping-codeowners-on-new-issue.yml index f4a2025afe9d..4a1b52f48500 100644 --- a/.github/workflows/ping-codeowners-on-new-issue.yml +++ b/.github/workflows/ping-codeowners-on-new-issue.yml @@ -5,7 +5,7 @@ on: jobs: ping-owners-on-new-issue: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 if: ${{ github.repository_owner == 'open-telemetry' }} steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/ping-codeowners-prs.yml b/.github/workflows/ping-codeowners-prs.yml index 40e6c46c83e1..29b697818aa9 100644 --- a/.github/workflows/ping-codeowners-prs.yml +++ b/.github/workflows/ping-codeowners-prs.yml @@ -5,7 +5,7 @@ on: jobs: ping-owners: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 if: ${{ github.actor != 'dependabot[bot]' && github.repository_owner == 'open-telemetry' }} steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/prepare-release.yml b/.github/workflows/prepare-release.yml index 597f6e3ff2af..f0f5e86f3d22 100644 --- a/.github/workflows/prepare-release.yml +++ b/.github/workflows/prepare-release.yml @@ -15,7 +15,7 @@ on: jobs: # Releasing 
opentelemetry-collector-contrib prepare-release: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - uses: actions/checkout@v4 with: diff --git a/.github/workflows/prometheus-compliance-tests.yml b/.github/workflows/prometheus-compliance-tests.yml index 9557274a088f..4d46370937b8 100644 --- a/.github/workflows/prometheus-compliance-tests.yml +++ b/.github/workflows/prometheus-compliance-tests.yml @@ -23,7 +23,7 @@ env: jobs: prometheus-compliance-tests: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 if: ${{ github.actor != 'dependabot[bot]' }} steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/shellcheck.yaml b/.github/workflows/shellcheck.yaml index ddcf071147b2..711610bc60a2 100644 --- a/.github/workflows/shellcheck.yaml +++ b/.github/workflows/shellcheck.yaml @@ -9,7 +9,7 @@ permissions: {} jobs: shellcheck: name: shellcheck - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 env: VERSION: v0.10.0 steps: diff --git a/.github/workflows/telemetrygen.yml b/.github/workflows/telemetrygen.yml index 555820eb1c6b..b80245e3c62f 100644 --- a/.github/workflows/telemetrygen.yml +++ b/.github/workflows/telemetrygen.yml @@ -22,7 +22,7 @@ concurrency: jobs: build-dev: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 if: ${{ github.actor != 'dependabot[bot]' }} steps: - uses: actions/checkout@v4 @@ -59,7 +59,7 @@ jobs: platforms: linux/amd64,linux/arm64,linux/s390x,linux/ppc64le publish-latest: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 if: (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/v')) && github.repository == 'open-telemetry/opentelemetry-collector-contrib' permissions: packages: write @@ -104,7 +104,7 @@ jobs: platforms: linux/amd64,linux/arm64,linux/s390x,linux/ppc64le publish-stable: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 if: startsWith(github.ref, 'refs/tags/v') && github.repository == 'open-telemetry/opentelemetry-collector-contrib' permissions: packages: write diff --git 
a/.github/workflows/tidy-dependencies.yml b/.github/workflows/tidy-dependencies.yml index 8853bf3ed097..a35e5c882ed1 100644 --- a/.github/workflows/tidy-dependencies.yml +++ b/.github/workflows/tidy-dependencies.yml @@ -13,7 +13,7 @@ env: jobs: setup-environment: timeout-minutes: 30 - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 if: ${{ !contains(github.event.pull_request.labels.*.name, 'dependency-major-update') && (github.actor == 'renovate[bot]' || contains(github.event.pull_request.labels.*.name, 'renovatebot')) }} steps: - uses: actions/checkout@v4 diff --git a/CHANGELOG.md b/CHANGELOG.md index 7c59cd50af7f..77de43683da9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5153,7 +5153,7 @@ This version has been skipped. ### 🚀 New components 🚀 - `telemetryquerylanguage`: Expose the telemetry query language as a package. (#11751) -- `chronyreceiver`: -| This component is a pure go implementation for capturing data from [chrony](https://chrony.tuxfamily.org/) (#11789) +- `chronyreceiver`: -| This component is a pure go implementation for capturing data from [chrony](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/system_administrators_guide/ch-configuring_ntp_using_the_chrony_suite) (#11789) - `otlpjsonfilereceiver`: Add a new file receiver reading JSON-encoded OTLP data, after [serialization specification](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/file-exporter.md#json-file-serialization) (#10836) - `pulsarexporter`: Add Apache Pulsar exporter (#9795) - `solacereceiver`: Add Solace receiver to receive trace data from a Solace PubSub+ Event Broker. 
(#10572) diff --git a/Makefile.Common b/Makefile.Common index c5459458f79f..3b742e42a2ba 100644 --- a/Makefile.Common +++ b/Makefile.Common @@ -57,7 +57,7 @@ $(TOOLS_BIN_DIR): mkdir -p $@ $(TOOLS_BIN_NAMES): $(TOOLS_BIN_DIR) $(TOOLS_MOD_DIR)/go.mod - cd $(TOOLS_MOD_DIR) && GOOS="" $(GOCMD) build -o $@ -trimpath $(filter %/$(notdir $@),$(TOOLS_PKG_NAMES)) + cd $(TOOLS_MOD_DIR) && GOOS="" GOARCH="" $(GOCMD) build -o $@ -trimpath $(filter %/$(notdir $@),$(TOOLS_PKG_NAMES)) ADDLICENSE := $(TOOLS_BIN_DIR)/addlicense MDLINKCHECK := $(TOOLS_BIN_DIR)/markdown-link-check diff --git a/cmd/opampsupervisor/e2e_test.go b/cmd/opampsupervisor/e2e_test.go index 56f0b8cae361..0ce4f8880991 100644 --- a/cmd/opampsupervisor/e2e_test.go +++ b/cmd/opampsupervisor/e2e_test.go @@ -940,7 +940,7 @@ func TestSupervisorRestartCommand(t *testing.T) { return health.Healthy && health.LastError == "" } return false - }, 10*time.Second, 250*time.Millisecond, "Collector never reported healthy after restart") + }, 30*time.Second, 250*time.Millisecond, "Collector never reported healthy after restart") } func TestSupervisorOpAMPConnectionSettings(t *testing.T) { @@ -1348,13 +1348,14 @@ func TestSupervisorStopsAgentProcessWithEmptyConfigMap(t *testing.T) { } // Verify the collector is not running after 250 ms by checking the healthcheck endpoint - time.Sleep(250 * time.Millisecond) - _, err := http.DefaultClient.Get("http://localhost:12345") - if runtime.GOOS != "windows" { - require.ErrorContains(t, err, "connection refused") - } else { - require.ErrorContains(t, err, "No connection could be made") - } + require.EventuallyWithT(t, func(tt *assert.CollectT) { + _, err := http.DefaultClient.Get("http://localhost:12345") + if runtime.GOOS != "windows" { + assert.ErrorContains(tt, err, "connection refused") + } else { + assert.ErrorContains(tt, err, "No connection could be made") + } + }, 3*time.Second, 250*time.Millisecond) } type LogEntry struct { diff --git a/cmd/otelcontribcol/builder-config.yaml 
b/cmd/otelcontribcol/builder-config.yaml index 3bb6ac169b7f..1aaf44a38c55 100644 --- a/cmd/otelcontribcol/builder-config.yaml +++ b/cmd/otelcontribcol/builder-config.yaml @@ -68,6 +68,7 @@ exporters: - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/coralogixexporter v0.115.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.115.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datasetexporter v0.115.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/dorisexporter v0.115.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/elasticsearchexporter v0.115.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter v0.115.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/googlecloudexporter v0.115.0 @@ -504,3 +505,4 @@ replaces: - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/status => ../../pkg/status - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awss3receiver => ../../receiver/awss3receiver - github.com/DataDog/datadog-api-client-go/v2 => github.com/DataDog/datadog-api-client-go/v2 v2.31.0 + - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/dorisexporter => ../../exporter/dorisexporter diff --git a/cmd/oteltestbedcol/builder-config.yaml b/cmd/oteltestbedcol/builder-config.yaml index 0de7be2ac3b1..f3173eb11126 100644 --- a/cmd/oteltestbedcol/builder-config.yaml +++ b/cmd/oteltestbedcol/builder-config.yaml @@ -33,6 +33,8 @@ processors: - gomod: go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.115.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.115.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.115.0 + - gomod: 
github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.115.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.115.0 receivers: - gomod: go.opentelemetry.io/collector/receiver/otlpreceiver v0.115.0 @@ -100,6 +102,8 @@ replaces: - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter => ../../exporter/sapmexporter - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/syslogreceiver => ../../receiver/syslogreceiver - github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor => ../../processor/resourceprocessor + - github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor => ../../processor/transformprocessor + - github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor => ../../processor/filterprocessor - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/carbonexporter => ../../exporter/carbonexporter - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus => ../../pkg/translator/prometheus - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden => ../../pkg/golden diff --git a/exporter/alertmanagerexporter/config_test.go b/exporter/alertmanagerexporter/config_test.go index 07802fbc3e5a..0766c02704b6 100644 --- a/exporter/alertmanagerexporter/config_test.go +++ b/exporter/alertmanagerexporter/config_test.go @@ -4,7 +4,6 @@ package alertmanagerexporter import ( - "net/http" "path/filepath" "testing" "time" @@ -24,7 +23,6 @@ import ( ) func TestLoadConfig(t *testing.T) { - defaultTransport := http.DefaultTransport.(*http.Transport) t.Parallel() cm, err := confmaptest.LoadConf(filepath.Join("testdata", "config.yaml")) @@ -63,26 +61,24 @@ func TestLoadConfig(t *testing.T) { NumConsumers: 2, QueueSize: 10, }, - ClientConfig: confighttp.ClientConfig{ - Headers: map[string]configopaque.String{ + ClientConfig: 
func() confighttp.ClientConfig { + client := confighttp.NewDefaultClientConfig() + client.Headers = map[string]configopaque.String{ "can you have a . here?": "F0000000-0000-0000-0000-000000000000", "header1": "234", "another": "somevalue", - }, - Endpoint: "a.new.alertmanager.target:9093", - TLSSetting: configtls.ClientConfig{ + } + client.Endpoint = "a.new.alertmanager.target:9093" + client.TLSSetting = configtls.ClientConfig{ Config: configtls.Config{ CAFile: "/var/lib/mycert.pem", }, - }, - ReadBufferSize: 0, - WriteBufferSize: 524288, - Timeout: time.Second * 10, - MaxIdleConns: &defaultTransport.MaxIdleConns, - MaxIdleConnsPerHost: &defaultTransport.MaxIdleConnsPerHost, - MaxConnsPerHost: &defaultTransport.MaxConnsPerHost, - IdleConnTimeout: &defaultTransport.IdleConnTimeout, - }, + } + client.ReadBufferSize = 0 + client.WriteBufferSize = 524288 + client.Timeout = time.Second * 10 + return client + }(), }, }, } diff --git a/exporter/awsemfexporter/datapoint.go b/exporter/awsemfexporter/datapoint.go index 001d83ab2b78..36d16b62de88 100644 --- a/exporter/awsemfexporter/datapoint.go +++ b/exporter/awsemfexporter/datapoint.go @@ -109,6 +109,33 @@ type summaryMetricEntry struct { count uint64 } +// dataPointSplit is a structure used to manage segments of data points split from a histogram. +// It is not safe for concurrent use. 
+type dataPointSplit struct { + cWMetricHistogram *cWMetricHistogram + length int + capacity int +} + +func (split *dataPointSplit) isFull() bool { + return split.length >= split.capacity +} + +func (split *dataPointSplit) setMax(maxVal float64) { + split.cWMetricHistogram.Max = maxVal +} + +func (split *dataPointSplit) setMin(minVal float64) { + split.cWMetricHistogram.Min = minVal +} + +func (split *dataPointSplit) appendMetricData(metricVal float64, count uint64) { + split.cWMetricHistogram.Values = append(split.cWMetricHistogram.Values, metricVal) + split.cWMetricHistogram.Counts = append(split.cWMetricHistogram.Counts, float64(count)) + split.length++ + split.cWMetricHistogram.Count += count +} + // CalculateDeltaDatapoints retrieves the NumberDataPoint at the given index and performs rate/delta calculation if necessary. func (dps numberDataPointSlice) CalculateDeltaDatapoints(i int, instrumentationScopeName string, _ bool, calculators *emfCalculators) ([]dataPoint, bool) { metric := dps.NumberDataPointSlice.At(i) @@ -193,58 +220,171 @@ func (dps histogramDataPointSlice) IsStaleNaNInf(i int) (bool, pcommon.Map) { } // CalculateDeltaDatapoints retrieves the ExponentialHistogramDataPoint at the given index. +// As CloudWatch EMF logs allows in maximum of 100 target members, the exponential histogram metric are split into multiple data points as needed, +// each containing a maximum of 100 buckets, to comply with CloudWatch EMF log constraints. +// Note that the number of values and counts in each split may not be less than splitThreshold as we are only adding non-zero bucket counts. +// +// For each split data point: +// - Min and Max values are recalculated based on the bucket boundary within that specific split. +// - Sum is only assigned to the first split to ensure the total sum of the datapoints after aggregation is correct. +// - Count is accumulated based on the bucket counts within each split. 
func (dps exponentialHistogramDataPointSlice) CalculateDeltaDatapoints(idx int, instrumentationScopeName string, _ bool, _ *emfCalculators) ([]dataPoint, bool) { metric := dps.ExponentialHistogramDataPointSlice.At(idx) + const splitThreshold = 100 + currentBucketIndex := 0 + currentPositiveIndex := metric.Positive().BucketCounts().Len() - 1 + currentZeroIndex := 0 + currentNegativeIndex := 0 + var datapoints []dataPoint + totalBucketLen := metric.Positive().BucketCounts().Len() + metric.Negative().BucketCounts().Len() + if metric.ZeroCount() > 0 { + totalBucketLen++ + } + + for currentBucketIndex < totalBucketLen { + // Create a new dataPointSplit with a capacity of up to splitThreshold buckets + capacity := min(splitThreshold, totalBucketLen-currentBucketIndex) + + sum := 0.0 + // Only assign `Sum` if this is the first split to make sure the total sum of the datapoints after aggregation is correct. + if currentBucketIndex == 0 { + sum = metric.Sum() + } + + split := dataPointSplit{ + cWMetricHistogram: &cWMetricHistogram{ + Values: []float64{}, + Counts: []float64{}, + Max: metric.Max(), + Min: metric.Min(), + Count: 0, + Sum: sum, + }, + length: 0, + capacity: capacity, + } + + // Set collect values from positive buckets and save into split. + currentBucketIndex, currentPositiveIndex = collectDatapointsWithPositiveBuckets(&split, metric, currentBucketIndex, currentPositiveIndex) + // Set collect values from zero buckets and save into split. + currentBucketIndex, currentZeroIndex = collectDatapointsWithZeroBucket(&split, metric, currentBucketIndex, currentZeroIndex) + // Set collect values from negative buckets and save into split. 
+ currentBucketIndex, currentNegativeIndex = collectDatapointsWithNegativeBuckets(&split, metric, currentBucketIndex, currentNegativeIndex) + + if split.length > 0 { + // Add the current split to the datapoints list + datapoints = append(datapoints, dataPoint{ + name: dps.metricName, + value: split.cWMetricHistogram, + labels: createLabels(metric.Attributes(), instrumentationScopeName), + timestampMs: unixNanoToMilliseconds(metric.Timestamp()), + }) + } + } + + if len(datapoints) == 0 { + return []dataPoint{{ + name: dps.metricName, + value: &cWMetricHistogram{ + Values: []float64{}, + Counts: []float64{}, + Count: metric.Count(), + Sum: metric.Sum(), + Max: metric.Max(), + Min: metric.Min(), + }, + labels: createLabels(metric.Attributes(), instrumentationScopeName), + timestampMs: unixNanoToMilliseconds(metric.Timestamp()), + }}, true + } + + // Override the min and max values of the first and last splits with the raw data of the metric. + datapoints[0].value.(*cWMetricHistogram).Max = metric.Max() + datapoints[len(datapoints)-1].value.(*cWMetricHistogram).Min = metric.Min() + + return datapoints, true +} + +func collectDatapointsWithPositiveBuckets(split *dataPointSplit, metric pmetric.ExponentialHistogramDataPoint, currentBucketIndex int, currentPositiveIndex int) (int, int) { + if split.isFull() || currentPositiveIndex < 0 { + return currentBucketIndex, currentPositiveIndex + } + scale := metric.Scale() base := math.Pow(2, math.Pow(2, float64(-scale))) - arrayValues := []float64{} - arrayCounts := []float64{} - var bucketBegin float64 - var bucketEnd float64 - - // Set mid-point of positive buckets in values/counts array. 
positiveBuckets := metric.Positive() positiveOffset := positiveBuckets.Offset() positiveBucketCounts := positiveBuckets.BucketCounts() - bucketBegin = 0 - bucketEnd = 0 - for i := 0; i < positiveBucketCounts.Len(); i++ { - index := i + int(positiveOffset) - if bucketBegin == 0 { - bucketBegin = math.Pow(base, float64(index)) + bucketBegin := 0.0 + bucketEnd := 0.0 + + for !split.isFull() && currentPositiveIndex >= 0 { + index := currentPositiveIndex + int(positiveOffset) + if bucketEnd == 0 { + bucketEnd = math.Pow(base, float64(index+1)) } else { - bucketBegin = bucketEnd + bucketEnd = bucketBegin } - bucketEnd = math.Pow(base, float64(index+1)) + bucketBegin = math.Pow(base, float64(index)) metricVal := (bucketBegin + bucketEnd) / 2 - count := positiveBucketCounts.At(i) + count := positiveBucketCounts.At(currentPositiveIndex) if count > 0 { - arrayValues = append(arrayValues, metricVal) - arrayCounts = append(arrayCounts, float64(count)) + split.appendMetricData(metricVal, count) + + // The value are append from high to low, set Max from the first bucket (highest value) and Min from the last bucket (lowest value) + if split.length == 1 { + split.setMax(bucketEnd) + } + if split.isFull() { + split.setMin(bucketBegin) + } } + currentBucketIndex++ + currentPositiveIndex-- } - // Set count of zero bucket in values/counts array. 
- if metric.ZeroCount() > 0 { - arrayValues = append(arrayValues, 0) - arrayCounts = append(arrayCounts, float64(metric.ZeroCount())) + return currentBucketIndex, currentPositiveIndex +} + +func collectDatapointsWithZeroBucket(split *dataPointSplit, metric pmetric.ExponentialHistogramDataPoint, currentBucketIndex int, currentZeroIndex int) (int, int) { + if metric.ZeroCount() > 0 && !split.isFull() && currentZeroIndex == 0 { + split.appendMetricData(0, metric.ZeroCount()) + + // The value are append from high to low, set Max from the first bucket (highest value) and Min from the last bucket (lowest value) + if split.length == 1 { + split.setMax(0) + } + if split.isFull() { + split.setMin(0) + } + currentZeroIndex++ + currentBucketIndex++ } - // Set mid-point of negative buckets in values/counts array. + return currentBucketIndex, currentZeroIndex +} + +func collectDatapointsWithNegativeBuckets(split *dataPointSplit, metric pmetric.ExponentialHistogramDataPoint, currentBucketIndex int, currentNegativeIndex int) (int, int) { // According to metrics spec, the value in histogram is expected to be non-negative. // https://opentelemetry.io/docs/specs/otel/metrics/api/#histogram // However, the negative support is defined in metrics data model. // https://opentelemetry.io/docs/specs/otel/metrics/data-model/#exponentialhistogram // The negative is also supported but only verified with unit test. 
+ if split.isFull() || currentNegativeIndex >= metric.Negative().BucketCounts().Len() { + return currentBucketIndex, currentNegativeIndex + } + scale := metric.Scale() + base := math.Pow(2, math.Pow(2, float64(-scale))) negativeBuckets := metric.Negative() negativeOffset := negativeBuckets.Offset() negativeBucketCounts := negativeBuckets.BucketCounts() - bucketBegin = 0 - bucketEnd = 0 - for i := 0; i < negativeBucketCounts.Len(); i++ { - index := i + int(negativeOffset) + bucketBegin := 0.0 + bucketEnd := 0.0 + + for !split.isFull() && currentNegativeIndex < metric.Negative().BucketCounts().Len() { + index := currentNegativeIndex + int(negativeOffset) if bucketEnd == 0 { bucketEnd = -math.Pow(base, float64(index)) } else { @@ -252,26 +392,23 @@ func (dps exponentialHistogramDataPointSlice) CalculateDeltaDatapoints(idx int, } bucketBegin = -math.Pow(base, float64(index+1)) metricVal := (bucketBegin + bucketEnd) / 2 - count := negativeBucketCounts.At(i) + count := negativeBucketCounts.At(currentNegativeIndex) if count > 0 { - arrayValues = append(arrayValues, metricVal) - arrayCounts = append(arrayCounts, float64(count)) + split.appendMetricData(metricVal, count) + + // The value are append from high to low, set Max from the first bucket (highest value) and Min from the last bucket (lowest value) + if split.length == 1 { + split.setMax(bucketEnd) + } + if split.isFull() { + split.setMin(bucketBegin) + } } + currentBucketIndex++ + currentNegativeIndex++ } - return []dataPoint{{ - name: dps.metricName, - value: &cWMetricHistogram{ - Values: arrayValues, - Counts: arrayCounts, - Count: metric.Count(), - Sum: metric.Sum(), - Max: metric.Max(), - Min: metric.Min(), - }, - labels: createLabels(metric.Attributes(), instrumentationScopeName), - timestampMs: unixNanoToMilliseconds(metric.Timestamp()), - }}, true + return currentBucketIndex, currentNegativeIndex } func (dps exponentialHistogramDataPointSlice) IsStaleNaNInf(i int) (bool, pcommon.Map) { diff --git 
a/exporter/awsemfexporter/datapoint_test.go b/exporter/awsemfexporter/datapoint_test.go index 911ecc48e24e..fb02857c6fb0 100644 --- a/exporter/awsemfexporter/datapoint_test.go +++ b/exporter/awsemfexporter/datapoint_test.go @@ -7,6 +7,7 @@ import ( "fmt" "math" "reflect" + "strconv" "testing" "time" @@ -244,6 +245,65 @@ func generateTestExponentialHistogramMetricWithInfs(name string) pmetric.Metrics return otelMetrics } +func generateTestExponentialHistogramMetricWithLongBuckets(name string) pmetric.Metrics { + otelMetrics := pmetric.NewMetrics() + rs := otelMetrics.ResourceMetrics().AppendEmpty() + metrics := rs.ScopeMetrics().AppendEmpty().Metrics() + metric := metrics.AppendEmpty() + metric.SetName(name) + metric.SetUnit("Seconds") + exponentialHistogramMetric := metric.SetEmptyExponentialHistogram() + + exponentialHistogramDatapoint := exponentialHistogramMetric.DataPoints().AppendEmpty() + exponentialHistogramDatapoint.SetCount(3662) + exponentialHistogramDatapoint.SetSum(1000) + exponentialHistogramDatapoint.SetMin(-9e+17) + exponentialHistogramDatapoint.SetMax(9e+17) + exponentialHistogramDatapoint.SetZeroCount(2) + posBucketCounts := make([]uint64, 60) + for i := range posBucketCounts { + posBucketCounts[i] = uint64(i + 1) + } + exponentialHistogramDatapoint.Positive().BucketCounts().FromRaw(posBucketCounts) + negBucketCounts := make([]uint64, 60) + for i := range negBucketCounts { + negBucketCounts[i] = uint64(i + 1) + } + exponentialHistogramDatapoint.Negative().BucketCounts().FromRaw(negBucketCounts) + exponentialHistogramDatapoint.Attributes().PutStr("label1", "value1") + return otelMetrics +} + +func generateTestExponentialHistogramMetricWithSpecifiedNumberOfBuckets(name string, bucketLength int) pmetric.Metrics { + halfBucketLength := bucketLength / 2 + otelMetrics := pmetric.NewMetrics() + rs := otelMetrics.ResourceMetrics().AppendEmpty() + metrics := rs.ScopeMetrics().AppendEmpty().Metrics() + metric := metrics.AppendEmpty() + metric.SetName(name) + 
metric.SetUnit("Seconds") + exponentialHistogramMetric := metric.SetEmptyExponentialHistogram() + + exponentialHistogramDatapoint := exponentialHistogramMetric.DataPoints().AppendEmpty() + exponentialHistogramDatapoint.SetCount(250550) + exponentialHistogramDatapoint.SetSum(10000) + exponentialHistogramDatapoint.SetMin(-9e+20) + exponentialHistogramDatapoint.SetMax(9e+20) + exponentialHistogramDatapoint.SetZeroCount(50) + posBucketCounts := make([]uint64, halfBucketLength) + for i := range posBucketCounts { + posBucketCounts[i] = uint64(i + 1) + } + exponentialHistogramDatapoint.Positive().BucketCounts().FromRaw(posBucketCounts) + negBucketCounts := make([]uint64, halfBucketLength) + for i := range negBucketCounts { + negBucketCounts[i] = uint64(i + 1) + } + exponentialHistogramDatapoint.Negative().BucketCounts().FromRaw(negBucketCounts) + exponentialHistogramDatapoint.Attributes().PutStr("label1", "value1") + return otelMetrics +} + func generateTestSummaryMetric(name string) pmetric.Metrics { otelMetrics := pmetric.NewMetrics() rs := otelMetrics.ResourceMetrics().AppendEmpty() @@ -841,7 +901,7 @@ func TestCalculateDeltaDatapoints_ExponentialHistogramDataPointSlice(t *testing. }(), expectedDatapoint: dataPoint{ name: "foo", - value: &cWMetricHistogram{Values: []float64{1.5, 3, 6, 0, -1.5, -3, -6}, Counts: []float64{1, 2, 3, 4, 1, 2, 3}}, + value: &cWMetricHistogram{Values: []float64{6, 3, 1.5, 0, -1.5, -3, -6}, Counts: []float64{3, 2, 1, 4, 1, 2, 3}, Count: 16}, labels: map[string]string{oTellibDimensionKey: instrLibName, "label1": "value1"}, }, }, @@ -862,7 +922,7 @@ func TestCalculateDeltaDatapoints_ExponentialHistogramDataPointSlice(t *testing. 
}(), expectedDatapoint: dataPoint{ name: "foo", - value: &cWMetricHistogram{Values: []float64{0.625, 2.5, 10, 0, -0.625, -2.5, -10}, Counts: []float64{1, 2, 3, 4, 1, 2, 3}}, + value: &cWMetricHistogram{Values: []float64{10, 2.5, 0.625, 0, -0.625, -2.5, -10}, Counts: []float64{3, 2, 1, 4, 1, 2, 3}, Count: 16}, labels: map[string]string{oTellibDimensionKey: instrLibName, "label1": "value1", "label2": "value2"}, }, }, @@ -885,6 +945,543 @@ func TestCalculateDeltaDatapoints_ExponentialHistogramDataPointSlice(t *testing. } } +func TestCalculateDeltaDatapoints_ExponentialHistogramDataPointSliceWithSplitDataPoints(t *testing.T) { + dmd := generateDeltaMetricMetadata(false, "foo", false) + + testCases := []struct { + name string + histogramDPS pmetric.ExponentialHistogramDataPointSlice + expectedDatapoints []dataPoint + }{ + { + name: "Exponential histogram with more than 100 buckets, including positive, negative and zero buckets", + histogramDPS: func() pmetric.ExponentialHistogramDataPointSlice { + histogramDPS := pmetric.NewExponentialHistogramDataPointSlice() + histogramDP := histogramDPS.AppendEmpty() + posBucketCounts := make([]uint64, 60) + for i := range posBucketCounts { + posBucketCounts[i] = uint64(i + 1) + } + histogramDP.Positive().BucketCounts().FromRaw(posBucketCounts) + histogramDP.SetZeroCount(2) + negBucketCounts := make([]uint64, 60) + for i := range negBucketCounts { + negBucketCounts[i] = uint64(i + 1) + } + histogramDP.Negative().BucketCounts().FromRaw(negBucketCounts) + histogramDP.SetSum(1000) + histogramDP.SetMin(-9e+17) + histogramDP.SetMax(9e+17) + histogramDP.SetCount(uint64(3662)) + histogramDP.Attributes().PutStr("label1", "value1") + return histogramDPS + }(), + expectedDatapoints: []dataPoint{ + { + name: "foo", + value: &cWMetricHistogram{ + Values: []float64{ + 8.646911284551352e+17, 4.323455642275676e+17, 2.161727821137838e+17, 1.080863910568919e+17, 5.404319552844595e+16, 2.7021597764222976e+16, + 1.3510798882111488e+16, 
6.755399441055744e+15, 3.377699720527872e+15, 1.688849860263936e+15, 8.44424930131968e+14, 4.22212465065984e+14, + 2.11106232532992e+14, 1.05553116266496e+14, 5.2776558133248e+13, 2.6388279066624e+13, 1.3194139533312e+13, 6.597069766656e+12, 3.298534883328e+12, + 1.649267441664e+12, 8.24633720832e+11, 4.12316860416e+11, 2.06158430208e+11, 1.03079215104e+11, 5.1539607552e+10, 2.5769803776e+10, + 1.2884901888e+10, 6.442450944e+09, 3.221225472e+09, 1.610612736e+09, 8.05306368e+08, 4.02653184e+08, 2.01326592e+08, 1.00663296e+08, + 5.0331648e+07, 2.5165824e+07, 1.2582912e+07, 6.291456e+06, 3.145728e+06, 1.572864e+06, 786432, 393216, 196608, 98304, 49152, 24576, + 12288, 6144, 3072, 1536, 768, 384, 192, 96, 48, 24, 12, 6, 3, 1.5, 0, -1.5, -3, -6, -12, -24, -48, -96, -192, -384, -768, -1536, -3072, + -6144, -12288, -24576, -49152, -98304, -196608, -393216, -786432, -1.572864e+06, -3.145728e+06, -6.291456e+06, -1.2582912e+07, -2.5165824e+07, + -5.0331648e+07, -1.00663296e+08, -2.01326592e+08, -4.02653184e+08, -8.05306368e+08, -1.610612736e+09, -3.221225472e+09, -6.442450944e+09, + -1.2884901888e+10, -2.5769803776e+10, -5.1539607552e+10, -1.03079215104e+11, -2.06158430208e+11, -4.12316860416e+11, + }, + Counts: []float64{ + 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, + 6, 5, 4, 3, 2, 1, 2, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, + 34, 35, 36, 37, 38, 39, + }, + Sum: 1000, Count: 2612, Min: -5.49755813888e+11, Max: 9e+17, + }, + labels: map[string]string{oTellibDimensionKey: instrLibName, "label1": "value1"}, + }, + { + name: "foo", + value: &cWMetricHistogram{ + Values: []float64{ + -8.24633720832e+11, -1.649267441664e+12, -3.298534883328e+12, -6.597069766656e+12, -1.3194139533312e+13, -2.6388279066624e+13, 
-5.2776558133248e+13, + -1.05553116266496e+14, -2.11106232532992e+14, -4.22212465065984e+14, -8.44424930131968e+14, -1.688849860263936e+15, -3.377699720527872e+15, + -6.755399441055744e+15, -1.3510798882111488e+16, -2.7021597764222976e+16, -5.404319552844595e+16, -1.080863910568919e+17, -2.161727821137838e+17, + -4.323455642275676e+17, -8.646911284551352e+17, + }, + Counts: []float64{40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60}, + Sum: 0, Count: 1050, Min: -9e+17, Max: -5.49755813888e+11, + }, + labels: map[string]string{oTellibDimensionKey: instrLibName, "label1": "value1"}, + }, + }, + }, + { + name: "Exponential histogram with more than 100 buckets, including positive and zero buckets", + histogramDPS: func() pmetric.ExponentialHistogramDataPointSlice { + histogramDPS := pmetric.NewExponentialHistogramDataPointSlice() + histogramDP := histogramDPS.AppendEmpty() + posBucketCounts := make([]uint64, 120) + for i := range posBucketCounts { + posBucketCounts[i] = uint64(i + 1) + } + histogramDP.Positive().BucketCounts().FromRaw(posBucketCounts) + histogramDP.SetZeroCount(2) + histogramDP.SetSum(10000) + histogramDP.SetMin(0) + histogramDP.SetMax(9e+36) + histogramDP.SetCount(uint64(7262)) + histogramDP.Attributes().PutStr("label1", "value1") + return histogramDPS + }(), + expectedDatapoints: []dataPoint{ + { + name: "foo", + value: &cWMetricHistogram{ + Values: []float64{ + 9.969209968386869e+35, 4.9846049841934345e+35, 2.4923024920967173e+35, 1.2461512460483586e+35, 6.230756230241793e+34, + 3.1153781151208966e+34, 1.5576890575604483e+34, 7.788445287802241e+33, 3.894222643901121e+33, 1.9471113219505604e+33, + 9.735556609752802e+32, 4.867778304876401e+32, 2.4338891524382005e+32, 1.2169445762191002e+32, 6.084722881095501e+31, + 3.0423614405477506e+31, 1.5211807202738753e+31, 7.605903601369376e+30, 3.802951800684688e+30, 1.901475900342344e+30, + 9.50737950171172e+29, 4.75368975085586e+29, 2.37684487542793e+29, 
1.188422437713965e+29, 5.942112188569825e+28, + 2.9710560942849127e+28, 1.4855280471424563e+28, 7.427640235712282e+27, 3.713820117856141e+27, 1.8569100589280704e+27, + 9.284550294640352e+26, 4.642275147320176e+26, 2.321137573660088e+26, 1.160568786830044e+26, 5.80284393415022e+25, + 2.90142196707511e+25, 1.450710983537555e+25, 7.253554917687775e+24, 3.6267774588438875e+24, 1.8133887294219438e+24, + 9.066943647109719e+23, 4.5334718235548594e+23, 2.2667359117774297e+23, 1.1333679558887149e+23, 5.666839779443574e+22, + 2.833419889721787e+22, 1.4167099448608936e+22, 7.083549724304468e+21, 3.541774862152234e+21, 1.770887431076117e+21, + 8.854437155380585e+20, 4.4272185776902924e+20, 2.2136092888451462e+20, 1.1068046444225731e+20, 5.5340232221128655e+19, + 2.7670116110564327e+19, 1.3835058055282164e+19, 6.917529027641082e+18, 3.458764513820541e+18, 1.7293822569102705e+18, + 8.646911284551352e+17, 4.323455642275676e+17, 2.161727821137838e+17, 1.080863910568919e+17, 5.404319552844595e+16, + 2.7021597764222976e+16, 1.3510798882111488e+16, 6.755399441055744e+15, 3.377699720527872e+15, 1.688849860263936e+15, + 8.44424930131968e+14, 4.22212465065984e+14, 2.11106232532992e+14, 1.05553116266496e+14, 5.2776558133248e+13, + 2.6388279066624e+13, 1.3194139533312e+13, 6.597069766656e+12, 3.298534883328e+12, 1.649267441664e+12, 8.24633720832e+11, + 4.12316860416e+11, 2.06158430208e+11, 1.03079215104e+11, 5.1539607552e+10, 2.5769803776e+10, 1.2884901888e+10, + 6.442450944e+09, 3.221225472e+09, 1.610612736e+09, 8.05306368e+08, 4.02653184e+08, 2.01326592e+08, 1.00663296e+08, + 5.0331648e+07, 2.5165824e+07, 1.2582912e+07, 6.291456e+06, 3.145728e+06, 1.572864e+06, + }, + Counts: []float64{ + 120, 119, 118, 117, 116, 115, 114, 113, 112, 111, 110, 109, 108, 107, 106, 105, 104, 103, 102, 101, 100, 99, + 98, 97, 96, 95, 94, 93, 92, 91, 90, 89, 88, 87, 86, 85, 84, 83, 82, 81, 80, 79, 78, 77, 76, 75, 74, 73, 72, 71, 70, 69, 68, + 67, 66, 65, 64, 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 
51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, 37, + 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, + }, + Sum: 10000, Count: 7050, Min: 1.048576e+06, Max: 9e+36, + }, + labels: map[string]string{oTellibDimensionKey: instrLibName, "label1": "value1"}, + }, + { + name: "foo", + value: &cWMetricHistogram{ + Values: []float64{786432, 393216, 196608, 98304, 49152, 24576, 12288, 6144, 3072, 1536, 768, 384, 192, 96, 48, 24, 12, 6, 3, 1.5, 0}, + Counts: []float64{20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 2}, + Sum: 0, Count: 212, Min: 0, Max: 1.048576e+06, + }, + labels: map[string]string{oTellibDimensionKey: instrLibName, "label1": "value1"}, + }, + }, + }, + { + name: "Exponential histogram with more than 100 buckets, including negative and zero buckets", + histogramDPS: func() pmetric.ExponentialHistogramDataPointSlice { + histogramDPS := pmetric.NewExponentialHistogramDataPointSlice() + histogramDP := histogramDPS.AppendEmpty() + negBucketCounts := make([]uint64, 120) + for i := range negBucketCounts { + negBucketCounts[i] = uint64(i + 1) + } + histogramDP.Negative().BucketCounts().FromRaw(negBucketCounts) + histogramDP.SetZeroCount(2) + histogramDP.SetSum(10000) + histogramDP.SetMin(-9e+36) + histogramDP.SetMax(0) + histogramDP.SetCount(uint64(7262)) + histogramDP.Attributes().PutStr("label1", "value1") + return histogramDPS + }(), + expectedDatapoints: []dataPoint{ + { + name: "foo", + value: &cWMetricHistogram{ + Values: []float64{ + 0, -1.5, -3, -6, -12, -24, -48, -96, -192, -384, -768, -1536, -3072, -6144, -12288, -24576, + -49152, -98304, -196608, -393216, -786432, -1.572864e+06, -3.145728e+06, -6.291456e+06, -1.2582912e+07, + -2.5165824e+07, -5.0331648e+07, -1.00663296e+08, -2.01326592e+08, -4.02653184e+08, -8.05306368e+08, + -1.610612736e+09, -3.221225472e+09, -6.442450944e+09, -1.2884901888e+10, -2.5769803776e+10, + -5.1539607552e+10, -1.03079215104e+11, -2.06158430208e+11, -4.12316860416e+11, 
-8.24633720832e+11, + -1.649267441664e+12, -3.298534883328e+12, -6.597069766656e+12, -1.3194139533312e+13, -2.6388279066624e+13, + -5.2776558133248e+13, -1.05553116266496e+14, -2.11106232532992e+14, -4.22212465065984e+14, -8.44424930131968e+14, + -1.688849860263936e+15, -3.377699720527872e+15, -6.755399441055744e+15, -1.3510798882111488e+16, + -2.7021597764222976e+16, -5.404319552844595e+16, -1.080863910568919e+17, -2.161727821137838e+17, + -4.323455642275676e+17, -8.646911284551352e+17, -1.7293822569102705e+18, -3.458764513820541e+18, + -6.917529027641082e+18, -1.3835058055282164e+19, -2.7670116110564327e+19, -5.5340232221128655e+19, + -1.1068046444225731e+20, -2.2136092888451462e+20, -4.4272185776902924e+20, -8.854437155380585e+20, + -1.770887431076117e+21, -3.541774862152234e+21, -7.083549724304468e+21, -1.4167099448608936e+22, + -2.833419889721787e+22, -5.666839779443574e+22, -1.1333679558887149e+23, -2.2667359117774297e+23, + -4.5334718235548594e+23, -9.066943647109719e+23, -1.8133887294219438e+24, -3.6267774588438875e+24, + -7.253554917687775e+24, -1.450710983537555e+25, -2.90142196707511e+25, -5.80284393415022e+25, + -1.160568786830044e+26, -2.321137573660088e+26, -4.642275147320176e+26, -9.284550294640352e+26, + -1.8569100589280704e+27, -3.713820117856141e+27, -7.427640235712282e+27, -1.4855280471424563e+28, + -2.9710560942849127e+28, -5.942112188569825e+28, -1.188422437713965e+29, -2.37684487542793e+29, -4.75368975085586e+29, + }, + Counts: []float64{ + 2, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, + 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, + 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + }, + Sum: 10000, Count: 4952, Min: -6.338253001141147e+29, Max: 0, + }, + labels: 
map[string]string{oTellibDimensionKey: instrLibName, "label1": "value1"}, + }, + { + name: "foo", + value: &cWMetricHistogram{ + Values: []float64{ + -9.50737950171172e+29, -1.901475900342344e+30, -3.802951800684688e+30, -7.605903601369376e+30, + -1.5211807202738753e+31, -3.0423614405477506e+31, -6.084722881095501e+31, -1.2169445762191002e+32, + -2.4338891524382005e+32, -4.867778304876401e+32, -9.735556609752802e+32, -1.9471113219505604e+33, -3.894222643901121e+33, + -7.788445287802241e+33, -1.5576890575604483e+34, -3.1153781151208966e+34, -6.230756230241793e+34, -1.2461512460483586e+35, + -2.4923024920967173e+35, -4.9846049841934345e+35, -9.969209968386869e+35, + }, + Counts: []float64{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120}, + Sum: 0, Count: 2310, Min: -9e+36, Max: -6.338253001141147e+29, + }, + labels: map[string]string{oTellibDimensionKey: instrLibName, "label1": "value1"}, + }, + }, + }, + { + name: "Exponential histogram with more than 100 buckets, including positive and negative buckets", + histogramDPS: func() pmetric.ExponentialHistogramDataPointSlice { + histogramDPS := pmetric.NewExponentialHistogramDataPointSlice() + histogramDP := histogramDPS.AppendEmpty() + posBucketCounts := make([]uint64, 60) + for i := range posBucketCounts { + posBucketCounts[i] = uint64(i + 1) + } + histogramDP.Positive().BucketCounts().FromRaw(posBucketCounts) + negBucketCounts := make([]uint64, 60) + for i := range negBucketCounts { + negBucketCounts[i] = uint64(i + 1) + } + histogramDP.Negative().BucketCounts().FromRaw(negBucketCounts) + histogramDP.SetSum(1000) + histogramDP.SetMin(-9e+17) + histogramDP.SetMax(9e+17) + histogramDP.SetCount(uint64(3660)) + histogramDP.Attributes().PutStr("label1", "value1") + return histogramDPS + }(), + expectedDatapoints: []dataPoint{ + { + name: "foo", + value: &cWMetricHistogram{ + Values: []float64{ + 8.646911284551352e+17, 4.323455642275676e+17, 2.161727821137838e+17, 
1.080863910568919e+17, 5.404319552844595e+16, 2.7021597764222976e+16, + 1.3510798882111488e+16, 6.755399441055744e+15, 3.377699720527872e+15, 1.688849860263936e+15, 8.44424930131968e+14, 4.22212465065984e+14, + 2.11106232532992e+14, 1.05553116266496e+14, 5.2776558133248e+13, 2.6388279066624e+13, 1.3194139533312e+13, 6.597069766656e+12, 3.298534883328e+12, + 1.649267441664e+12, 8.24633720832e+11, 4.12316860416e+11, 2.06158430208e+11, 1.03079215104e+11, 5.1539607552e+10, 2.5769803776e+10, + 1.2884901888e+10, 6.442450944e+09, 3.221225472e+09, 1.610612736e+09, 8.05306368e+08, 4.02653184e+08, 2.01326592e+08, 1.00663296e+08, + 5.0331648e+07, 2.5165824e+07, 1.2582912e+07, 6.291456e+06, 3.145728e+06, 1.572864e+06, 786432, 393216, 196608, 98304, 49152, 24576, + 12288, 6144, 3072, 1536, 768, 384, 192, 96, 48, 24, 12, 6, 3, 1.5, -1.5, -3, -6, -12, -24, -48, -96, -192, -384, -768, -1536, -3072, + -6144, -12288, -24576, -49152, -98304, -196608, -393216, -786432, -1.572864e+06, -3.145728e+06, -6.291456e+06, -1.2582912e+07, -2.5165824e+07, + -5.0331648e+07, -1.00663296e+08, -2.01326592e+08, -4.02653184e+08, -8.05306368e+08, -1.610612736e+09, -3.221225472e+09, -6.442450944e+09, + -1.2884901888e+10, -2.5769803776e+10, -5.1539607552e+10, -1.03079215104e+11, -2.06158430208e+11, -4.12316860416e+11, -8.24633720832e+11, + }, + Counts: []float64{ + 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, + 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, + 34, 35, 36, 37, 38, 39, 40, + }, + Sum: 1000, Count: 2650, Min: -1.099511627776e+12, Max: 9e+17, + }, + labels: map[string]string{oTellibDimensionKey: instrLibName, "label1": "value1"}, + }, + { + name: "foo", + value: &cWMetricHistogram{ + Values: []float64{ + -1.649267441664e+12, 
-3.298534883328e+12, -6.597069766656e+12, -1.3194139533312e+13, -2.6388279066624e+13, -5.2776558133248e+13, + -1.05553116266496e+14, -2.11106232532992e+14, -4.22212465065984e+14, -8.44424930131968e+14, -1.688849860263936e+15, -3.377699720527872e+15, + -6.755399441055744e+15, -1.3510798882111488e+16, -2.7021597764222976e+16, -5.404319552844595e+16, -1.080863910568919e+17, -2.161727821137838e+17, + -4.323455642275676e+17, -8.646911284551352e+17, + }, + Counts: []float64{41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60}, + Sum: 0, Count: 1010, Min: -9e+17, Max: -1.099511627776e+12, + }, + labels: map[string]string{oTellibDimensionKey: instrLibName, "label1": "value1"}, + }, + }, + }, + { + name: "Exponential histogram with exact 200 buckets, including positive, negative buckets", + histogramDPS: func() pmetric.ExponentialHistogramDataPointSlice { + histogramDPS := pmetric.NewExponentialHistogramDataPointSlice() + histogramDP := histogramDPS.AppendEmpty() + posBucketCounts := make([]uint64, 100) + for i := range posBucketCounts { + posBucketCounts[i] = uint64(i + 1) + } + histogramDP.Positive().BucketCounts().FromRaw(posBucketCounts) + negBucketCounts := make([]uint64, 100) + for i := range negBucketCounts { + negBucketCounts[i] = uint64(i + 1) + } + histogramDP.Negative().BucketCounts().FromRaw(negBucketCounts) + histogramDP.SetSum(100000) + histogramDP.SetMin(-9e+36) + histogramDP.SetMax(9e+36) + histogramDP.SetCount(uint64(3662)) + histogramDP.Attributes().PutStr("label1", "value1") + return histogramDPS + }(), + expectedDatapoints: []dataPoint{ + { + name: "foo", + value: &cWMetricHistogram{ + Values: []float64{ + 9.50737950171172e+29, 4.75368975085586e+29, 2.37684487542793e+29, 1.188422437713965e+29, 5.942112188569825e+28, + 2.9710560942849127e+28, 1.4855280471424563e+28, 7.427640235712282e+27, 3.713820117856141e+27, 1.8569100589280704e+27, + 9.284550294640352e+26, 4.642275147320176e+26, 2.321137573660088e+26, 1.160568786830044e+26, 
5.80284393415022e+25, + 2.90142196707511e+25, 1.450710983537555e+25, 7.253554917687775e+24, 3.6267774588438875e+24, 1.8133887294219438e+24, + 9.066943647109719e+23, 4.5334718235548594e+23, 2.2667359117774297e+23, 1.1333679558887149e+23, 5.666839779443574e+22, + 2.833419889721787e+22, 1.4167099448608936e+22, 7.083549724304468e+21, 3.541774862152234e+21, 1.770887431076117e+21, + 8.854437155380585e+20, 4.4272185776902924e+20, 2.2136092888451462e+20, 1.1068046444225731e+20, 5.5340232221128655e+19, + 2.7670116110564327e+19, 1.3835058055282164e+19, 6.917529027641082e+18, 3.458764513820541e+18, 1.7293822569102705e+18, + 8.646911284551352e+17, 4.323455642275676e+17, 2.161727821137838e+17, 1.080863910568919e+17, 5.404319552844595e+16, + 2.7021597764222976e+16, 1.3510798882111488e+16, 6.755399441055744e+15, 3.377699720527872e+15, 1.688849860263936e+15, + 8.44424930131968e+14, 4.22212465065984e+14, 2.11106232532992e+14, 1.05553116266496e+14, 5.2776558133248e+13, + 2.6388279066624e+13, 1.3194139533312e+13, 6.597069766656e+12, 3.298534883328e+12, 1.649267441664e+12, 8.24633720832e+11, + 4.12316860416e+11, 2.06158430208e+11, 1.03079215104e+11, 5.1539607552e+10, 2.5769803776e+10, 1.2884901888e+10, 6.442450944e+09, + 3.221225472e+09, 1.610612736e+09, 8.05306368e+08, 4.02653184e+08, 2.01326592e+08, 1.00663296e+08, 5.0331648e+07, + 2.5165824e+07, 1.2582912e+07, 6.291456e+06, 3.145728e+06, 1.572864e+06, 786432, 393216, 196608, 98304, 49152, 24576, 12288, + 6144, 3072, 1536, 768, 384, 192, 96, 48, 24, 12, 6, 3, 1.5, + }, + Counts: []float64{ + 100, 99, 98, 97, 96, 95, 94, + 93, 92, 91, 90, 89, 88, 87, 86, 85, 84, 83, 82, 81, 80, 79, 78, 77, 76, 75, 74, 73, 72, 71, 70, 69, 68, 67, 66, 65, 64, 63, 62, 61, + 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, + }, + Sum: 100000, Count: 5050, Min: 1, Max: 
9e+36, + }, + labels: map[string]string{oTellibDimensionKey: instrLibName, "label1": "value1"}, + }, + { + name: "foo", + value: &cWMetricHistogram{ + Values: []float64{ + -1.5, -3, -6, -12, -24, -48, -96, -192, -384, -768, -1536, -3072, -6144, -12288, -24576, -49152, -98304, -196608, -393216, + -786432, -1.572864e+06, -3.145728e+06, -6.291456e+06, -1.2582912e+07, -2.5165824e+07, -5.0331648e+07, -1.00663296e+08, + -2.01326592e+08, -4.02653184e+08, -8.05306368e+08, -1.610612736e+09, -3.221225472e+09, -6.442450944e+09, -1.2884901888e+10, + -2.5769803776e+10, -5.1539607552e+10, -1.03079215104e+11, -2.06158430208e+11, -4.12316860416e+11, -8.24633720832e+11, + -1.649267441664e+12, -3.298534883328e+12, -6.597069766656e+12, -1.3194139533312e+13, -2.6388279066624e+13, -5.2776558133248e+13, + -1.05553116266496e+14, -2.11106232532992e+14, -4.22212465065984e+14, -8.44424930131968e+14, -1.688849860263936e+15, + -3.377699720527872e+15, -6.755399441055744e+15, -1.3510798882111488e+16, -2.7021597764222976e+16, + -5.404319552844595e+16, -1.080863910568919e+17, -2.161727821137838e+17, -4.323455642275676e+17, -8.646911284551352e+17, + -1.7293822569102705e+18, -3.458764513820541e+18, -6.917529027641082e+18, -1.3835058055282164e+19, -2.7670116110564327e+19, + -5.5340232221128655e+19, -1.1068046444225731e+20, -2.2136092888451462e+20, -4.4272185776902924e+20, -8.854437155380585e+20, + -1.770887431076117e+21, -3.541774862152234e+21, -7.083549724304468e+21, -1.4167099448608936e+22, -2.833419889721787e+22, + -5.666839779443574e+22, -1.1333679558887149e+23, -2.2667359117774297e+23, -4.5334718235548594e+23, -9.066943647109719e+23, + -1.8133887294219438e+24, -3.6267774588438875e+24, -7.253554917687775e+24, -1.450710983537555e+25, -2.90142196707511e+25, + -5.80284393415022e+25, -1.160568786830044e+26, -2.321137573660088e+26, -4.642275147320176e+26, -9.284550294640352e+26, + -1.8569100589280704e+27, -3.713820117856141e+27, -7.427640235712282e+27, -1.4855280471424563e+28, 
-2.9710560942849127e+28, + -5.942112188569825e+28, -1.188422437713965e+29, -2.37684487542793e+29, -4.75368975085586e+29, -9.50737950171172e+29, + }, + Counts: []float64{ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, + 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, + 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, + }, + Sum: 0, Count: 5050, Min: -9e+36, Max: -1, + }, + labels: map[string]string{oTellibDimensionKey: instrLibName, "label1": "value1"}, + }, + }, + }, + { + name: "Exponential histogram with more than 200 buckets, including positive, negative and zero buckets", + histogramDPS: func() pmetric.ExponentialHistogramDataPointSlice { + histogramDPS := pmetric.NewExponentialHistogramDataPointSlice() + histogramDP := histogramDPS.AppendEmpty() + posBucketCounts := make([]uint64, 120) + for i := range posBucketCounts { + posBucketCounts[i] = uint64(i + 1) + } + histogramDP.Positive().BucketCounts().FromRaw(posBucketCounts) + histogramDP.SetZeroCount(2) + negBucketCounts := make([]uint64, 120) + for i := range negBucketCounts { + negBucketCounts[i] = uint64(i + 1) + } + histogramDP.Negative().BucketCounts().FromRaw(negBucketCounts) + histogramDP.SetSum(100000) + histogramDP.SetMin(-9e+36) + histogramDP.SetMax(9e+36) + histogramDP.SetCount(uint64(3662)) + histogramDP.Attributes().PutStr("label1", "value1") + return histogramDPS + }(), + expectedDatapoints: []dataPoint{ + { + name: "foo", + value: &cWMetricHistogram{ + Values: []float64{ + 9.969209968386869e+35, 4.9846049841934345e+35, 2.4923024920967173e+35, 1.2461512460483586e+35, 6.230756230241793e+34, + 3.1153781151208966e+34, 1.5576890575604483e+34, 7.788445287802241e+33, 3.894222643901121e+33, 1.9471113219505604e+33, + 9.735556609752802e+32, 
4.867778304876401e+32, 2.4338891524382005e+32, 1.2169445762191002e+32, 6.084722881095501e+31, + 3.0423614405477506e+31, 1.5211807202738753e+31, 7.605903601369376e+30, 3.802951800684688e+30, 1.901475900342344e+30, + 9.50737950171172e+29, 4.75368975085586e+29, 2.37684487542793e+29, 1.188422437713965e+29, 5.942112188569825e+28, + 2.9710560942849127e+28, 1.4855280471424563e+28, 7.427640235712282e+27, 3.713820117856141e+27, 1.8569100589280704e+27, + 9.284550294640352e+26, 4.642275147320176e+26, 2.321137573660088e+26, 1.160568786830044e+26, 5.80284393415022e+25, + 2.90142196707511e+25, 1.450710983537555e+25, 7.253554917687775e+24, 3.6267774588438875e+24, 1.8133887294219438e+24, + 9.066943647109719e+23, 4.5334718235548594e+23, 2.2667359117774297e+23, 1.1333679558887149e+23, 5.666839779443574e+22, + 2.833419889721787e+22, 1.4167099448608936e+22, 7.083549724304468e+21, 3.541774862152234e+21, 1.770887431076117e+21, + 8.854437155380585e+20, 4.4272185776902924e+20, 2.2136092888451462e+20, 1.1068046444225731e+20, 5.5340232221128655e+19, + 2.7670116110564327e+19, 1.3835058055282164e+19, 6.917529027641082e+18, 3.458764513820541e+18, 1.7293822569102705e+18, + 8.646911284551352e+17, 4.323455642275676e+17, 2.161727821137838e+17, 1.080863910568919e+17, 5.404319552844595e+16, + 2.7021597764222976e+16, 1.3510798882111488e+16, 6.755399441055744e+15, 3.377699720527872e+15, 1.688849860263936e+15, + 8.44424930131968e+14, 4.22212465065984e+14, 2.11106232532992e+14, 1.05553116266496e+14, 5.2776558133248e+13, + 2.6388279066624e+13, 1.3194139533312e+13, 6.597069766656e+12, 3.298534883328e+12, 1.649267441664e+12, 8.24633720832e+11, + 4.12316860416e+11, 2.06158430208e+11, 1.03079215104e+11, 5.1539607552e+10, 2.5769803776e+10, 1.2884901888e+10, + 6.442450944e+09, 3.221225472e+09, 1.610612736e+09, 8.05306368e+08, 4.02653184e+08, 2.01326592e+08, 1.00663296e+08, 5.0331648e+07, + 2.5165824e+07, 1.2582912e+07, 6.291456e+06, 3.145728e+06, 1.572864e+06, + }, + Counts: []float64{ + 120, 119, 118, 117, 
116, 115, 114, 113, 112, 111, 110, 109, 108, 107, 106, 105, 104, 103, 102, 101, 100, 99, 98, 97, 96, 95, 94, + 93, 92, 91, 90, 89, 88, 87, 86, 85, 84, 83, 82, 81, 80, 79, 78, 77, 76, 75, 74, 73, 72, 71, 70, 69, 68, 67, 66, 65, 64, 63, 62, 61, + 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, + 27, 26, 25, 24, 23, 22, 21, + }, + Sum: 100000, Count: 7050, Min: 1048576, Max: 9e+36, + }, + labels: map[string]string{oTellibDimensionKey: instrLibName, "label1": "value1"}, + }, + { + name: "foo", + value: &cWMetricHistogram{ + Values: []float64{ + 786432, 393216, 196608, 98304, 49152, 24576, 12288, 6144, 3072, 1536, 768, 384, 192, 96, 48, 24, + 12, 6, 3, 1.5, 0, -1.5, -3, -6, -12, -24, -48, -96, -192, -384, -768, -1536, + -3072, -6144, -12288, -24576, -49152, -98304, -196608, -393216, -786432, -1.572864e+06, -3.145728e+06, -6.291456e+06, + -1.2582912e+07, -2.5165824e+07, -5.0331648e+07, -1.00663296e+08, -2.01326592e+08, -4.02653184e+08, -8.05306368e+08, + -1.610612736e+09, -3.221225472e+09, -6.442450944e+09, -1.2884901888e+10, -2.5769803776e+10, -5.1539607552e+10, + -1.03079215104e+11, -2.06158430208e+11, -4.12316860416e+11, -8.24633720832e+11, -1.649267441664e+12, + -3.298534883328e+12, -6.597069766656e+12, -1.3194139533312e+13, -2.6388279066624e+13, -5.2776558133248e+13, + -1.05553116266496e+14, -2.11106232532992e+14, -4.22212465065984e+14, -8.44424930131968e+14, + -1.688849860263936e+15, -3.377699720527872e+15, -6.755399441055744e+15, -1.3510798882111488e+16, + -2.7021597764222976e+16, -5.404319552844595e+16, -1.080863910568919e+17, -2.161727821137838e+17, + -4.323455642275676e+17, -8.646911284551352e+17, -1.7293822569102705e+18, -3.458764513820541e+18, + -6.917529027641082e+18, -1.3835058055282164e+19, -2.7670116110564327e+19, -5.5340232221128655e+19, + -1.1068046444225731e+20, -2.2136092888451462e+20, -4.4272185776902924e+20, -8.854437155380585e+20, + -1.770887431076117e+21, 
-3.541774862152234e+21, -7.083549724304468e+21, -1.4167099448608936e+22, + -2.833419889721787e+22, -5.666839779443574e+22, -1.1333679558887149e+23, -2.2667359117774297e+23, + -4.5334718235548594e+23, + }, + Counts: []float64{ + 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, + 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, + 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + }, + Sum: 0, Count: 3372, Min: -6.044629098073146e+23, Max: 1048576, + }, + labels: map[string]string{oTellibDimensionKey: instrLibName, "label1": "value1"}, + }, + { + name: "foo", + value: &cWMetricHistogram{ + Values: []float64{ + -9.066943647109719e+23, -1.8133887294219438e+24, -3.6267774588438875e+24, -7.253554917687775e+24, -1.450710983537555e+25, + -2.90142196707511e+25, -5.80284393415022e+25, -1.160568786830044e+26, -2.321137573660088e+26, -4.642275147320176e+26, + -9.284550294640352e+26, -1.8569100589280704e+27, -3.713820117856141e+27, -7.427640235712282e+27, -1.4855280471424563e+28, + -2.9710560942849127e+28, -5.942112188569825e+28, -1.188422437713965e+29, -2.37684487542793e+29, -4.75368975085586e+29, + -9.50737950171172e+29, -1.901475900342344e+30, -3.802951800684688e+30, -7.605903601369376e+30, -1.5211807202738753e+31, + -3.0423614405477506e+31, -6.084722881095501e+31, -1.2169445762191002e+32, -2.4338891524382005e+32, -4.867778304876401e+32, + -9.735556609752802e+32, -1.9471113219505604e+33, -3.894222643901121e+33, -7.788445287802241e+33, -1.5576890575604483e+34, + -3.1153781151208966e+34, -6.230756230241793e+34, -1.2461512460483586e+35, -2.4923024920967173e+35, -4.9846049841934345e+35, + -9.969209968386869e+35, + }, + Counts: []float64{ + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 
106, 107, 108, 109, + 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, + }, + Sum: 0, Count: 4100, Min: -9e+36, Max: -6.044629098073146e+23, + }, + labels: map[string]string{oTellibDimensionKey: instrLibName, "label1": "value1"}, + }, + }, + }, + { + name: "Exponential histogram with more than 100 buckets, including positive, negative and zero buckets with zero counts", + histogramDPS: func() pmetric.ExponentialHistogramDataPointSlice { + histogramDPS := pmetric.NewExponentialHistogramDataPointSlice() + histogramDP := histogramDPS.AppendEmpty() + posBucketCounts := make([]uint64, 60) + for i := range posBucketCounts { + posBucketCounts[i] = uint64(i % 5) + } + histogramDP.Positive().BucketCounts().FromRaw(posBucketCounts) + histogramDP.SetZeroCount(2) + negBucketCounts := make([]uint64, 60) + for i := range negBucketCounts { + negBucketCounts[i] = uint64(i % 5) + } + histogramDP.Negative().BucketCounts().FromRaw(negBucketCounts) + histogramDP.SetSum(1000) + histogramDP.SetMin(-9e+17) + histogramDP.SetMax(9e+17) + histogramDP.SetCount(uint64(3662)) + histogramDP.Attributes().PutStr("label1", "value1") + return histogramDPS + }(), + expectedDatapoints: []dataPoint{ + { + name: "foo", + value: &cWMetricHistogram{ + Values: []float64{ + 8.646911284551352e+17, 4.323455642275676e+17, 2.161727821137838e+17, 1.080863910568919e+17, 2.7021597764222976e+16, + 1.3510798882111488e+16, 6.755399441055744e+15, 3.377699720527872e+15, 8.44424930131968e+14, 4.22212465065984e+14, 2.11106232532992e+14, + 1.05553116266496e+14, 2.6388279066624e+13, 1.3194139533312e+13, 6.597069766656e+12, 3.298534883328e+12, 8.24633720832e+11, 4.12316860416e+11, + 2.06158430208e+11, 1.03079215104e+11, 2.5769803776e+10, 1.2884901888e+10, 6.442450944e+09, 3.221225472e+09, 8.05306368e+08, 4.02653184e+08, + 2.01326592e+08, 1.00663296e+08, 2.5165824e+07, 1.2582912e+07, 6.291456e+06, 3.145728e+06, 786432, 393216, 196608, 98304, 24576, 12288, 6144, 3072, + 768, 384, 192, 96, 24, 12, 6, 3, 0, -3, -6, -12, 
-24, -96, -192, -384, -768, -3072, -6144, -12288, -24576, -98304, -196608, -393216, -786432, + -3.145728e+06, -6.291456e+06, -1.2582912e+07, -2.5165824e+07, -1.00663296e+08, -2.01326592e+08, -4.02653184e+08, -8.05306368e+08, -3.221225472e+09, + -6.442450944e+09, -1.2884901888e+10, -2.5769803776e+10, -1.03079215104e+11, -2.06158430208e+11, -4.12316860416e+11, -8.24633720832e+11, -3.298534883328e+12, + -6.597069766656e+12, -1.3194139533312e+13, -2.6388279066624e+13, -1.05553116266496e+14, -2.11106232532992e+14, -4.22212465065984e+14, -8.44424930131968e+14, + -3.377699720527872e+15, -6.755399441055744e+15, -1.3510798882111488e+16, -2.7021597764222976e+16, -1.080863910568919e+17, -2.161727821137838e+17, + -4.323455642275676e+17, -8.646911284551352e+17, + }, + Counts: []float64{ + 4, 3, 2, 1, 4, 3, 2, 1, 4, 3, 2, 1, 4, 3, 2, 1, 4, 3, 2, 1, 4, 3, 2, 1, 4, 3, 2, 1, 4, 3, 2, 1, 4, 3, 2, 1, 4, 3, 2, 1, 4, 3, 2, 1, 4, 3, + 2, 1, 2, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, + }, + Sum: 1000, Count: 242, Min: -9e+17, Max: 9e+17, + }, + labels: map[string]string{oTellibDimensionKey: instrLibName, "label1": "value1"}, + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(_ *testing.T) { + exponentialHistogramDatapointSlice := exponentialHistogramDataPointSlice{dmd, tc.histogramDPS} + emfCalcs := setupEmfCalculators() + defer require.NoError(t, shutdownEmfCalculators(emfCalcs)) + dps, retained := exponentialHistogramDatapointSlice.CalculateDeltaDatapoints(0, instrLibName, false, emfCalcs) + + assert.True(t, retained) + assert.Equal(t, 1, exponentialHistogramDatapointSlice.Len()) + assert.Equal(t, len(tc.expectedDatapoints), len(dps)) + for i, expectedDP := range tc.expectedDatapoints { + assert.Equal(t, expectedDP, dps[i], "datapoint mismatch at index %d", i) + } + }) + } +} + func TestIsStaleNaNInf_ExponentialHistogramDataPointSlice(t *testing.T) { 
testCases := []struct { name string @@ -1521,12 +2118,14 @@ func TestGetDataPoints(t *testing.T) { }) } -func BenchmarkGetAndCalculateDeltaDataPoints(b *testing.B) { +func benchmarkGetAndCalculateDeltaDataPoints(b *testing.B, bucketLength int) { generateMetrics := []pmetric.Metrics{ generateTestGaugeMetric("int-gauge", intValueType), generateTestGaugeMetric("int-gauge", doubleValueType), generateTestHistogramMetric("histogram"), generateTestExponentialHistogramMetric("exponential-histogram"), + generateTestExponentialHistogramMetricWithSpecifiedNumberOfBuckets( + "exponential-histogram-buckets-"+strconv.Itoa(bucketLength), bucketLength), generateTestSumMetric("int-sum", intValueType), generateTestSumMetric("double-sum", doubleValueType), generateTestSummaryMetric("summary"), @@ -1549,3 +2148,19 @@ func BenchmarkGetAndCalculateDeltaDataPoints(b *testing.B) { } } } + +func BenchmarkGetAndCalculateDeltaDataPointsInclude100Buckets(b *testing.B) { + benchmarkGetAndCalculateDeltaDataPoints(b, 100) +} + +func BenchmarkGetAndCalculateDeltaDataPointsInclude200Buckets(b *testing.B) { + benchmarkGetAndCalculateDeltaDataPoints(b, 200) +} + +func BenchmarkGetAndCalculateDeltaDataPointsInclude300Buckets(b *testing.B) { + benchmarkGetAndCalculateDeltaDataPoints(b, 300) +} + +func BenchmarkGetAndCalculateDeltaDataPointsInclude500Buckets(b *testing.B) { + benchmarkGetAndCalculateDeltaDataPoints(b, 500) +} diff --git a/exporter/awsemfexporter/grouped_metric.go b/exporter/awsemfexporter/grouped_metric.go index 5ba39b93f787..8d4f8a57aa05 100644 --- a/exporter/awsemfexporter/grouped_metric.go +++ b/exporter/awsemfexporter/grouped_metric.go @@ -56,7 +56,7 @@ func addToGroupedMetric( continue } - for _, dp := range dps { + for i, dp := range dps { labels := dp.labels if metricType, ok := labels["Type"]; ok { @@ -86,6 +86,7 @@ func addToGroupedMetric( } // Extra params to use when grouping metrics + metadata.groupedMetricMetadata.batchIndex = i groupKey := 
aws.NewKey(metadata.groupedMetricMetadata, labels) if _, ok := groupedMetrics[groupKey]; ok { // if MetricName already exists in metrics map, print warning log diff --git a/exporter/awsemfexporter/grouped_metric_test.go b/exporter/awsemfexporter/grouped_metric_test.go index 8688cfaaca03..3300faf1e9a4 100644 --- a/exporter/awsemfexporter/grouped_metric_test.go +++ b/exporter/awsemfexporter/grouped_metric_test.go @@ -405,6 +405,43 @@ func TestAddToGroupedMetric(t *testing.T) { assert.Equal(t, 1, logs.Len()) assert.Equal(t, expectedLogs, logs.AllUntimed()) }) + + t.Run("Duplicate metric names with different metricIndex", func(t *testing.T) { + emfCalcs := setupEmfCalculators() + defer require.NoError(t, shutdownEmfCalculators(emfCalcs)) + groupedMetrics := make(map[any]*groupedMetric) + generateMetrics := []pmetric.Metrics{ + generateTestExponentialHistogramMetricWithLongBuckets("test_multiBucket_metric"), + } + finalOtelMetrics := generateOtelTestMetrics(generateMetrics...) + + rms := finalOtelMetrics.ResourceMetrics() + ilms := rms.At(0).ScopeMetrics() + metrics := ilms.At(0).Metrics() + assert.Equal(t, 1, metrics.Len()) + + for i := 0; i < metrics.Len(); i++ { + err := addToGroupedMetric(metrics.At(i), + groupedMetrics, + generateTestMetricMetadata(namespace, timestamp, logGroup, logStreamName, instrumentationLibName, metrics.At(i).Type()), + true, + nil, + testCfg, + emfCalcs, + ) + assert.NoError(t, err) + } + assert.Len(t, groupedMetrics, 2) + expectedLabels := map[string]string{oTellibDimensionKey: instrumentationLibName, "label1": "value1"} + idx := 0 + for _, v := range groupedMetrics { + assert.Len(t, v.metrics, 1) + assert.Len(t, v.labels, 2) + assert.Equal(t, generateTestMetricMetadata(namespace, timestamp, logGroup, logStreamName, instrumentationLibName, metrics.At(0).Type(), idx), v.metadata) + assert.Equal(t, expectedLabels, v.labels) + idx++ + } + }) } func TestAddKubernetesWrapper(t *testing.T) { @@ -509,7 +546,11 @@ func TestTranslateUnit(t 
*testing.T) { assert.Equal(t, "Count", v) } -func generateTestMetricMetadata(namespace string, timestamp int64, logGroup, logStreamName, instrumentationScopeName string, metricType pmetric.MetricType) cWMetricMetadata { +func generateTestMetricMetadata(namespace string, timestamp int64, logGroup, logStreamName, instrumentationScopeName string, metricType pmetric.MetricType, batchIndex ...int) cWMetricMetadata { + mIndex := 0 + if len(batchIndex) > 0 { + mIndex = batchIndex[0] + } return cWMetricMetadata{ receiver: prometheusReceiver, groupedMetricMetadata: groupedMetricMetadata{ @@ -518,6 +559,7 @@ func generateTestMetricMetadata(namespace string, timestamp int64, logGroup, log logGroup: logGroup, logStream: logStreamName, metricDataType: metricType, + batchIndex: mIndex, }, instrumentationScopeName: instrumentationScopeName, } diff --git a/exporter/awsemfexporter/metric_translator.go b/exporter/awsemfexporter/metric_translator.go index b5d9330503ce..09844e132c7e 100644 --- a/exporter/awsemfexporter/metric_translator.go +++ b/exporter/awsemfexporter/metric_translator.go @@ -85,6 +85,7 @@ type groupedMetricMetadata struct { logGroup string logStream string metricDataType pmetric.MetricType + batchIndex int retainInitialValueForDelta bool } @@ -150,6 +151,7 @@ func (mt metricTranslator) translateOTelToGroupedMetric(rm pmetric.ResourceMetri logGroup: logGroup, logStream: logStream, metricDataType: metric.Type(), + batchIndex: 0, retainInitialValueForDelta: deltaInitialValue, }, instrumentationScopeName: instrumentationScopeName, diff --git a/exporter/dorisexporter/README.md b/exporter/dorisexporter/README.md index 2aa6a0d2a307..c6f13aed25c3 100644 --- a/exporter/dorisexporter/README.md +++ b/exporter/dorisexporter/README.md @@ -2,15 +2,16 @@ | Status | | | ------------- |-----------| -| Stability | [development]: traces, metrics, logs | -| Distributions | [] | +| Stability | [alpha]: traces, metrics, logs | +| Distributions | [contrib] | | Issues | [![Open 
issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aexporter%2Fdoris%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aexporter%2Fdoris) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aexporter%2Fdoris%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aexporter%2Fdoris) | | [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@atoulme](https://www.github.com/atoulme), [@joker-star-l](https://www.github.com/joker-star-l) | -[development]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#development +[alpha]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#alpha +[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib -This exporter supports sending traces, metrics, and logs data to [Apache Doris](https://doris.apache.org/) (version >= 2.1). +This exporter supports sending traces, metrics, and logs data to [Apache Doris](https://doris.apache.org/) (version >= 2.1.1). ## Configuration @@ -29,7 +30,7 @@ The following configuration options are supported: * `history_days` (default = 0) Data older than these days will be deleted; ignored if `create_schema` is false. If set to 0, historical data will not be deleted. * `create_history_days` (default = 0) The number of days in the history partition that was created when the table was created; ignored if `create_schema` is false. 
If `history_days` is not 0, `create_history_days` needs to be less than or equal to `history_days`. * `replication_num` (default = 1) The number of replicas of the table; ignored if `create_schema` is false. -* `timezone` (default is the time zone of the opentelemetry collector) The time zone of doris. +* `timezone` (default is UTC) The time zone of doris. * `sending_queue` [details here](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/exporterhelper#configuration) * `enabled` (default = true) * `num_consumers` (default = 10) Number of consumers that dequeue batches; ignored if `enabled` is false. @@ -73,3 +74,11 @@ exporters: max_interval: 30s max_elapsed_time: 300s ``` + +## Notes + +1. Time Zone + + The Doris exporter uses IANA Time Zone Database (known as tzdata) to handle time zones, so make sure tzdata is on your system. + + For example, when you use docker, you should add option `-v your/path/to/tzdata:/usr/share/zoneinfo` when running the container. diff --git a/exporter/dorisexporter/internal/metadata/generated_status.go b/exporter/dorisexporter/internal/metadata/generated_status.go index 57d6e7762b27..de7a97cca84a 100644 --- a/exporter/dorisexporter/internal/metadata/generated_status.go +++ b/exporter/dorisexporter/internal/metadata/generated_status.go @@ -12,7 +12,7 @@ var ( ) const ( - TracesStability = component.StabilityLevelDevelopment - MetricsStability = component.StabilityLevelDevelopment - LogsStability = component.StabilityLevelDevelopment + TracesStability = component.StabilityLevelAlpha + MetricsStability = component.StabilityLevelAlpha + LogsStability = component.StabilityLevelAlpha ) diff --git a/exporter/dorisexporter/metadata.yaml b/exporter/dorisexporter/metadata.yaml index 36f42a887342..6c1a7ec3443e 100644 --- a/exporter/dorisexporter/metadata.yaml +++ b/exporter/dorisexporter/metadata.yaml @@ -4,8 +4,8 @@ scope_name: otelcol/doris status: class: exporter stability: - development: [traces, metrics, logs] - 
distributions: [] + alpha: [traces, metrics, logs] + distributions: [contrib] codeowners: active: [atoulme, joker-star-l] diff --git a/extension/opampextension/factory_test.go b/extension/opampextension/factory_test.go index 5f763ab06f9f..eaa4d40c6d29 100644 --- a/extension/opampextension/factory_test.go +++ b/extension/opampextension/factory_test.go @@ -21,6 +21,7 @@ func TestFactory_CreateDefaultConfig(t *testing.T) { ext, err := createExtension(context.Background(), extensiontest.NewNopSettings(), cfg) require.NoError(t, err) require.NotNil(t, ext) + require.NoError(t, ext.Shutdown(context.Background())) } func TestFactory_Create(t *testing.T) { @@ -28,4 +29,5 @@ func TestFactory_Create(t *testing.T) { ext, err := createExtension(context.Background(), extensiontest.NewNopSettings(), cfg) require.NoError(t, err) require.NotNil(t, ext) + require.NoError(t, ext.Shutdown(context.Background())) } diff --git a/extension/opampextension/go.mod b/extension/opampextension/go.mod index d3f52d4cdffd..f8ca3811d77c 100644 --- a/extension/opampextension/go.mod +++ b/extension/opampextension/go.mod @@ -7,6 +7,7 @@ require ( github.com/oklog/ulid/v2 v2.1.0 github.com/open-telemetry/opamp-go v0.17.0 github.com/open-telemetry/opentelemetry-collector-contrib/extension/opampcustommessages v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/status v0.115.0 github.com/shirou/gopsutil/v4 v4.24.10 github.com/stretchr/testify v1.10.0 go.opentelemetry.io/collector/component v0.115.0 @@ -67,3 +68,5 @@ require ( ) replace github.com/open-telemetry/opentelemetry-collector-contrib/extension/opampcustommessages => ../opampcustommessages + +replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/status => ../../pkg/status diff --git a/extension/opampextension/opamp_agent.go b/extension/opampextension/opamp_agent.go index 3482dc4fc071..c638e8727b05 100644 --- a/extension/opampextension/opamp_agent.go +++ b/extension/opampextension/opamp_agent.go @@ -33,9 +33,18 
@@ import ( "gopkg.in/yaml.v3" "github.com/open-telemetry/opentelemetry-collector-contrib/extension/opampcustommessages" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/status" ) -var _ extensioncapabilities.PipelineWatcher = (*opampAgent)(nil) +type statusAggregator interface { + Subscribe(scope status.Scope, verbosity status.Verbosity) (<-chan *status.AggregateStatus, status.UnsubscribeFunc) + RecordStatus(source *componentstatus.InstanceID, event *componentstatus.Event) +} + +type eventSourcePair struct { + source *componentstatus.InstanceID + event *componentstatus.Event +} type opampAgent struct { cfg *Config @@ -62,12 +71,21 @@ type opampAgent struct { opampClient client.OpAMPClient customCapabilityRegistry *customCapabilityRegistry + + statusAggregator statusAggregator + statusSubscriptionWg *sync.WaitGroup + componentHealthWg *sync.WaitGroup + startTimeUnixNano uint64 + componentStatusCh chan *eventSourcePair + readyCh chan struct{} } var ( _ opampcustommessages.CustomCapabilityRegistry = (*opampAgent)(nil) _ extensioncapabilities.Dependent = (*opampAgent)(nil) _ extensioncapabilities.ConfigWatcher = (*opampAgent)(nil) + _ extensioncapabilities.PipelineWatcher = (*opampAgent)(nil) + _ componentstatus.Watcher = (*opampAgent)(nil) ) func (o *opampAgent) Start(ctx context.Context, host component.Host) error { @@ -85,8 +103,6 @@ func (o *opampAgent) Start(ctx context.Context, host component.Host) error { return err } - o.lifetimeCtx, o.lifetimeCtxCancel = context.WithCancel(context.Background()) - if o.cfg.PPID != 0 { go monitorPPID(o.lifetimeCtx, o.cfg.PPIDPollInterval, o.cfg.PPID, o.reportFunc) } @@ -128,8 +144,6 @@ func (o *opampAgent) Start(ctx context.Context, host component.Host) error { return err } - o.setHealth(&protobufs.ComponentHealth{Healthy: false}) - o.logger.Debug("Starting OpAMP client...") if err := o.opampClient.Start(context.Background(), settings); err != nil { @@ -146,6 +160,9 @@ func (o *opampAgent) Shutdown(ctx 
context.Context) error { o.lifetimeCtxCancel() } + o.statusSubscriptionWg.Wait() + o.componentHealthWg.Wait() + o.logger.Debug("OpAMP agent shutting down...") if o.opampClient == nil { return nil @@ -190,6 +207,7 @@ func (o *opampAgent) Register(capability string, opts ...opampcustommessages.Cus func (o *opampAgent) Ready() error { o.setHealth(&protobufs.ComponentHealth{Healthy: true}) + close(o.readyCh) return nil } @@ -198,6 +216,27 @@ func (o *opampAgent) NotReady() error { return nil } +// ComponentStatusChanged implements the componentstatus.Watcher interface. +func (o *opampAgent) ComponentStatusChanged( + source *componentstatus.InstanceID, + event *componentstatus.Event, +) { + // There can be late arriving events after shutdown. We need to close + // the event channel so that this function doesn't block and we release all + // goroutines, but attempting to write to a closed channel will panic; log + // and recover. + defer func() { + if r := recover(); r != nil { + o.logger.Info( + "discarding event received after shutdown", + zap.Any("source", source), + zap.Any("event", event), + ) + } + }() + o.componentStatusCh <- &eventSourcePair{source: source, event: event} +} + func (o *opampAgent) updateEffectiveConfig(conf *confmap.Conf) { o.eclk.Lock() defer o.eclk.Unlock() @@ -249,9 +288,18 @@ func newOpampAgent(cfg *Config, set extension.Settings) (*opampAgent, error) { instanceID: uid, capabilities: cfg.Capabilities, opampClient: opampClient, + statusSubscriptionWg: &sync.WaitGroup{}, + componentHealthWg: &sync.WaitGroup{}, + readyCh: make(chan struct{}), customCapabilityRegistry: newCustomCapabilityRegistry(set.Logger, opampClient), } + agent.lifetimeCtx, agent.lifetimeCtxCancel = context.WithCancel(context.Background()) + + if agent.capabilities.ReportsHealth { + agent.initHealthReporting() + } + return agent, nil } @@ -372,6 +420,11 @@ func (o *opampAgent) onMessage(_ context.Context, msg *types.MessageData) { func (o *opampAgent) setHealth(ch 
*protobufs.ComponentHealth) { if o.capabilities.ReportsHealth && o.opampClient != nil { + if ch.Healthy && o.startTimeUnixNano == 0 { + ch.StartTimeUnixNano = ch.StatusTimeUnixNano + } else { + ch.StartTimeUnixNano = o.startTimeUnixNano + } if err := o.opampClient.SetHealth(ch); err != nil { o.logger.Error("Could not report health to OpAMP server", zap.Error(err)) } @@ -395,3 +448,120 @@ func getOSDescription(logger *zap.Logger) string { return runtime.GOOS } } + +func (o *opampAgent) initHealthReporting() { + if !o.capabilities.ReportsHealth { + return + } + o.setHealth(&protobufs.ComponentHealth{Healthy: false}) + + if o.statusAggregator == nil { + o.statusAggregator = status.NewAggregator(status.PriorityPermanent) + } + statusChan, unsubscribeFunc := o.statusAggregator.Subscribe(status.ScopeAll, status.Verbose) + o.statusSubscriptionWg.Add(1) + go o.statusAggregatorEventLoop(unsubscribeFunc, statusChan) + + // Start processing events in the background so that our status watcher doesn't + // block others before the extension starts. + o.componentStatusCh = make(chan *eventSourcePair) + o.componentHealthWg.Add(1) + go o.componentHealthEventLoop() +} + +func (o *opampAgent) componentHealthEventLoop() { + // Record events with component.StatusStarting, but queue other events until + // PipelineWatcher.Ready is called. This prevents aggregate statuses from + // flapping between StatusStarting and StatusOK as components are started + // individually by the service. 
+ var eventQueue []*eventSourcePair + + defer o.componentHealthWg.Done() + for loop := true; loop; { + select { + case esp, ok := <-o.componentStatusCh: + if !ok { + return + } + if esp.event.Status() != componentstatus.StatusStarting { + eventQueue = append(eventQueue, esp) + continue + } + o.statusAggregator.RecordStatus(esp.source, esp.event) + case <-o.readyCh: + for _, esp := range eventQueue { + o.statusAggregator.RecordStatus(esp.source, esp.event) + } + eventQueue = nil + loop = false + case <-o.lifetimeCtx.Done(): + return + } + } + + // After PipelineWatcher.Ready, record statuses as they are received. + for { + select { + case esp, ok := <-o.componentStatusCh: + if !ok { + return + } + o.statusAggregator.RecordStatus(esp.source, esp.event) + case <-o.lifetimeCtx.Done(): + return + } + } +} + +func (o *opampAgent) statusAggregatorEventLoop(unsubscribeFunc status.UnsubscribeFunc, statusChan <-chan *status.AggregateStatus) { + defer func() { + unsubscribeFunc() + o.statusSubscriptionWg.Done() + }() + for { + select { + case <-o.lifetimeCtx.Done(): + return + case statusUpdate, ok := <-statusChan: + if !ok { + return + } + + if statusUpdate == nil || statusUpdate.Status() == componentstatus.StatusNone { + continue + } + + componentHealth := convertComponentHealth(statusUpdate) + + o.setHealth(componentHealth) + } + } +} + +func convertComponentHealth(statusUpdate *status.AggregateStatus) *protobufs.ComponentHealth { + var isHealthy bool + if statusUpdate.Status() == componentstatus.StatusOK { + isHealthy = true + } else { + isHealthy = false + } + + componentHealth := &protobufs.ComponentHealth{ + Healthy: isHealthy, + Status: statusUpdate.Status().String(), + StatusTimeUnixNano: uint64(statusUpdate.Timestamp().UnixNano()), + } + + if statusUpdate.Err() != nil { + componentHealth.LastError = statusUpdate.Err().Error() + } + + if len(statusUpdate.ComponentStatusMap) > 0 { + componentHealth.ComponentHealthMap = map[string]*protobufs.ComponentHealth{} + for 
comp, compState := range statusUpdate.ComponentStatusMap { + componentHealth.ComponentHealthMap[comp] = convertComponentHealth(compState) + } + } + + return componentHealth +} diff --git a/extension/opampextension/opamp_agent_test.go b/extension/opampextension/opamp_agent_test.go index b9ee21e0e905..7921bd767470 100644 --- a/extension/opampextension/opamp_agent_test.go +++ b/extension/opampextension/opamp_agent_test.go @@ -5,21 +5,30 @@ package opampextension import ( "context" + "fmt" "os" "path/filepath" "runtime" + "sync" "testing" + "time" "github.com/google/uuid" + "github.com/open-telemetry/opamp-go/client/types" "github.com/open-telemetry/opamp-go/protobufs" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componentstatus" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/confmap/confmaptest" + "go.opentelemetry.io/collector/extension" "go.opentelemetry.io/collector/extension/extensiontest" semconv "go.opentelemetry.io/collector/semconv/v1.27.0" "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/status" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/status/testhelpers" ) func TestNewOpampAgent(t *testing.T) { @@ -35,6 +44,7 @@ func TestNewOpampAgent(t *testing.T) { assert.True(t, o.capabilities.ReportsHealth) assert.Empty(t, o.effectiveConfig) assert.Nil(t, o.agentDescription) + assert.NoError(t, o.Shutdown(context.Background())) } func TestNewOpampAgentAttributes(t *testing.T) { @@ -49,6 +59,7 @@ func TestNewOpampAgentAttributes(t *testing.T) { assert.Equal(t, "otelcol-distro", o.agentType) assert.Equal(t, "distro.0", o.agentVersion) assert.Equal(t, "f8999bc1-4c9b-4619-9bae-7f009d2411ec", o.instanceID.String()) + assert.NoError(t, o.Shutdown(context.Background())) } func TestCreateAgentDescription(t *testing.T) { @@ -147,6 +158,7 @@ func 
TestCreateAgentDescription(t *testing.T) { err = o.createAgentDescription() assert.NoError(t, err) require.Equal(t, tc.expected, o.agentDescription) + assert.NoError(t, o.Shutdown(context.Background())) }) } } @@ -165,6 +177,7 @@ func TestUpdateAgentIdentity(t *testing.T) { o.updateAgentIdentity(uid) assert.Equal(t, o.instanceID, uid) + assert.NoError(t, o.Shutdown(context.Background())) } func TestComposeEffectiveConfig(t *testing.T) { @@ -188,6 +201,8 @@ func TestComposeEffectiveConfig(t *testing.T) { assert.NotNil(t, ec) assert.YAMLEq(t, string(expected), string(ec.ConfigMap.ConfigMap[""].Body)) assert.Equal(t, "text/yaml", ec.ConfigMap.ConfigMap[""].ContentType) + + assert.NoError(t, o.Shutdown(context.Background())) } func TestShutdown(t *testing.T) { @@ -197,7 +212,7 @@ func TestShutdown(t *testing.T) { assert.NoError(t, err) // Shutdown with no OpAMP client - assert.NoError(t, o.Shutdown(context.TODO())) + assert.NoError(t, o.Shutdown(context.Background())) } func TestStart(t *testing.T) { @@ -206,8 +221,295 @@ func TestStart(t *testing.T) { o, err := newOpampAgent(cfg.(*Config), set) assert.NoError(t, err) - assert.NoError(t, o.Start(context.TODO(), componenttest.NewNopHost())) - assert.NoError(t, o.Shutdown(context.TODO())) + assert.NoError(t, o.Start(context.Background(), componenttest.NewNopHost())) + assert.NoError(t, o.Shutdown(context.Background())) +} + +func TestHealthReportingReceiveUpdateFromAggregator(t *testing.T) { + cfg := createDefaultConfig().(*Config) + set := extensiontest.NewNopSettings() + + statusUpdateChannel := make(chan *status.AggregateStatus) + + mtx := &sync.RWMutex{} + now := time.Now() + expectedHealthUpdates := []*protobufs.ComponentHealth{ + { + Healthy: false, + }, + { + Healthy: true, + StartTimeUnixNano: uint64(now.UnixNano()), + Status: "StatusOK", + StatusTimeUnixNano: uint64(now.UnixNano()), + ComponentHealthMap: map[string]*protobufs.ComponentHealth{ + "test-receiver": { + Healthy: true, + Status: "StatusOK", + 
StatusTimeUnixNano: uint64(now.UnixNano()), + }, + }, + }, + { + Healthy: false, + Status: "StatusPermanentError", + StatusTimeUnixNano: uint64(now.UnixNano()), + LastError: "unexpected error", + ComponentHealthMap: map[string]*protobufs.ComponentHealth{ + "test-receiver": { + Healthy: false, + Status: "StatusPermanentError", + StatusTimeUnixNano: uint64(now.UnixNano()), + LastError: "unexpected error", + }, + }, + }, + } + receivedHealthUpdates := 0 + + mockOpampClient := &mockOpAMPClient{ + setHealthFunc: func(health *protobufs.ComponentHealth) error { + mtx.Lock() + defer mtx.Unlock() + require.Equal(t, expectedHealthUpdates[receivedHealthUpdates], health) + receivedHealthUpdates++ + return nil + }, + } + + sa := &mockStatusAggregator{ + statusChan: statusUpdateChannel, + } + + o := newTestOpampAgent(cfg, set, mockOpampClient, sa) + + o.initHealthReporting() + + assert.NoError(t, o.Start(context.Background(), componenttest.NewNopHost())) + + statusUpdateChannel <- nil + statusUpdateChannel <- &status.AggregateStatus{ + Event: &mockStatusEvent{ + status: componentstatus.StatusOK, + err: nil, + timestamp: now, + }, + ComponentStatusMap: map[string]*status.AggregateStatus{ + "test-receiver": { + Event: &mockStatusEvent{ + status: componentstatus.StatusOK, + err: nil, + timestamp: now, + }, + }, + }, + } + statusUpdateChannel <- &status.AggregateStatus{ + Event: &mockStatusEvent{ + status: componentstatus.StatusPermanentError, + err: fmt.Errorf("unexpected error"), + timestamp: now, + }, + ComponentStatusMap: map[string]*status.AggregateStatus{ + "test-receiver": { + Event: &mockStatusEvent{ + status: componentstatus.StatusPermanentError, + err: fmt.Errorf("unexpected error"), + timestamp: now, + }, + }, + }, + } + + close(statusUpdateChannel) + + require.Eventually(t, func() bool { + mtx.RLock() + defer mtx.RUnlock() + return receivedHealthUpdates == len(expectedHealthUpdates) + }, 1*time.Second, 100*time.Millisecond) + + assert.NoError(t, 
o.Shutdown(context.Background())) + require.True(t, sa.unsubscribed) +} + +func TestHealthReportingForwardComponentHealthToAggregator(t *testing.T) { + cfg := createDefaultConfig().(*Config) + set := extensiontest.NewNopSettings() + + mtx := &sync.RWMutex{} + + sa := &mockStatusAggregator{ + mtx: mtx, + } + + o := newTestOpampAgent( + cfg, + set, + &mockOpAMPClient{ + setHealthFunc: func(_ *protobufs.ComponentHealth) error { + return nil + }, + }, sa) + + o.initHealthReporting() + + assert.NoError(t, o.Start(context.Background(), componenttest.NewNopHost())) + + traces := testhelpers.NewPipelineMetadata("traces") + + // StatusStarting will be sent immediately. + for _, id := range traces.InstanceIDs() { + o.ComponentStatusChanged(id, componentstatus.NewEvent(componentstatus.StatusStarting)) + } + + // StatusOK will be queued until the PipelineWatcher Ready method is called. + for _, id := range traces.InstanceIDs() { + o.ComponentStatusChanged(id, componentstatus.NewEvent(componentstatus.StatusOK)) + } + + // verify we have received the StatusStarting events + require.Eventually(t, func() bool { + mtx.RLock() + defer mtx.RUnlock() + return len(sa.receivedEvents) == len(traces.InstanceIDs()) + }, 5*time.Second, 100*time.Millisecond) + + for _, event := range sa.receivedEvents { + require.Equal(t, componentstatus.NewEvent(componentstatus.StatusStarting).Status(), event.event.Status()) + } + + // clean the received events of the mocked status aggregator + sa.receivedEvents = nil + + err := o.Ready() + require.NoError(t, err) + + // verify we have received the StatusOK events that have been queued while the agent has not been ready + require.Eventually(t, func() bool { + mtx.RLock() + defer mtx.RUnlock() + return len(sa.receivedEvents) == len(traces.InstanceIDs()) + }, 5*time.Second, 100*time.Millisecond) + + for _, event := range sa.receivedEvents { + require.Equal(t, componentstatus.NewEvent(componentstatus.StatusOK).Status(), event.event.Status()) + } + + // clean 
the received events of the mocked status aggregator + sa.receivedEvents = nil + + // send another set of events - these should be passed through immediately + for _, id := range traces.InstanceIDs() { + o.ComponentStatusChanged(id, componentstatus.NewEvent(componentstatus.StatusStopping)) + } + + require.Eventually(t, func() bool { + mtx.RLock() + defer mtx.RUnlock() + return len(sa.receivedEvents) == len(traces.InstanceIDs()) + }, 5*time.Second, 100*time.Millisecond) + + for _, event := range sa.receivedEvents { + require.Equal(t, componentstatus.NewEvent(componentstatus.StatusStopping).Status(), event.event.Status()) + } + + assert.NoError(t, o.Shutdown(context.Background())) + require.True(t, sa.unsubscribed) +} + +func TestHealthReportingExitsOnClosedContext(t *testing.T) { + cfg := createDefaultConfig().(*Config) + set := extensiontest.NewNopSettings() + + statusUpdateChannel := make(chan *status.AggregateStatus) + sa := &mockStatusAggregator{ + statusChan: statusUpdateChannel, + } + + mtx := &sync.RWMutex{} + now := time.Now() + expectedHealthUpdates := []*protobufs.ComponentHealth{ + { + Healthy: false, + }, + { + Healthy: true, + StartTimeUnixNano: uint64(now.UnixNano()), + Status: "StatusOK", + StatusTimeUnixNano: uint64(now.UnixNano()), + ComponentHealthMap: map[string]*protobufs.ComponentHealth{ + "test-receiver": { + Healthy: true, + Status: "StatusOK", + StatusTimeUnixNano: uint64(now.UnixNano()), + }, + }, + }, + } + receivedHealthUpdates := 0 + + mockOpampClient := &mockOpAMPClient{ + setHealthFunc: func(health *protobufs.ComponentHealth) error { + mtx.Lock() + defer mtx.Unlock() + require.Equal(t, expectedHealthUpdates[receivedHealthUpdates], health) + receivedHealthUpdates++ + return nil + }, + } + + o := newTestOpampAgent(cfg, set, mockOpampClient, sa) + + o.initHealthReporting() + + assert.NoError(t, o.Start(context.Background(), componenttest.NewNopHost())) + + statusUpdateChannel <- nil + statusUpdateChannel <- &status.AggregateStatus{ + Event: 
&mockStatusEvent{ + status: componentstatus.StatusOK, + err: nil, + timestamp: now, + }, + ComponentStatusMap: map[string]*status.AggregateStatus{ + "test-receiver": { + Event: &mockStatusEvent{ + status: componentstatus.StatusOK, + err: nil, + timestamp: now, + }, + }, + }, + } + + require.Eventually(t, func() bool { + mtx.RLock() + defer mtx.RUnlock() + return receivedHealthUpdates == len(expectedHealthUpdates) + }, 1*time.Second, 100*time.Millisecond) + + // invoke Shutdown before health update channel has been closed + assert.NoError(t, o.Shutdown(context.Background())) + require.True(t, sa.unsubscribed) +} + +func TestHealthReportingDisabled(t *testing.T) { + cfg := createDefaultConfig() + set := extensiontest.NewNopSettings() + o, err := newOpampAgent(cfg.(*Config), set) + assert.NoError(t, err) + + o.capabilities.ReportsHealth = false + o.opampClient = &mockOpAMPClient{ + setHealthFunc: func(_ *protobufs.ComponentHealth) error { + t.Errorf("setHealth is not supposed to be called with deactivated ReportsHealth capability") + return nil + }, + } + + assert.NoError(t, o.Start(context.Background(), componenttest.NewNopHost())) + assert.NoError(t, o.Shutdown(context.Background())) } func TestParseInstanceIDString(t *testing.T) { @@ -283,3 +585,114 @@ func TestOpAMPAgent_Dependencies(t *testing.T) { require.Equal(t, []component.ID{authID}, o.Dependencies()) }) } + +type mockStatusAggregator struct { + statusChan chan *status.AggregateStatus + receivedEvents []eventSourcePair + unsubscribed bool + mtx *sync.RWMutex +} + +func (m *mockStatusAggregator) Subscribe(_ status.Scope, _ status.Verbosity) (<-chan *status.AggregateStatus, status.UnsubscribeFunc) { + return m.statusChan, func() { + m.unsubscribed = true + } +} + +func (m *mockStatusAggregator) RecordStatus(source *componentstatus.InstanceID, event *componentstatus.Event) { + m.mtx.Lock() + defer m.mtx.Unlock() + m.receivedEvents = append(m.receivedEvents, eventSourcePair{ + source: source, + event: event, + 
}) +} + +type mockOpAMPClient struct { + setHealthFunc func(health *protobufs.ComponentHealth) error +} + +func (m mockOpAMPClient) Start(_ context.Context, _ types.StartSettings) error { + return nil +} + +func (m mockOpAMPClient) Stop(_ context.Context) error { + return nil +} + +func (m mockOpAMPClient) SetAgentDescription(_ *protobufs.AgentDescription) error { + return nil +} + +func (m mockOpAMPClient) AgentDescription() *protobufs.AgentDescription { + return nil +} + +func (m mockOpAMPClient) SetHealth(health *protobufs.ComponentHealth) error { + return m.setHealthFunc(health) +} + +func (m mockOpAMPClient) UpdateEffectiveConfig(_ context.Context) error { + return nil +} + +func (m mockOpAMPClient) SetRemoteConfigStatus(_ *protobufs.RemoteConfigStatus) error { + return nil +} + +func (m mockOpAMPClient) SetPackageStatuses(_ *protobufs.PackageStatuses) error { + return nil +} + +func (m mockOpAMPClient) RequestConnectionSettings(_ *protobufs.ConnectionSettingsRequest) error { + return nil +} + +func (m mockOpAMPClient) SetCustomCapabilities(_ *protobufs.CustomCapabilities) error { + return nil +} + +func (m mockOpAMPClient) SendCustomMessage(_ *protobufs.CustomMessage) (messageSendingChannel chan struct{}, err error) { + return nil, nil +} + +func (m mockOpAMPClient) SetFlags(_ protobufs.AgentToServerFlags) {} + +type mockStatusEvent struct { + status componentstatus.Status + err error + timestamp time.Time +} + +func (m mockStatusEvent) Status() componentstatus.Status { + return m.status +} + +func (m mockStatusEvent) Err() error { + return m.err +} + +func (m mockStatusEvent) Timestamp() time.Time { + return m.timestamp +} + +func newTestOpampAgent(cfg *Config, set extension.Settings, mockOpampClient *mockOpAMPClient, sa *mockStatusAggregator) *opampAgent { + uid := uuid.New() + o := &opampAgent{ + cfg: cfg, + logger: set.Logger, + agentType: set.BuildInfo.Command, + agentVersion: set.BuildInfo.Version, + instanceID: uid, + capabilities: cfg.Capabilities, + 
opampClient: mockOpampClient, + statusSubscriptionWg: &sync.WaitGroup{}, + componentHealthWg: &sync.WaitGroup{}, + readyCh: make(chan struct{}), + customCapabilityRegistry: newCustomCapabilityRegistry(set.Logger, mockOpampClient), + statusAggregator: sa, + } + + o.lifetimeCtx, o.lifetimeCtxCancel = context.WithCancel(context.Background()) + return o +} diff --git a/pkg/ottl/CONTRIBUTING.md b/pkg/ottl/CONTRIBUTING.md index 0bd2d24a7a82..e5e54971b6ce 100644 --- a/pkg/ottl/CONTRIBUTING.md +++ b/pkg/ottl/CONTRIBUTING.md @@ -15,8 +15,13 @@ Your proposal likely will be accepted if: - The proposed functionality is missing, - The proposed solution significantly improves user experience and readability for very common use cases, - The proposed solution is more performant in cases where it is possible to achieve the same result with existing options. +- The proposed solution makes use of packages from the Go standard library to offer functionality possible through an existing option in a more standard or reliable manner. -It will be up for discussion if your proposal solves an issue that can be achieved in another way but does not improve user experience or performance. +It will be up for discussion if: + +- Your proposal solves an issue that can be achieved in another way but does not improve user experience or performance. +- The proposed functionality is not obviously applicable to the needs of a significant number of OTTL users. +- Your proposal extracts data into a structure with enumerable keys or values and OpenTelemetry semantic conventions do not cover the shape or values for this data. 
Your proposal likely won't be accepted if: diff --git a/pkg/translator/prometheusremotewrite/helper.go b/pkg/translator/prometheusremotewrite/helper.go index 252973b96dc2..939f3ae5e5e3 100644 --- a/pkg/translator/prometheusremotewrite/helper.go +++ b/pkg/translator/prometheusremotewrite/helper.go @@ -301,9 +301,18 @@ func getPromExemplars[T exemplarType](pt T) []prompb.Exemplar { exemplar := pt.Exemplars().At(i) exemplarRunes := 0 - promExemplar := prompb.Exemplar{ - Value: exemplar.DoubleValue(), - Timestamp: timestamp.FromTime(exemplar.Timestamp().AsTime()), + var promExemplar prompb.Exemplar + switch exemplar.ValueType() { + case pmetric.ExemplarValueTypeInt: + promExemplar = prompb.Exemplar{ + Value: float64(exemplar.IntValue()), + Timestamp: timestamp.FromTime(exemplar.Timestamp().AsTime()), + } + case pmetric.ExemplarValueTypeDouble: + promExemplar = prompb.Exemplar{ + Value: exemplar.DoubleValue(), + Timestamp: timestamp.FromTime(exemplar.Timestamp().AsTime()), + } } if traceID := exemplar.TraceID(); !traceID.IsEmpty() { val := hex.EncodeToString(traceID[:]) diff --git a/pkg/translator/prometheusremotewrite/helper_test.go b/pkg/translator/prometheusremotewrite/helper_test.go index 12c0dcd978be..631ff7c831b1 100644 --- a/pkg/translator/prometheusremotewrite/helper_test.go +++ b/pkg/translator/prometheusremotewrite/helper_test.go @@ -479,6 +479,17 @@ func Test_getPromExemplars(t *testing.T) { }, }, }, + { + "with_exemplars_int_value", + getHistogramDataPointWithExemplars(t, tnow, intVal2, traceIDValue1, spanIDValue1, label11, value11), + []prompb.Exemplar{ + { + Value: float64(intVal2), + Timestamp: timestamp.FromTime(tnow), + Labels: []prompb.Label{getLabel(prometheustranslator.ExemplarTraceIDKey, traceIDValue1), getLabel(prometheustranslator.ExemplarSpanIDKey, spanIDValue1), getLabel(label11, value11)}, + }, + }, + }, { "too_many_runes_drops_labels", getHistogramDataPointWithExemplars(t, tnow, floatVal1, "", "", keyWith129Runes, ""), diff --git 
a/pkg/translator/prometheusremotewrite/testutils_test.go b/pkg/translator/prometheusremotewrite/testutils_test.go index 49ef7a735081..42a8dc48f9e7 100644 --- a/pkg/translator/prometheusremotewrite/testutils_test.go +++ b/pkg/translator/prometheusremotewrite/testutils_test.go @@ -184,11 +184,16 @@ func getTimeSeriesWithSamplesAndExemplars(labels []prompb.Label, samples []promp } } -func getHistogramDataPointWithExemplars(t *testing.T, time time.Time, value float64, traceID string, spanID string, attributeKey string, attributeValue string) pmetric.HistogramDataPoint { +func getHistogramDataPointWithExemplars[V int64 | float64](t *testing.T, time time.Time, value V, traceID string, spanID string, attributeKey string, attributeValue string) pmetric.HistogramDataPoint { h := pmetric.NewHistogramDataPoint() e := h.Exemplars().AppendEmpty() - e.SetDoubleValue(value) + switch v := (any)(value).(type) { + case int64: + e.SetIntValue(v) + case float64: + e.SetDoubleValue(v) + } e.SetTimestamp(pcommon.NewTimestampFromTime(time)) if attributeKey != "" || attributeValue != "" { e.FilteredAttributes().PutStr(attributeKey, attributeValue) diff --git a/receiver/elasticsearchreceiver/config_test.go b/receiver/elasticsearchreceiver/config_test.go index 149f9bed0b1f..4d6974984246 100644 --- a/receiver/elasticsearchreceiver/config_test.go +++ b/receiver/elasticsearchreceiver/config_test.go @@ -4,7 +4,6 @@ package elasticsearchreceiver import ( - "net/http" "path/filepath" "testing" "time" @@ -140,11 +139,6 @@ func TestValidateEndpoint(t *testing.T) { } func TestLoadConfig(t *testing.T) { - defaultMaxIdleConns := http.DefaultTransport.(*http.Transport).MaxIdleConns - defaultMaxIdleConnsPerHost := http.DefaultTransport.(*http.Transport).MaxIdleConnsPerHost - defaultMaxConnsPerHost := http.DefaultTransport.(*http.Transport).MaxConnsPerHost - defaultIdleConnTimeout := http.DefaultTransport.(*http.Transport).IdleConnTimeout - t.Parallel() cm, err := 
confmaptest.LoadConf(filepath.Join("testdata", "config.yaml")) @@ -173,15 +167,13 @@ func TestLoadConfig(t *testing.T) { MetricsBuilderConfig: defaultMetrics, Username: "otel", Password: "password", - ClientConfig: confighttp.ClientConfig{ - Timeout: 10000000000, - Endpoint: "http://example.com:9200", - Headers: map[string]configopaque.String{}, - MaxIdleConns: &defaultMaxIdleConns, - MaxIdleConnsPerHost: &defaultMaxIdleConnsPerHost, - MaxConnsPerHost: &defaultMaxConnsPerHost, - IdleConnTimeout: &defaultIdleConnTimeout, - }, + ClientConfig: func() confighttp.ClientConfig { + client := confighttp.NewDefaultClientConfig() + client.Timeout = 10000000000 + client.Endpoint = "http://example.com:9200" + client.Headers = map[string]configopaque.String{} + return client + }(), }, }, } diff --git a/receiver/googlecloudspannerreceiver/go.mod b/receiver/googlecloudspannerreceiver/go.mod index 8b6e95d0c74d..91339130e278 100644 --- a/receiver/googlecloudspannerreceiver/go.mod +++ b/receiver/googlecloudspannerreceiver/go.mod @@ -3,7 +3,7 @@ module github.com/open-telemetry/opentelemetry-collector-contrib/receiver/google go 1.22.0 require ( - cloud.google.com/go/spanner v1.70.0 + cloud.google.com/go/spanner v1.73.0 github.com/ReneKroon/ttlcache/v2 v2.11.0 github.com/mitchellh/hashstructure v1.1.0 github.com/stretchr/testify v1.10.0 diff --git a/receiver/googlecloudspannerreceiver/go.sum b/receiver/googlecloudspannerreceiver/go.sum index 07de9870f900..afe40fa505c7 100644 --- a/receiver/googlecloudspannerreceiver/go.sum +++ b/receiver/googlecloudspannerreceiver/go.sum @@ -526,8 +526,8 @@ cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+ cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk= cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= -cloud.google.com/go/spanner v1.70.0 
h1:nj6p/GJTgMDiSQ1gQ034ItsKuJgHiMOjtOlONOg8PSo= -cloud.google.com/go/spanner v1.70.0/go.mod h1:X5T0XftydYp0K1adeJQDJtdWpbrOeJ7wHecM4tK6FiE= +cloud.google.com/go/spanner v1.73.0 h1:0bab8QDn6MNj9lNK6XyGAVFhMlhMU2waePPa6GZNoi8= +cloud.google.com/go/spanner v1.73.0/go.mod h1:mw98ua5ggQXVWwp83yjwggqEmW9t8rjs9Po1ohcUGW4= cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= diff --git a/receiver/k8seventsreceiver/k8s_event_to_logdata.go b/receiver/k8seventsreceiver/k8s_event_to_logdata.go index 31d2b4cdbb11..9544eb0a6d6f 100644 --- a/receiver/k8seventsreceiver/k8s_event_to_logdata.go +++ b/receiver/k8seventsreceiver/k8s_event_to_logdata.go @@ -8,7 +8,7 @@ import ( "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/plog" - semconv "go.opentelemetry.io/collector/semconv/v1.6.1" + semconv "go.opentelemetry.io/collector/semconv/v1.27.0" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" ) diff --git a/receiver/k8sobjectsreceiver/unstructured_to_logdata.go b/receiver/k8sobjectsreceiver/unstructured_to_logdata.go index fbb415155dc4..15dabf6f78b0 100644 --- a/receiver/k8sobjectsreceiver/unstructured_to_logdata.go +++ b/receiver/k8sobjectsreceiver/unstructured_to_logdata.go @@ -9,7 +9,7 @@ import ( "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/plog" - semconv "go.opentelemetry.io/collector/semconv/v1.9.0" + semconv "go.opentelemetry.io/collector/semconv/v1.27.0" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/watch" ) diff --git a/receiver/k8sobjectsreceiver/unstructured_to_logdata_test.go b/receiver/k8sobjectsreceiver/unstructured_to_logdata_test.go index da71680ba5b1..d6ed1c1d1e8a 100644 --- a/receiver/k8sobjectsreceiver/unstructured_to_logdata_test.go +++ 
b/receiver/k8sobjectsreceiver/unstructured_to_logdata_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - semconv "go.opentelemetry.io/collector/semconv/v1.9.0" + semconv "go.opentelemetry.io/collector/semconv/v1.27.0" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/watch" diff --git a/reports/distributions/contrib.yaml b/reports/distributions/contrib.yaml index 7f93791eccb8..930aa89c7d12 100644 --- a/reports/distributions/contrib.yaml +++ b/reports/distributions/contrib.yaml @@ -28,6 +28,7 @@ components: - coralogix - datadog - dataset + - doris - elasticsearch - file - googlecloud diff --git a/testbed/stabilitytests/metric_test.go b/testbed/stabilitytests/metric_test.go index 6a68229d4c6d..05d310bfdc2c 100644 --- a/testbed/stabilitytests/metric_test.go +++ b/testbed/stabilitytests/metric_test.go @@ -26,6 +26,7 @@ func TestStabilityMetricsOTLP(t *testing.T) { contribPerfResultsSummary, nil, nil, + nil, ) } @@ -42,6 +43,7 @@ func TestStabilityMetricsOpenCensus(t *testing.T) { contribPerfResultsSummary, nil, nil, + nil, ) } @@ -58,6 +60,7 @@ func TestStabilityMetricsCarbon(t *testing.T) { contribPerfResultsSummary, nil, nil, + nil, ) } @@ -74,5 +77,6 @@ func TestStabilityMetricsSignalFx(t *testing.T) { contribPerfResultsSummary, nil, nil, + nil, ) } diff --git a/testbed/stabilitytests/trace_test.go b/testbed/stabilitytests/trace_test.go index 2006b664b855..85eeca8d366e 100644 --- a/testbed/stabilitytests/trace_test.go +++ b/testbed/stabilitytests/trace_test.go @@ -52,6 +52,7 @@ func TestStabilityTracesOpenCensus(t *testing.T) { contribPerfResultsSummary, processorsConfig, nil, + nil, ) } @@ -68,6 +69,7 @@ func TestStabilityTracesSAPM(t *testing.T) { contribPerfResultsSummary, processorsConfig, nil, + nil, ) } @@ -84,6 +86,7 @@ func TestStabilityTracesOTLP(t *testing.T) { contribPerfResultsSummary, processorsConfig, nil, + nil, ) } @@ 
-100,6 +103,7 @@ func TestStabilityTracesJaegerGRPC(t *testing.T) { contribPerfResultsSummary, processorsConfig, nil, + nil, ) } @@ -116,6 +120,7 @@ func TestStabilityTracesZipkin(t *testing.T) { contribPerfResultsSummary, processorsConfig, nil, + nil, ) } @@ -132,5 +137,6 @@ func TestStabilityTracesDatadog(t *testing.T) { contribPerfResultsSummary, processorsConfig, nil, + nil, ) } diff --git a/testbed/testbed/receivers.go b/testbed/testbed/receivers.go index 6db874822f94..829eb6a5d411 100644 --- a/testbed/testbed/receivers.go +++ b/testbed/testbed/receivers.go @@ -56,6 +56,7 @@ type BaseOTLPDataReceiver struct { retry string sendingQueue string timeout string + batcher string } func (bor *BaseOTLPDataReceiver) Start(tc consumer.Traces, mc consumer.Metrics, lc consumer.Logs) error { @@ -104,6 +105,11 @@ func (bor *BaseOTLPDataReceiver) WithTimeout(timeout string) *BaseOTLPDataReceiv return bor } +func (bor *BaseOTLPDataReceiver) WithBatcher(batcher string) *BaseOTLPDataReceiver { + bor.batcher = batcher + return bor +} + func (bor *BaseOTLPDataReceiver) Stop() error { // we reuse the receiver across signals. Shutting down the log receiver shuts down the metrics and traces receiver. return bor.logReceiver.Shutdown(context.Background()) @@ -125,8 +131,9 @@ func (bor *BaseOTLPDataReceiver) GenConfigYAMLStr() string { %s %s %s + %s tls: - insecure: true`, bor.exporterType, addr, bor.retry, bor.sendingQueue, bor.timeout) + insecure: true`, bor.exporterType, addr, bor.retry, bor.sendingQueue, bor.timeout, bor.batcher) comp := "none" if bor.compression != "" { comp = bor.compression diff --git a/testbed/tests/batcher_test.go b/testbed/tests/batcher_test.go new file mode 100644 index 000000000000..1de09f555534 --- /dev/null +++ b/testbed/tests/batcher_test.go @@ -0,0 +1,260 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package tests contains test cases. 
To run the tests go to tests directory and run: +// RUN_TESTBED=1 go test -v + +//go:build batcher +// +build batcher + +package tests + +// The tests in this file measure the effect of batching on collector performance. +// Their primary intent is to measure the performance impact of https://github.com/open-telemetry/opentelemetry-collector/issues/8122. + +import ( + "fmt" + "slices" + "testing" + + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/testutil" + "github.com/open-telemetry/opentelemetry-collector-contrib/testbed/testbed" +) + +type batcherTestSpec struct { + name string + withQueue bool + withBatchProcessor bool + withExporterBatcher bool + batchSize int + processors []ProcessorNameAndConfigBody + resourceSpec testbed.ResourceSpec + extensions map[string]string +} + +func TestLog10kDPSNoProcessors(t *testing.T) { + tests := []batcherTestSpec{ + { + name: "No batching, no queue", + resourceSpec: testbed.ResourceSpec{ + ExpectedMaxCPU: 30, + ExpectedMaxRAM: 120, + }, + }, + { + name: "No batching, queue", + withQueue: true, + resourceSpec: testbed.ResourceSpec{ + ExpectedMaxCPU: 30, + ExpectedMaxRAM: 120, + }, + }, + { + name: "Batch size 1000 with batch processor, no queue", + batchSize: 1000, + withBatchProcessor: true, + resourceSpec: testbed.ResourceSpec{ + ExpectedMaxCPU: 30, + ExpectedMaxRAM: 120, + }, + }, + { + name: "Batch size 1000 with batch processor, queue", + batchSize: 1000, + withBatchProcessor: true, + withQueue: true, + resourceSpec: testbed.ResourceSpec{ + ExpectedMaxCPU: 30, + ExpectedMaxRAM: 120, + }, + }, + { + name: "Batch size 1000 with exporter batcher, no queue", + withExporterBatcher: true, + batchSize: 1000, + resourceSpec: testbed.ResourceSpec{ + ExpectedMaxCPU: 30, + ExpectedMaxRAM: 120, + }, + }, + { + name: "Batch size 1000 with exporter batcher, queue", + withExporterBatcher: true, + withQueue: true, + batchSize: 1000, + resourceSpec: testbed.ResourceSpec{ + ExpectedMaxCPU: 30, + ExpectedMaxRAM: 
120, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + sender := testbed.NewOTLPLogsDataSender(testbed.DefaultHost, testutil.GetAvailablePort(t)) + receiver := testbed.NewOTLPDataReceiver(testutil.GetAvailablePort(t)) + receiver.WithRetry(` + retry_on_failure: + enabled: true +`) + if test.withQueue { + receiver.WithQueue(` + sending_queue: + enabled: true +`) + } + + if test.withExporterBatcher { + receiver.WithBatcher(fmt.Sprintf(` + batcher: + enabled: true + min_size_items: %d +`, test.batchSize)) + } + + processors := slices.Clone(test.processors) + if test.withBatchProcessor { + processors = slices.Insert(processors, 0, ProcessorNameAndConfigBody{ + Name: "batch", + Body: fmt.Sprintf(` + batch: + send_batch_size: %d +`, test.batchSize), + }) + } + loadOptions := &testbed.LoadOptions{ + Parallel: 10, + ItemsPerBatch: 10, + } + Scenario10kItemsPerSecond(t, sender, receiver, test.resourceSpec, performanceResultsSummary, processors, test.extensions, loadOptions) + }) + } +} + +func TestLog10kDPSWithProcessors(t *testing.T) { + processors := []ProcessorNameAndConfigBody{ + { + Name: "filter", + Body: ` + filter: + logs: + log_record: + - not IsMatch(attributes["batch_index"], "batch_.+") +`, + }, + { + Name: "transform", + Body: ` + transform: + log_statements: + - context: log + statements: + - set(resource.attributes["batch_index"], attributes["batch_index"]) + - set(attributes["counter"], ExtractPatterns(body, "Load Generator Counter (?P.+)")) +`, + }, + } + tests := []batcherTestSpec{ + { + name: "No batching, no queue", + processors: processors, + resourceSpec: testbed.ResourceSpec{ + ExpectedMaxCPU: 30, + ExpectedMaxRAM: 120, + }, + }, + { + name: "No batching, queue", + processors: processors, + withQueue: true, + resourceSpec: testbed.ResourceSpec{ + ExpectedMaxCPU: 30, + ExpectedMaxRAM: 120, + }, + }, + { + name: "Batch size 1000 with batch processor, no queue", + processors: processors, + batchSize: 1000, + 
withBatchProcessor: true, + resourceSpec: testbed.ResourceSpec{ + ExpectedMaxCPU: 30, + ExpectedMaxRAM: 120, + }, + }, + { + name: "Batch size 1000 with batch processor, queue", + processors: processors, + batchSize: 1000, + withBatchProcessor: true, + withQueue: true, + resourceSpec: testbed.ResourceSpec{ + ExpectedMaxCPU: 30, + ExpectedMaxRAM: 120, + }, + }, + { + name: "Batch size 1000 with exporter batcher, no queue", + processors: processors, + withExporterBatcher: true, + batchSize: 1000, + resourceSpec: testbed.ResourceSpec{ + ExpectedMaxCPU: 30, + ExpectedMaxRAM: 120, + }, + }, + { + name: "Batch size 1000 with exporter batcher, queue", + processors: processors, + withExporterBatcher: true, + withQueue: true, + batchSize: 1000, + resourceSpec: testbed.ResourceSpec{ + ExpectedMaxCPU: 30, + ExpectedMaxRAM: 120, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + sender := testbed.NewOTLPLogsDataSender(testbed.DefaultHost, testutil.GetAvailablePort(t)) + receiver := testbed.NewOTLPDataReceiver(testutil.GetAvailablePort(t)) + receiver.WithRetry(` + retry_on_failure: + enabled: true +`) + if test.withQueue { + receiver.WithQueue(` + sending_queue: + enabled: true + queue_size: 10 +`) + } + + if test.withExporterBatcher { + receiver.WithBatcher(fmt.Sprintf(` + batcher: + enabled: true + min_size_items: %d +`, test.batchSize)) + } + + testProcessors := slices.Clone(test.processors) + if test.withBatchProcessor { + processors = slices.Insert(testProcessors, 0, ProcessorNameAndConfigBody{ + Name: "batch", + Body: fmt.Sprintf(` + batch: + send_batch_size: %d +`, test.batchSize), + }) + } + loadOptions := &testbed.LoadOptions{ + Parallel: 10, + ItemsPerBatch: 10, + } + Scenario10kItemsPerSecond(t, sender, receiver, test.resourceSpec, performanceResultsSummary, testProcessors, test.extensions, loadOptions) + }) + } +} diff --git a/testbed/tests/log_test.go b/testbed/tests/log_test.go index 2756ce2ffe97..f46e2abd04f2 100644 --- 
a/testbed/tests/log_test.go +++ b/testbed/tests/log_test.go @@ -178,6 +178,7 @@ func TestLog10kDPS(t *testing.T) { performanceResultsSummary, processors, test.extensions, + nil, ) }) } diff --git a/testbed/tests/metric_test.go b/testbed/tests/metric_test.go index 6eb8b7fd9829..2de78612cceb 100644 --- a/testbed/tests/metric_test.go +++ b/testbed/tests/metric_test.go @@ -88,6 +88,7 @@ func TestMetric10kDPS(t *testing.T) { performanceResultsSummary, nil, nil, + nil, ) }) } diff --git a/testbed/tests/scenarios.go b/testbed/tests/scenarios.go index a2e3ea868a5c..cdb897c3b260 100644 --- a/testbed/tests/scenarios.go +++ b/testbed/tests/scenarios.go @@ -137,15 +137,19 @@ func Scenario10kItemsPerSecond( resultsSummary testbed.TestResultsSummary, processors []ProcessorNameAndConfigBody, extensions map[string]string, + loadOptions *testbed.LoadOptions, ) { resultDir, err := filepath.Abs(path.Join("results", t.Name())) require.NoError(t, err) - options := testbed.LoadOptions{ - DataItemsPerSecond: 10_000, - ItemsPerBatch: 100, - Parallel: 1, + if loadOptions == nil { + loadOptions = &testbed.LoadOptions{ + ItemsPerBatch: 100, + Parallel: 1, + } } + loadOptions.DataItemsPerSecond = 10_000 + agentProc := testbed.NewChildProcessCollector(testbed.WithEnvVar("GOMAXPROCS", "2")) configStr := createConfigYaml(t, sender, receiver, resultDir, processors, extensions) @@ -153,7 +157,7 @@ func Scenario10kItemsPerSecond( require.NoError(t, err) defer configCleanup() - dataProvider := testbed.NewPerfTestDataProvider(options) + dataProvider := testbed.NewPerfTestDataProvider(*loadOptions) tc := testbed.NewTestCase( t, dataProvider, @@ -169,7 +173,7 @@ func Scenario10kItemsPerSecond( tc.StartBackend() tc.StartAgent() - tc.StartLoad(options) + tc.StartLoad(*loadOptions) tc.WaitFor(func() bool { return tc.LoadGenerator.DataItemsSent() > 0 }, "load generator started") diff --git a/testbed/tests/trace_test.go b/testbed/tests/trace_test.go index 8f53311d5419..7b6007727e26 100644 --- 
a/testbed/tests/trace_test.go +++ b/testbed/tests/trace_test.go @@ -149,6 +149,7 @@ func TestTrace10kSPS(t *testing.T) { performanceResultsSummary, processors, nil, + nil, ) }) }