From 9460773e5cda6423676b8760021ed96836540f0d Mon Sep 17 00:00:00 2001 From: Riccardo Magliocchetti Date: Mon, 20 Jan 2025 09:56:09 +0100 Subject: [PATCH 01/16] Basic instrumentation for Bedrock runtime Converse api (#3161) * botocore: add extension for bedrock runtime api * Add tests and handle only non streaming responses * Make it explicit we are handling only the converse operation * Simplify test since all models behaves the same * Add test for error case and rework things a bit * Add converse example * Generate workflows * Add changelog --- .github/workflows/core_contrib_test_0.yml | 28 +- .github/workflows/test_0.yml | 252 +++++++++--------- .github/workflows/test_1.yml | 216 +++++++-------- .github/workflows/test_2.yml | 108 ++++++++ CHANGELOG.md | 2 + .../examples/bedrock-runtime/zero-code/.env | 15 ++ .../bedrock-runtime/zero-code/README.rst | 50 ++++ .../bedrock-runtime/zero-code/converse.py | 22 ++ .../zero-code/requirements.txt | 6 + .../botocore/environment_variables.py | 3 + .../botocore/extensions/__init__.py | 1 + .../botocore/extensions/bedrock.py | 149 +++++++++++ ...quirements.txt => test-requirements-0.txt} | 1 + .../test-requirements-1.txt | 39 +++ .../tests/bedrock_utils.py | 116 ++++++++ .../cassettes/test_converse_with_content.yaml | 93 +++++++ .../test_converse_with_invalid_model.yaml | 69 +++++ .../tests/conftest.py | 190 +++++++++++++ .../tests/test_botocore_bedrock.py | 102 +++++++ tox.ini | 11 +- 20 files changed, 1234 insertions(+), 239 deletions(-) create mode 100644 instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/.env create mode 100644 instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/README.rst create mode 100644 instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/converse.py create mode 100644 instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/requirements.txt create mode 100644 instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/environment_variables.py create mode 100644 instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py rename instrumentation/opentelemetry-instrumentation-botocore/{test-requirements.txt => test-requirements-0.txt} (97%) create mode 100644 instrumentation/opentelemetry-instrumentation-botocore/test-requirements-1.txt create mode 100644 instrumentation/opentelemetry-instrumentation-botocore/tests/bedrock_utils.py create mode 100644 instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_with_content.yaml create mode 100644 instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_with_invalid_model.yaml create mode 100644 instrumentation/opentelemetry-instrumentation-botocore/tests/conftest.py create mode 100644 instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_bedrock.py diff --git a/.github/workflows/core_contrib_test_0.yml b/.github/workflows/core_contrib_test_0.yml index 7ab737c657..bbc43ce736 100644 --- a/.github/workflows/core_contrib_test_0.yml +++ b/.github/workflows/core_contrib_test_0.yml @@ -349,8 +349,8 @@ jobs: - name: Run tests run: tox -e py38-test-instrumentation-aws-lambda -- -ra - py38-test-instrumentation-botocore: - name: instrumentation-botocore + py38-test-instrumentation-botocore-0: + name: instrumentation-botocore-0 runs-on: ubuntu-latest steps: - name: Checkout 
contrib repo @ SHA - ${{ env.CONTRIB_REPO_SHA }} @@ -369,7 +369,29 @@ jobs: run: pip install tox-uv - name: Run tests - run: tox -e py38-test-instrumentation-botocore -- -ra + run: tox -e py38-test-instrumentation-botocore-0 -- -ra + + py38-test-instrumentation-botocore-1: + name: instrumentation-botocore-1 + runs-on: ubuntu-latest + steps: + - name: Checkout contrib repo @ SHA - ${{ env.CONTRIB_REPO_SHA }} + uses: actions/checkout@v4 + with: + repository: open-telemetry/opentelemetry-python-contrib + ref: ${{ env.CONTRIB_REPO_SHA }} + + - name: Set up Python 3.8 + uses: actions/setup-python@v5 + with: + python-version: "3.8" + architecture: "x64" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py38-test-instrumentation-botocore-1 -- -ra py38-test-instrumentation-boto3sqs: name: instrumentation-boto3sqs diff --git a/.github/workflows/test_0.yml b/.github/workflows/test_0.yml index bbfb5a6865..b43a46b94f 100644 --- a/.github/workflows/test_0.yml +++ b/.github/workflows/test_0.yml @@ -1852,8 +1852,8 @@ jobs: - name: Run tests run: tox -e pypy3-test-instrumentation-aws-lambda -- -ra - py38-test-instrumentation-botocore_ubuntu-latest: - name: instrumentation-botocore 3.8 Ubuntu + py38-test-instrumentation-botocore-0_ubuntu-latest: + name: instrumentation-botocore-0 3.8 Ubuntu runs-on: ubuntu-latest steps: - name: Checkout repo @ SHA - ${{ github.sha }} @@ -1868,10 +1868,28 @@ jobs: run: pip install tox-uv - name: Run tests - run: tox -e py38-test-instrumentation-botocore -- -ra + run: tox -e py38-test-instrumentation-botocore-0 -- -ra - py39-test-instrumentation-botocore_ubuntu-latest: - name: instrumentation-botocore 3.9 Ubuntu + py38-test-instrumentation-botocore-1_ubuntu-latest: + name: instrumentation-botocore-1 3.8 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.8 + uses: actions/setup-python@v5 + with: + python-version: "3.8" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py38-test-instrumentation-botocore-1 -- -ra + + py39-test-instrumentation-botocore-0_ubuntu-latest: + name: instrumentation-botocore-0 3.9 Ubuntu runs-on: ubuntu-latest steps: - name: Checkout repo @ SHA - ${{ github.sha }} @@ -1886,10 +1904,46 @@ jobs: run: pip install tox-uv - name: Run tests - run: tox -e py39-test-instrumentation-botocore -- -ra + run: tox -e py39-test-instrumentation-botocore-0 -- -ra + + py39-test-instrumentation-botocore-1_ubuntu-latest: + name: instrumentation-botocore-1 3.9 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.9 + uses: actions/setup-python@v5 + with: + python-version: "3.9" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py39-test-instrumentation-botocore-1 -- -ra + + py310-test-instrumentation-botocore-0_ubuntu-latest: + name: instrumentation-botocore-0 3.10 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.10 + uses: actions/setup-python@v5 + with: + python-version: "3.10" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py310-test-instrumentation-botocore-0 -- -ra - py310-test-instrumentation-botocore_ubuntu-latest: - name: instrumentation-botocore 3.10 Ubuntu + py310-test-instrumentation-botocore-1_ubuntu-latest: + name: 
instrumentation-botocore-1 3.10 Ubuntu runs-on: ubuntu-latest steps: - name: Checkout repo @ SHA - ${{ github.sha }} @@ -1904,10 +1958,10 @@ jobs: run: pip install tox-uv - name: Run tests - run: tox -e py310-test-instrumentation-botocore -- -ra + run: tox -e py310-test-instrumentation-botocore-1 -- -ra - py311-test-instrumentation-botocore_ubuntu-latest: - name: instrumentation-botocore 3.11 Ubuntu + py311-test-instrumentation-botocore-0_ubuntu-latest: + name: instrumentation-botocore-0 3.11 Ubuntu runs-on: ubuntu-latest steps: - name: Checkout repo @ SHA - ${{ github.sha }} @@ -1922,10 +1976,28 @@ jobs: run: pip install tox-uv - name: Run tests - run: tox -e py311-test-instrumentation-botocore -- -ra + run: tox -e py311-test-instrumentation-botocore-0 -- -ra - py312-test-instrumentation-botocore_ubuntu-latest: - name: instrumentation-botocore 3.12 Ubuntu + py311-test-instrumentation-botocore-1_ubuntu-latest: + name: instrumentation-botocore-1 3.11 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py311-test-instrumentation-botocore-1 -- -ra + + py312-test-instrumentation-botocore-0_ubuntu-latest: + name: instrumentation-botocore-0 3.12 Ubuntu runs-on: ubuntu-latest steps: - name: Checkout repo @ SHA - ${{ github.sha }} @@ -1940,10 +2012,46 @@ jobs: run: pip install tox-uv - name: Run tests - run: tox -e py312-test-instrumentation-botocore -- -ra + run: tox -e py312-test-instrumentation-botocore-0 -- -ra + + py312-test-instrumentation-botocore-1_ubuntu-latest: + name: instrumentation-botocore-1 3.12 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.12 + uses: actions/setup-python@v5 + with: + python-version: "3.12" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py312-test-instrumentation-botocore-1 -- -ra + + py313-test-instrumentation-botocore-0_ubuntu-latest: + name: instrumentation-botocore-0 3.13 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.13 + uses: actions/setup-python@v5 + with: + python-version: "3.13" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py313-test-instrumentation-botocore-0 -- -ra - py313-test-instrumentation-botocore_ubuntu-latest: - name: instrumentation-botocore 3.13 Ubuntu + py313-test-instrumentation-botocore-1_ubuntu-latest: + name: instrumentation-botocore-1 3.13 Ubuntu runs-on: ubuntu-latest steps: - name: Checkout repo @ SHA - ${{ github.sha }} @@ -1958,7 +2066,7 @@ jobs: run: pip install tox-uv - name: Run tests - run: tox -e py313-test-instrumentation-botocore -- -ra + run: tox -e py313-test-instrumentation-botocore-1 -- -ra py38-test-instrumentation-boto3sqs_ubuntu-latest: name: instrumentation-boto3sqs 3.8 Ubuntu @@ -4407,111 +4515,3 @@ jobs: - name: Run tests run: tox -e py38-test-instrumentation-requests -- -ra - - py39-test-instrumentation-requests_ubuntu-latest: - name: instrumentation-requests 3.9 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.9 - uses: actions/setup-python@v5 - with: - python-version: "3.9" - - - name: Install tox - 
run: pip install tox-uv - - - name: Run tests - run: tox -e py39-test-instrumentation-requests -- -ra - - py310-test-instrumentation-requests_ubuntu-latest: - name: instrumentation-requests 3.10 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.10 - uses: actions/setup-python@v5 - with: - python-version: "3.10" - - - name: Install tox - run: pip install tox-uv - - - name: Run tests - run: tox -e py310-test-instrumentation-requests -- -ra - - py311-test-instrumentation-requests_ubuntu-latest: - name: instrumentation-requests 3.11 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.11 - uses: actions/setup-python@v5 - with: - python-version: "3.11" - - - name: Install tox - run: pip install tox-uv - - - name: Run tests - run: tox -e py311-test-instrumentation-requests -- -ra - - py312-test-instrumentation-requests_ubuntu-latest: - name: instrumentation-requests 3.12 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.12 - uses: actions/setup-python@v5 - with: - python-version: "3.12" - - - name: Install tox - run: pip install tox-uv - - - name: Run tests - run: tox -e py312-test-instrumentation-requests -- -ra - - py313-test-instrumentation-requests_ubuntu-latest: - name: instrumentation-requests 3.13 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.13 - uses: actions/setup-python@v5 - with: - python-version: "3.13" - - - name: Install tox - run: pip install tox-uv - - - name: Run tests - run: tox -e py313-test-instrumentation-requests -- -ra - - py38-test-instrumentation-starlette_ubuntu-latest: - name: instrumentation-starlette 3.8 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.8 - uses: actions/setup-python@v5 - with: - python-version: "3.8" - - - name: Install tox - run: pip install tox-uv - - - name: Run tests - run: tox -e py38-test-instrumentation-starlette -- -ra diff --git a/.github/workflows/test_1.yml b/.github/workflows/test_1.yml index 7cd1b5ed61..c1712a5367 100644 --- a/.github/workflows/test_1.yml +++ b/.github/workflows/test_1.yml @@ -16,6 +16,114 @@ env: jobs: + py39-test-instrumentation-requests_ubuntu-latest: + name: instrumentation-requests 3.9 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.9 + uses: actions/setup-python@v5 + with: + python-version: "3.9" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py39-test-instrumentation-requests -- -ra + + py310-test-instrumentation-requests_ubuntu-latest: + name: instrumentation-requests 3.10 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.10 + uses: actions/setup-python@v5 + with: + python-version: "3.10" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py310-test-instrumentation-requests -- -ra + + py311-test-instrumentation-requests_ubuntu-latest: + name: instrumentation-requests 3.11 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: 
actions/checkout@v4 + + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py311-test-instrumentation-requests -- -ra + + py312-test-instrumentation-requests_ubuntu-latest: + name: instrumentation-requests 3.12 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.12 + uses: actions/setup-python@v5 + with: + python-version: "3.12" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py312-test-instrumentation-requests -- -ra + + py313-test-instrumentation-requests_ubuntu-latest: + name: instrumentation-requests 3.13 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.13 + uses: actions/setup-python@v5 + with: + python-version: "3.13" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py313-test-instrumentation-requests -- -ra + + py38-test-instrumentation-starlette_ubuntu-latest: + name: instrumentation-starlette 3.8 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.8 + uses: actions/setup-python@v5 + with: + python-version: "3.8" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py38-test-instrumentation-starlette -- -ra + py39-test-instrumentation-starlette_ubuntu-latest: name: instrumentation-starlette 3.9 Ubuntu runs-on: ubuntu-latest @@ -4407,111 +4515,3 @@ jobs: - name: Run tests run: tox -e pypy3-test-util-http -- -ra - - py38-test-propagator-aws-xray-0_ubuntu-latest: - name: propagator-aws-xray-0 3.8 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.8 - uses: actions/setup-python@v5 - with: - python-version: "3.8" - - - name: Install tox - run: pip install tox-uv - - - name: Run tests - run: tox -e py38-test-propagator-aws-xray-0 -- -ra - - py38-test-propagator-aws-xray-1_ubuntu-latest: - name: propagator-aws-xray-1 3.8 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.8 - uses: actions/setup-python@v5 - with: - python-version: "3.8" - - - name: Install tox - run: pip install tox-uv - - - name: Run tests - run: tox -e py38-test-propagator-aws-xray-1 -- -ra - - py39-test-propagator-aws-xray-0_ubuntu-latest: - name: propagator-aws-xray-0 3.9 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.9 - uses: actions/setup-python@v5 - with: - python-version: "3.9" - - - name: Install tox - run: pip install tox-uv - - - name: Run tests - run: tox -e py39-test-propagator-aws-xray-0 -- -ra - - py39-test-propagator-aws-xray-1_ubuntu-latest: - name: propagator-aws-xray-1 3.9 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.9 - uses: actions/setup-python@v5 - with: - python-version: "3.9" - - - name: Install tox - run: pip install tox-uv - - - name: Run tests - run: tox -e py39-test-propagator-aws-xray-1 -- -ra - - py310-test-propagator-aws-xray-0_ubuntu-latest: - name: propagator-aws-xray-0 3.10 Ubuntu - 
runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.10 - uses: actions/setup-python@v5 - with: - python-version: "3.10" - - - name: Install tox - run: pip install tox-uv - - - name: Run tests - run: tox -e py310-test-propagator-aws-xray-0 -- -ra - - py310-test-propagator-aws-xray-1_ubuntu-latest: - name: propagator-aws-xray-1 3.10 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.10 - uses: actions/setup-python@v5 - with: - python-version: "3.10" - - - name: Install tox - run: pip install tox-uv - - - name: Run tests - run: tox -e py310-test-propagator-aws-xray-1 -- -ra diff --git a/.github/workflows/test_2.yml b/.github/workflows/test_2.yml index fd1dcb00e0..bc52c4eba4 100644 --- a/.github/workflows/test_2.yml +++ b/.github/workflows/test_2.yml @@ -16,6 +16,114 @@ env: jobs: + py38-test-propagator-aws-xray-0_ubuntu-latest: + name: propagator-aws-xray-0 3.8 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.8 + uses: actions/setup-python@v5 + with: + python-version: "3.8" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py38-test-propagator-aws-xray-0 -- -ra + + py38-test-propagator-aws-xray-1_ubuntu-latest: + name: propagator-aws-xray-1 3.8 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.8 + uses: actions/setup-python@v5 + with: + python-version: "3.8" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py38-test-propagator-aws-xray-1 -- -ra + + py39-test-propagator-aws-xray-0_ubuntu-latest: + name: propagator-aws-xray-0 3.9 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.9 + uses: actions/setup-python@v5 + with: + python-version: "3.9" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py39-test-propagator-aws-xray-0 -- -ra + + py39-test-propagator-aws-xray-1_ubuntu-latest: + name: propagator-aws-xray-1 3.9 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.9 + uses: actions/setup-python@v5 + with: + python-version: "3.9" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py39-test-propagator-aws-xray-1 -- -ra + + py310-test-propagator-aws-xray-0_ubuntu-latest: + name: propagator-aws-xray-0 3.10 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.10 + uses: actions/setup-python@v5 + with: + python-version: "3.10" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py310-test-propagator-aws-xray-0 -- -ra + + py310-test-propagator-aws-xray-1_ubuntu-latest: + name: propagator-aws-xray-1 3.10 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.10 + uses: actions/setup-python@v5 + with: + python-version: "3.10" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py310-test-propagator-aws-xray-1 -- -ra + 
  py311-test-propagator-aws-xray-0_ubuntu-latest:
    name: propagator-aws-xray-0 3.11 Ubuntu
    runs-on: ubuntu-latest
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6e40e73270..39e3fcdba8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -39,6 +39,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
   ([#3129](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3129))
 - `opentelemetry-util-http` Add `py.typed` file to enable PEP 561
   ([#3127](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3127))
+- `opentelemetry-instrumentation-botocore` Add basic support for GenAI attributes for AWS Bedrock Converse API
+  ([#3161](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3161))
 
 ### Fixed
 
diff --git a/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/.env b/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/.env
new file mode 100644
index 0000000000..0ab6418c72
--- /dev/null
+++ b/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/.env
@@ -0,0 +1,15 @@
+# Update this with your real values
+AWS_ACCESS_KEY_ID=key
+AWS_SECRET_ACCESS_KEY=secret
+AWS_DEFAULT_REGION=eu-central-1
+# Uncomment and set if your credentials are temporary
+# AWS_SESSION_TOKEN=
+
+# Uncomment and change to your OTLP endpoint
+# OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4317
+# OTEL_EXPORTER_OTLP_PROTOCOL=grpc
+
+OTEL_SERVICE_NAME=opentelemetry-python-bedrock
+
+# Uncomment if your OTLP endpoint doesn't support logs
+# OTEL_LOGS_EXPORTER=console
diff --git a/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/README.rst b/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/README.rst
new file mode 100644
index 0000000000..37e1db9b30
--- /dev/null
+++ b/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/README.rst
@@ -0,0 +1,50 @@
+Bedrock Zero-Code Instrumentation Example
+=========================================
+
+This is an example of how to instrument Bedrock calls with zero code changes,
+using `opentelemetry-instrument`.
+
+When the example is run, it exports traces and logs to an OTLP-compatible
+endpoint. Traces include details such as the model used and the duration of
+the chat request. Logs capture the chat request and the generated response,
+providing a comprehensive view of the performance and behavior of your
+Bedrock requests.
+
+Note: the `.env <.env>`_ file configures additional environment variables:
+
+- `OTEL_LOGS_EXPORTER=otlp` to specify the exporter type.
+
+Available examples
+------------------
+
+- `converse.py <converse.py>`_ uses the `bedrock-runtime` Converse API.
+
+Setup
+-----
+
+Minimally, update the `.env <.env>`_ file with your "AWS_ACCESS_KEY_ID",
+"AWS_SECRET_ACCESS_KEY", "AWS_DEFAULT_REGION" and, if you are using temporary
+credentials, "AWS_SESSION_TOKEN". An
+OTLP-compatible endpoint should be listening for traces and logs on
+http://localhost:4317. If not, update "OTEL_EXPORTER_OTLP_ENDPOINT" as well.
+
+Next, set up a virtual environment like this:
+
+::
+
+    python3 -m venv .venv
+    source .venv/bin/activate
+    pip install "python-dotenv[cli]"
+    pip install -r requirements.txt
+
+Run
+---
+
+Run the example like this:
+
+::
+
+    dotenv run -- opentelemetry-instrument python converse.py
+
+You should see a poem generated by Bedrock while traces are exported to your
+configured observability tool.
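For reference, a minimal programmatic sketch of roughly what `opentelemetry-instrument` wires up for this example (traces only, for brevity). It assumes the OTLP gRPC exporter and the `BotocoreInstrumentor` entry point listed in `requirements.txt`, and a collector listening on the default http://localhost:4317; it is an illustration, not part of the shipped example:

::

    # A hand-rolled sketch (assumed equivalent to the zero-code setup): configure
    # an OTLP span exporter, instrument botocore, then make the same Converse
    # call as converse.py.
    import os

    import boto3

    from opentelemetry import trace
    from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
    from opentelemetry.instrumentation.botocore import BotocoreInstrumentor
    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import BatchSpanProcessor

    # Send spans to the OTLP endpoint (defaults to http://localhost:4317).
    provider = TracerProvider()
    provider.add_span_processor(BatchSpanProcessor(OTLPSpanExporter()))
    trace.set_tracer_provider(provider)

    # Patch botocore clients so bedrock-runtime Converse calls produce spans.
    BotocoreInstrumentor().instrument(tracer_provider=provider)

    client = boto3.client("bedrock-runtime")
    response = client.converse(
        modelId=os.getenv("CHAT_MODEL", "amazon.titan-text-lite-v1"),
        messages=[
            {
                "role": "user",
                "content": [{"text": "Write a short poem on OpenTelemetry."}],
            },
        ],
    )
    print(response["output"]["message"]["content"][0]["text"])

With the zero-code approach, the distro performs this provider and instrumentor setup automatically at startup, so `converse.py` itself stays free of OpenTelemetry-specific code.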
diff --git a/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/converse.py b/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/converse.py new file mode 100644 index 0000000000..b6ce55d50d --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/converse.py @@ -0,0 +1,22 @@ +import os + +import boto3 + + +def main(): + client = boto3.client("bedrock-runtime") + response = client.converse( + modelId=os.getenv("CHAT_MODEL", "amazon.titan-text-lite-v1"), + messages=[ + { + "role": "user", + "content": [{"text": "Write a short poem on OpenTelemetry."}], + }, + ], + ) + + print(response["output"]["message"]["content"][0]["text"]) + + +if __name__ == "__main__": + main() diff --git a/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/requirements.txt b/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/requirements.txt new file mode 100644 index 0000000000..dea6c40109 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/requirements.txt @@ -0,0 +1,6 @@ +boto3~=1.35.99 + +opentelemetry-sdk~=1.29.0 +opentelemetry-exporter-otlp-proto-grpc~=1.29.0 +opentelemetry-distro~=0.50b0 +opentelemetry-instrumentation-botocore~=0.50b0 diff --git a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/environment_variables.py b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/environment_variables.py new file mode 100644 index 0000000000..02bdfe68af --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/environment_variables.py @@ -0,0 +1,3 @@ +OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT = ( + "OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT" +) diff --git a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/__init__.py b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/__init__.py index 85a4904022..c4624ababd 100644 --- a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/__init__.py @@ -32,6 +32,7 @@ def loader(): _KNOWN_EXTENSIONS = { + "bedrock-runtime": _lazy_load(".bedrock", "_BedrockRuntimeExtension"), "dynamodb": _lazy_load(".dynamodb", "_DynamoDbExtension"), "lambda": _lazy_load(".lmbd", "_LambdaExtension"), "sns": _lazy_load(".sns", "_SnsExtension"), diff --git a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py new file mode 100644 index 0000000000..fe826da603 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py @@ -0,0 +1,149 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Includes work from: +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +from __future__ import annotations + +import logging +from typing import Any + +from opentelemetry.instrumentation.botocore.extensions.types import ( + _AttributeMapT, + _AwsSdkExtension, + _BotoClientErrorT, +) +from opentelemetry.semconv._incubating.attributes.error_attributes import ( + ERROR_TYPE, +) +from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import ( + GEN_AI_OPERATION_NAME, + GEN_AI_REQUEST_MAX_TOKENS, + GEN_AI_REQUEST_MODEL, + GEN_AI_REQUEST_STOP_SEQUENCES, + GEN_AI_REQUEST_TEMPERATURE, + GEN_AI_REQUEST_TOP_P, + GEN_AI_RESPONSE_FINISH_REASONS, + GEN_AI_SYSTEM, + GEN_AI_USAGE_INPUT_TOKENS, + GEN_AI_USAGE_OUTPUT_TOKENS, + GenAiOperationNameValues, + GenAiSystemValues, +) +from opentelemetry.trace.span import Span +from opentelemetry.trace.status import Status, StatusCode + +_logger = logging.getLogger(__name__) + +_MODEL_ID_KEY: str = "modelId" + + +class _BedrockRuntimeExtension(_AwsSdkExtension): + """ + This class is an extension for + Amazon Bedrock Runtime. + """ + + _HANDLED_OPERATIONS = {"Converse"} + + def extract_attributes(self, attributes: _AttributeMapT): + if self._call_context.operation not in self._HANDLED_OPERATIONS: + return + + attributes[GEN_AI_SYSTEM] = GenAiSystemValues.AWS_BEDROCK.value + + model_id = self._call_context.params.get(_MODEL_ID_KEY) + if model_id: + attributes[GEN_AI_REQUEST_MODEL] = model_id + attributes[GEN_AI_OPERATION_NAME] = ( + GenAiOperationNameValues.CHAT.value + ) + + if inference_config := self._call_context.params.get( + "inferenceConfig" + ): + self._set_if_not_none( + attributes, + GEN_AI_REQUEST_TEMPERATURE, + inference_config.get("temperature"), + ) + self._set_if_not_none( + attributes, + GEN_AI_REQUEST_TOP_P, + inference_config.get("topP"), + ) + self._set_if_not_none( + attributes, + GEN_AI_REQUEST_MAX_TOKENS, + inference_config.get("maxTokens"), + ) + self._set_if_not_none( + attributes, + GEN_AI_REQUEST_STOP_SEQUENCES, + inference_config.get("stopSequences"), + ) + + @staticmethod + def _set_if_not_none(attributes, key, value): + if value is not None: + attributes[key] = value + + def before_service_call(self, span: Span): + if self._call_context.operation not in self._HANDLED_OPERATIONS: + return + + if not span.is_recording(): + return + + operation_name = span.attributes.get(GEN_AI_OPERATION_NAME, "") + request_model = span.attributes.get(GEN_AI_REQUEST_MODEL, "") + # avoid setting to an empty string if are not available + if operation_name and request_model: + span.update_name(f"{operation_name} {request_model}") + + def on_success(self, span: Span, result: dict[str, Any]): + if self._call_context.operation not in self._HANDLED_OPERATIONS: + return + + if not span.is_recording(): + return + + if usage := result.get("usage"): + if input_tokens := usage.get("inputTokens"): + span.set_attribute( + GEN_AI_USAGE_INPUT_TOKENS, + input_tokens, + ) + if output_tokens := usage.get("outputTokens"): + span.set_attribute( + GEN_AI_USAGE_OUTPUT_TOKENS, + output_tokens, + 
) + + if stop_reason := result.get("stopReason"): + span.set_attribute( + GEN_AI_RESPONSE_FINISH_REASONS, + [stop_reason], + ) + + def on_error(self, span: Span, exception: _BotoClientErrorT): + if self._call_context.operation not in self._HANDLED_OPERATIONS: + return + + span.set_status(Status(StatusCode.ERROR, str(exception))) + if span.is_recording(): + span.set_attribute(ERROR_TYPE, type(exception).__qualname__) diff --git a/instrumentation/opentelemetry-instrumentation-botocore/test-requirements.txt b/instrumentation/opentelemetry-instrumentation-botocore/test-requirements-0.txt similarity index 97% rename from instrumentation/opentelemetry-instrumentation-botocore/test-requirements.txt rename to instrumentation/opentelemetry-instrumentation-botocore/test-requirements-0.txt index aa5f89859f..ee28a1f2ba 100644 --- a/instrumentation/opentelemetry-instrumentation-botocore/test-requirements.txt +++ b/instrumentation/opentelemetry-instrumentation-botocore/test-requirements-0.txt @@ -19,6 +19,7 @@ pluggy==1.5.0 py-cpuinfo==9.0.0 pycparser==2.21 pytest==7.4.4 +pytest-vcr==1.0.2 python-dateutil==2.8.2 pytz==2024.1 PyYAML==6.0.1 diff --git a/instrumentation/opentelemetry-instrumentation-botocore/test-requirements-1.txt b/instrumentation/opentelemetry-instrumentation-botocore/test-requirements-1.txt new file mode 100644 index 0000000000..c4695ff27c --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-botocore/test-requirements-1.txt @@ -0,0 +1,39 @@ +asgiref==3.8.1 +aws-xray-sdk==2.12.1 +boto3==1.35.56 +botocore==1.35.56 +certifi==2024.7.4 +cffi==1.17.0 +charset-normalizer==3.3.2 +cryptography==43.0.1 +Deprecated==1.2.14 +docker==7.0.0 +idna==3.7 +iniconfig==2.0.0 +Jinja2==3.1.4 +jmespath==1.0.1 +MarkupSafe==2.1.5 +moto==5.0.9 +packaging==24.0 +pluggy==1.5.0 +py-cpuinfo==9.0.0 +pycparser==2.21 +pytest==7.4.4 +pytest-vcr==1.0.2 +python-dateutil==2.8.2 +pytz==2024.1 +PyYAML==6.0.1 +requests==2.32.3 +responses==0.25.0 +s3transfer==0.10.0 +six==1.16.0 +tomli==2.0.1 +typing_extensions==4.12.2 +urllib3==1.26.19 +Werkzeug==3.0.6 +wrapt==1.16.0 +xmltodict==0.13.0 +zipp==3.19.2 +-e opentelemetry-instrumentation +-e propagator/opentelemetry-propagator-aws-xray +-e instrumentation/opentelemetry-instrumentation-botocore diff --git a/instrumentation/opentelemetry-instrumentation-botocore/tests/bedrock_utils.py b/instrumentation/opentelemetry-instrumentation-botocore/tests/bedrock_utils.py new file mode 100644 index 0000000000..6d2415432f --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-botocore/tests/bedrock_utils.py @@ -0,0 +1,116 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
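+
+# Test helpers shared by the Bedrock Converse tests: assertions that a span
+# produced by the bedrock-runtime extension carries the expected GenAI
+# request/response attributes.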
+ +from __future__ import annotations + +from typing import Any + +from opentelemetry.sdk.trace import ReadableSpan +from opentelemetry.semconv._incubating.attributes import ( + gen_ai_attributes as GenAIAttributes, +) + + +def assert_completion_attributes( + span: ReadableSpan, + request_model: str, + response: dict[str, Any] | None, + operation_name: str = "chat", + request_top_p: int | None = None, + request_temperature: int | None = None, + request_max_tokens: int | None = None, + request_stop_sequences: list[str] | None = None, +): + if usage := (response and response.get("usage")): + input_tokens = usage["inputTokens"] + output_tokens = usage["outputTokens"] + else: + input_tokens, output_tokens = None, None + + if response: + finish_reason = (response["stopReason"],) + else: + finish_reason = None + + return assert_all_attributes( + span, + request_model, + input_tokens, + output_tokens, + finish_reason, + operation_name, + request_top_p, + request_temperature, + request_max_tokens, + tuple(request_stop_sequences) + if request_stop_sequences is not None + else request_stop_sequences, + ) + + +def assert_equal_or_not_present(value, attribute_name, span): + if value: + assert value == span.attributes[attribute_name] + else: + assert attribute_name not in span.attributes + + +def assert_all_attributes( + span: ReadableSpan, + request_model: str, + input_tokens: int | None = None, + output_tokens: int | None = None, + finish_reason: tuple[str] | None = None, + operation_name: str = "chat", + request_top_p: int | None = None, + request_temperature: int | None = None, + request_max_tokens: int | None = None, + request_stop_sequences: tuple[str] | None = None, +): + assert span.name == f"{operation_name} {request_model}" + assert ( + operation_name + == span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] + ) + assert ( + GenAIAttributes.GenAiSystemValues.AWS_BEDROCK.value + == span.attributes[GenAIAttributes.GEN_AI_SYSTEM] + ) + assert ( + request_model == span.attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL] + ) + + assert_equal_or_not_present( + input_tokens, GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS, span + ) + assert_equal_or_not_present( + output_tokens, GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS, span + ) + assert_equal_or_not_present( + finish_reason, GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS, span + ) + assert_equal_or_not_present( + request_top_p, GenAIAttributes.GEN_AI_REQUEST_TOP_P, span + ) + assert_equal_or_not_present( + request_temperature, GenAIAttributes.GEN_AI_REQUEST_TEMPERATURE, span + ) + assert_equal_or_not_present( + request_max_tokens, GenAIAttributes.GEN_AI_REQUEST_MAX_TOKENS, span + ) + assert_equal_or_not_present( + request_stop_sequences, + GenAIAttributes.GEN_AI_REQUEST_STOP_SEQUENCES, + span, + ) diff --git a/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_with_content.yaml b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_with_content.yaml new file mode 100644 index 0000000000..8060f02076 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_with_content.yaml @@ -0,0 +1,93 @@ +interactions: +- request: + body: |- + { + "messages": [ + { + "role": "user", + "content": [ + { + "text": "Say this is a test" + } + ] + } + ], + "inferenceConfig": { + "maxTokens": 10, + "temperature": 0.8, + "topP": 1, + "stopSequences": [ + "|" + ] + } + } + headers: + Content-Length: + - '170' + Content-Type: + - !!binary | + YXBwbGljYXRpb24vanNvbg== 
+ User-Agent: + - !!binary | + Qm90bzMvMS4zNS41NiBtZC9Cb3RvY29yZSMxLjM1LjU2IHVhLzIuMCBvcy9saW51eCM2LjEuMC0x + MDM0LW9lbSBtZC9hcmNoI3g4Nl82NCBsYW5nL3B5dGhvbiMzLjEwLjEyIG1kL3B5aW1wbCNDUHl0 + aG9uIGNmZy9yZXRyeS1tb2RlI2xlZ2FjeSBCb3RvY29yZS8xLjM1LjU2 + X-Amz-Date: + - !!binary | + MjAyNDEyMzFUMTMyMDQxWg== + X-Amz-Security-Token: + - test_aws_security_token + X-Amzn-Trace-Id: + - !!binary | + Um9vdD0xLWY1MWY4NGM1LTNiZjk4YzY0YWMyNmJhNTk1OWJjODgxNjtQYXJlbnQ9YjNmOGZhM2Mz + MDc1NGEzZjtTYW1wbGVkPTE= + amz-sdk-invocation-id: + - !!binary | + OTIyMjczMzItY2I5ZS00NGM1LTliZGUtYjU0NmJmODkxYmEy + amz-sdk-request: + - !!binary | + YXR0ZW1wdD0x + authorization: + - Bearer test_aws_authorization + method: POST + uri: https://bedrock-runtime.eu-central-1.amazonaws.com/model/amazon.titan-text-lite-v1/converse + response: + body: + string: |- + { + "metrics": { + "latencyMs": 811 + }, + "output": { + "message": { + "content": [ + { + "text": "I am happy to assist you today" + } + ], + "role": "assistant" + } + }, + "stopReason": "max_tokens", + "usage": { + "inputTokens": 8, + "outputTokens": 10, + "totalTokens": 18 + } + } + headers: + Connection: + - keep-alive + Content-Length: + - '212' + Content-Type: + - application/json + Date: + - Tue, 31 Dec 2024 13:20:42 GMT + Set-Cookie: test_set_cookie + x-amzn-RequestId: + - 63dfbcb2-3536-4906-b10d-e5b126b3c0ae + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_with_invalid_model.yaml b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_with_invalid_model.yaml new file mode 100644 index 0000000000..ecbfb6bbd0 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_with_invalid_model.yaml @@ -0,0 +1,69 @@ +interactions: +- request: + body: |- + { + "messages": [ + { + "role": "user", + "content": [ + { + "text": "Say this is a test" + } + ] + } + ] + } + headers: + Content-Length: + - '77' + Content-Type: + - !!binary | + YXBwbGljYXRpb24vanNvbg== + User-Agent: + - !!binary | + Qm90bzMvMS4zNS41NiBtZC9Cb3RvY29yZSMxLjM1LjU2IHVhLzIuMCBvcy9saW51eCM2LjEuMC0x + MDM0LW9lbSBtZC9hcmNoI3g4Nl82NCBsYW5nL3B5dGhvbiMzLjEwLjEyIG1kL3B5aW1wbCNDUHl0 + aG9uIGNmZy9yZXRyeS1tb2RlI2xlZ2FjeSBCb3RvY29yZS8xLjM1LjU2 + X-Amz-Date: + - !!binary | + MjAyNTAxMTVUMTEwMTQ3Wg== + X-Amz-Security-Token: + - test_aws_security_token + X-Amzn-Trace-Id: + - !!binary | + Um9vdD0xLWIzM2JhNTkxLTdkYmQ0ZDZmYTBmZTdmYzc2MTExOThmNztQYXJlbnQ9NzRmNmQ1NTEz + MzkzMzUxNTtTYW1wbGVkPTE= + amz-sdk-invocation-id: + - !!binary | + NTQ5MmQ0NTktNzhkNi00ZWY4LTlmMDMtZTA5ODhkZGRiZDI5 + amz-sdk-request: + - !!binary | + YXR0ZW1wdD0x + authorization: + - Bearer test_aws_authorization + method: POST + uri: https://bedrock-runtime.eu-central-1.amazonaws.com/model/does-not-exist/converse + response: + body: + string: |- + { + "message": "The provided model identifier is invalid." 
+ } + headers: + Connection: + - keep-alive + Content-Length: + - '55' + Content-Type: + - application/json + Date: + - Wed, 15 Jan 2025 11:01:47 GMT + Set-Cookie: test_set_cookie + x-amzn-ErrorType: + - ValidationException:http://internal.amazon.com/coral/com.amazon.bedrock/ + x-amzn-RequestId: + - d425bf99-8a4e-4d83-8d77-a48410dd82b2 + status: + code: 400 + message: Bad Request +version: 1 diff --git a/instrumentation/opentelemetry-instrumentation-botocore/tests/conftest.py b/instrumentation/opentelemetry-instrumentation-botocore/tests/conftest.py new file mode 100644 index 0000000000..271c540da7 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-botocore/tests/conftest.py @@ -0,0 +1,190 @@ +"""Unit tests configuration module.""" + +import json +import os + +import boto3 +import pytest +import yaml + +from opentelemetry.instrumentation.botocore import BotocoreInstrumentor +from opentelemetry.instrumentation.botocore.environment_variables import ( + OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT, +) +from opentelemetry.sdk._events import EventLoggerProvider +from opentelemetry.sdk._logs import LoggerProvider +from opentelemetry.sdk._logs.export import ( + InMemoryLogExporter, + SimpleLogRecordProcessor, +) +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import SimpleSpanProcessor +from opentelemetry.sdk.trace.export.in_memory_span_exporter import ( + InMemorySpanExporter, +) + + +@pytest.fixture(scope="function", name="span_exporter") +def fixture_span_exporter(): + exporter = InMemorySpanExporter() + yield exporter + + +@pytest.fixture(scope="function", name="log_exporter") +def fixture_log_exporter(): + exporter = InMemoryLogExporter() + yield exporter + + +@pytest.fixture(scope="function", name="tracer_provider") +def fixture_tracer_provider(span_exporter): + provider = TracerProvider() + provider.add_span_processor(SimpleSpanProcessor(span_exporter)) + return provider + + +@pytest.fixture(scope="function", name="event_logger_provider") +def fixture_event_logger_provider(log_exporter): + provider = LoggerProvider() + provider.add_log_record_processor(SimpleLogRecordProcessor(log_exporter)) + event_logger_provider = EventLoggerProvider(provider) + + return event_logger_provider + + +@pytest.fixture +def bedrock_runtime_client(): + return boto3.client("bedrock-runtime") + + +@pytest.fixture(autouse=True) +def environment(): + if not os.getenv("AWS_ACCESS_KEY_ID"): + os.environ["AWS_ACCESS_KEY_ID"] = "test_aws_access_key_id" + if not os.getenv("AWS_SECRET_ACCESS_KEY"): + os.environ["AWS_SECRET_ACCESS_KEY"] = "test_aws_secret_key" + if not os.getenv("AWS_SESSION_TOKEN"): + os.environ["AWS_SESSION_TOKEN"] = "test_aws_session_token" + if not os.getenv("AWS_DEFAULT_REGION"): + os.environ["AWS_DEFAULT_REGION"] = "eu-central-1" + + +@pytest.fixture(scope="module") +def vcr_config(): + return { + "filter_headers": [ + ("cookie", "test_cookie"), + ("authorization", "Bearer test_aws_authorization"), + ("X-Amz-Security-Token", "test_aws_security_token"), + ], + "decode_compressed_response": True, + "before_record_response": scrub_response_headers, + } + + +@pytest.fixture(scope="function") +def instrument_no_content(tracer_provider, event_logger_provider): + os.environ.update( + {OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT: "False"} + ) + + instrumentor = BotocoreInstrumentor() + instrumentor.instrument( + tracer_provider=tracer_provider, + event_logger_provider=event_logger_provider, + ) + + yield instrumentor + 
os.environ.pop(OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT, None) + instrumentor.uninstrument() + + +@pytest.fixture(scope="function") +def instrument_with_content(tracer_provider, event_logger_provider): + os.environ.update( + {OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT: "True"} + ) + instrumentor = BotocoreInstrumentor() + instrumentor.instrument( + tracer_provider=tracer_provider, + event_logger_provider=event_logger_provider, + ) + + yield instrumentor + os.environ.pop(OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT, None) + instrumentor.uninstrument() + + +class LiteralBlockScalar(str): + """Formats the string as a literal block scalar, preserving whitespace and + without interpreting escape characters""" + + +def literal_block_scalar_presenter(dumper, data): + """Represents a scalar string as a literal block, via '|' syntax""" + return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="|") + + +yaml.add_representer(LiteralBlockScalar, literal_block_scalar_presenter) + + +def process_string_value(string_value): + """Pretty-prints JSON or returns long strings as a LiteralBlockScalar""" + try: + json_data = json.loads(string_value) + return LiteralBlockScalar(json.dumps(json_data, indent=2)) + except (ValueError, TypeError): + if len(string_value) > 80: + return LiteralBlockScalar(string_value) + return string_value + + +def convert_body_to_literal(data): + """Searches the data for body strings, attempting to pretty-print JSON""" + if isinstance(data, dict): + for key, value in data.items(): + # Handle response body case (e.g., response.body.string) + if key == "body" and isinstance(value, dict) and "string" in value: + value["string"] = process_string_value(value["string"]) + + # Handle request body case (e.g., request.body) + elif key == "body" and isinstance(value, str): + data[key] = process_string_value(value) + + else: + convert_body_to_literal(value) + + elif isinstance(data, list): + for idx, choice in enumerate(data): + data[idx] = convert_body_to_literal(choice) + + return data + + +class PrettyPrintJSONBody: + """This makes request and response body recordings more readable.""" + + @staticmethod + def serialize(cassette_dict): + cassette_dict = convert_body_to_literal(cassette_dict) + return yaml.dump( + cassette_dict, default_flow_style=False, allow_unicode=True + ) + + @staticmethod + def deserialize(cassette_string): + return yaml.load(cassette_string, Loader=yaml.Loader) + + +@pytest.fixture(scope="module", autouse=True) +def fixture_vcr(vcr): + vcr.register_serializer("yaml", PrettyPrintJSONBody) + return vcr + + +def scrub_response_headers(response): + """ + This scrubs sensitive response headers. Note they are case-sensitive! + """ + response["headers"]["Set-Cookie"] = "test_set_cookie" + return response diff --git a/instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_bedrock.py b/instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_bedrock.py new file mode 100644 index 0000000000..8de7721bc9 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_bedrock.py @@ -0,0 +1,102 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import boto3 +import pytest + +from opentelemetry.semconv._incubating.attributes.error_attributes import ( + ERROR_TYPE, +) +from opentelemetry.trace.status import StatusCode + +from .bedrock_utils import assert_completion_attributes + +BOTO3_VERSION = tuple(int(x) for x in boto3.__version__.split(".")) + + +@pytest.mark.skipif( + BOTO3_VERSION < (1, 35, 56), reason="Converse API not available" +) +@pytest.mark.vcr() +def test_converse_with_content( + span_exporter, + log_exporter, + bedrock_runtime_client, + instrument_with_content, +): + messages = [{"role": "user", "content": [{"text": "Say this is a test"}]}] + + llm_model_value = "amazon.titan-text-lite-v1" + max_tokens, temperature, top_p, stop_sequences = 10, 0.8, 1, ["|"] + response = bedrock_runtime_client.converse( + messages=messages, + modelId=llm_model_value, + inferenceConfig={ + "maxTokens": max_tokens, + "temperature": temperature, + "topP": top_p, + "stopSequences": stop_sequences, + }, + ) + + (span,) = span_exporter.get_finished_spans() + assert_completion_attributes( + span, + llm_model_value, + response, + "chat", + top_p, + temperature, + max_tokens, + stop_sequences, + ) + + logs = log_exporter.get_finished_logs() + assert len(logs) == 0 + + +@pytest.mark.skipif( + BOTO3_VERSION < (1, 35, 56), reason="Converse API not available" +) +@pytest.mark.vcr() +def test_converse_with_invalid_model( + span_exporter, + log_exporter, + bedrock_runtime_client, + instrument_with_content, +): + messages = [{"role": "user", "content": [{"text": "Say this is a test"}]}] + + llm_model_value = "does-not-exist" + with pytest.raises(bedrock_runtime_client.exceptions.ValidationException): + bedrock_runtime_client.converse( + messages=messages, + modelId=llm_model_value, + ) + + (span,) = span_exporter.get_finished_spans() + assert_completion_attributes( + span, + llm_model_value, + None, + "chat", + ) + + assert span.status.status_code == StatusCode.ERROR + assert span.attributes[ERROR_TYPE] == "ValidationException" + + logs = log_exporter.get_finished_logs() + assert len(logs) == 0 diff --git a/tox.ini b/tox.ini index d198c0a836..70c6ae6a28 100644 --- a/tox.ini +++ b/tox.ini @@ -66,7 +66,7 @@ envlist = lint-instrumentation-aws-lambda ; opentelemetry-instrumentation-botocore - py3{8,9,10,11,12,13}-test-instrumentation-botocore + py3{8,9,10,11,12,13}-test-instrumentation-botocore-{0,1} ; FIXME: see https://github.com/open-telemetry/opentelemetry-python-contrib/issues/1736 ; pypy3-test-instrumentation-botocore lint-instrumentation-botocore @@ -414,6 +414,11 @@ test_deps = opentelemetry-semantic-conventions@{env:CORE_REPO}\#egg=opentelemetry-semantic-conventions&subdirectory=opentelemetry-semantic-conventions opentelemetry-sdk@{env:CORE_REPO}\#egg=opentelemetry-sdk&subdirectory=opentelemetry-sdk opentelemetry-test-utils@{env:CORE_REPO}\#egg=opentelemetry-test-utils&subdirectory=tests/opentelemetry-test-utils +pass_env = + AWS_ACCESS_KEY_ID + AWS_SECRET_ACCESS_KEY + AWS_SESSION_TOKEN + AWS_DEFAULT_REGION deps = lint: -r dev-requirements.txt @@ -518,7 +523,9 @@ deps = 
lint-instrumentation-urllib3: -r {toxinidir}/instrumentation/opentelemetry-instrumentation-urllib3/test-requirements-1.txt botocore: {[testenv]test_deps} - botocore: -r {toxinidir}/instrumentation/opentelemetry-instrumentation-botocore/test-requirements.txt + botocore-0: -r {toxinidir}/instrumentation/opentelemetry-instrumentation-botocore/test-requirements-0.txt + botocore-1: -r {toxinidir}/instrumentation/opentelemetry-instrumentation-botocore/test-requirements-1.txt + lint-instrumentation-botocore: -r {toxinidir}/instrumentation/opentelemetry-instrumentation-botocore/test-requirements-1.txt cassandra: {[testenv]test_deps} cassandra: -r {toxinidir}/instrumentation/opentelemetry-instrumentation-cassandra/test-requirements.txt From 20413ef7d72a3064492a9374e5164214949e60b9 Mon Sep 17 00:00:00 2001 From: OpenTelemetry Bot <107717825+opentelemetrybot@users.noreply.github.com> Date: Tue, 21 Jan 2025 02:10:48 -0600 Subject: [PATCH 02/16] Copy changelog updates from package-release/opentelemetry-instrumentation-openai-v2/v2.1bx (#3197) Co-authored-by: Riccardo Magliocchetti --- .../opentelemetry-instrumentation-openai-v2/CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/CHANGELOG.md b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/CHANGELOG.md index 0d5ed666ed..91065edbc1 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/CHANGELOG.md +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/CHANGELOG.md @@ -7,7 +7,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## Unreleased -## Version 2.1b0 (2025-01-17) +## Version 2.1b0 (2025-01-18) - Coerce openai response_format to semconv format ([#3073](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3073)) From 37f85bf8cc235b807557c8a1aff9fa3b52b3d111 Mon Sep 17 00:00:00 2001 From: Riccardo Magliocchetti Date: Tue, 21 Jan 2025 14:57:53 +0100 Subject: [PATCH 03/16] instrumentation/aws-lambda: don't print warnings outside of AWS Lambda (#3183) If we are not running inside AWS Lambda don't print warnings on missing OTel lambda extension layer. The instrumentation is installed by the OTel k8s operator and so this warning may confuse users. --- .../instrumentation/aws_lambda/__init__.py | 5 +++ .../test_aws_lambda_instrumentation_manual.py | 38 +++++++++++++++++-- 2 files changed, 39 insertions(+), 4 deletions(-) diff --git a/instrumentation/opentelemetry-instrumentation-aws-lambda/src/opentelemetry/instrumentation/aws_lambda/__init__.py b/instrumentation/opentelemetry-instrumentation-aws-lambda/src/opentelemetry/instrumentation/aws_lambda/__init__.py index b1f61b9ce8..a0d381e7f5 100644 --- a/instrumentation/opentelemetry-instrumentation-aws-lambda/src/opentelemetry/instrumentation/aws_lambda/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-aws-lambda/src/opentelemetry/instrumentation/aws_lambda/__init__.py @@ -431,6 +431,11 @@ def _instrument(self, **kwargs): the context is extracted from the HTTP headers of an API Gateway request. 
""" + + # Don't try if we are not running on AWS Lambda + if "AWS_LAMBDA_FUNCTION_NAME" not in os.environ: + return + lambda_handler = os.environ.get(ORIG_HANDLER, os.environ.get(_HANDLER)) if not lambda_handler: logger.warning( diff --git a/instrumentation/opentelemetry-instrumentation-aws-lambda/tests/test_aws_lambda_instrumentation_manual.py b/instrumentation/opentelemetry-instrumentation-aws-lambda/tests/test_aws_lambda_instrumentation_manual.py index 4ac1e9c873..1e8f9f0575 100644 --- a/instrumentation/opentelemetry-instrumentation-aws-lambda/tests/test_aws_lambda_instrumentation_manual.py +++ b/instrumentation/opentelemetry-instrumentation-aws-lambda/tests/test_aws_lambda_instrumentation_manual.py @@ -11,6 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +import logging import os from dataclasses import dataclass from importlib import import_module, reload @@ -124,7 +126,10 @@ def setUp(self): super().setUp() self.common_env_patch = mock.patch.dict( "os.environ", - {_HANDLER: "tests.mocks.lambda_function.handler"}, + { + _HANDLER: "tests.mocks.lambda_function.handler", + "AWS_LAMBDA_FUNCTION_NAME": "mylambda", + }, ) self.common_env_patch.start() @@ -466,12 +471,14 @@ def test_lambda_handles_handler_exception(self): exc_env_patch.stop() - def test_lambda_handles_should_do_nothing_when_environment_variables_not_present( - self, + @mock.patch("opentelemetry.instrumentation.aws_lambda.logger") + def test_lambda_handles_should_do_nothing_when_aws_lambda_environment_variables_not_present( + self, logger_mock ): exc_env_patch = mock.patch.dict( "os.environ", - {_HANDLER: ""}, + {_HANDLER: "tests.mocks.lambda_function.handler"}, + clear=True, ) exc_env_patch.start() AwsLambdaInstrumentor().instrument() @@ -480,6 +487,29 @@ def test_lambda_handles_should_do_nothing_when_environment_variables_not_present self.assertEqual(len(spans), 0) exc_env_patch.stop() + logger_mock.warnings.assert_not_called() + + def test_lambda_handles_should_warn_when_handler_environment_variable_not_present( + self, + ): + exc_env_patch = mock.patch.dict( + "os.environ", + {"AWS_LAMBDA_FUNCTION_NAME": "mylambda"}, + clear=True, + ) + exc_env_patch.start() + with self.assertLogs(level=logging.WARNING) as warning: + AwsLambdaInstrumentor().instrument() + self.assertEqual(len(warning.records), 1) + self.assertIn( + "This instrumentation requires the OpenTelemetry Lambda extension installed", + warning.records[0].message, + ) + + spans = self.memory_exporter.get_finished_spans() + self.assertEqual(len(spans), 0) + exc_env_patch.stop() + def test_uninstrument(self): AwsLambdaInstrumentor().instrument() From 3f50c08580f97519d7053bc399605edf8de9a6dd Mon Sep 17 00:00:00 2001 From: Josh Owen Date: Tue, 21 Jan 2025 15:41:56 -0500 Subject: [PATCH 04/16] psycopg2-binary support (#3186) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * psycopg2-binary support * changelog * Update CHANGELOG.md * Update pyproject.toml * fix * lint * added test config * Update bootstrap_gen.py * update tox * Update tox.ini * regenerate workflows * workflows --------- Co-authored-by: Emídio Neto <9735060+emdneto@users.noreply.github.com> Co-authored-by: Riccardo Magliocchetti --- .github/workflows/core_contrib_test_0.yml | 22 ++ .github/workflows/test_1.yml | 216 +++++++++--------- .github/workflows/test_2.yml | 108 +++++++++ CHANGELOG.md | 2 + instrumentation/README.md | 
2 +- .../pyproject.toml | 1 + .../instrumentation/psycopg2/__init__.py | 22 +- .../instrumentation/psycopg2/package.py | 8 +- .../test-requirements-binary.txt | 15 ++ .../instrumentation/bootstrap_gen.py | 4 + tox.ini | 7 + 11 files changed, 296 insertions(+), 111 deletions(-) create mode 100644 instrumentation/opentelemetry-instrumentation-psycopg2/test-requirements-binary.txt diff --git a/.github/workflows/core_contrib_test_0.yml b/.github/workflows/core_contrib_test_0.yml index bbc43ce736..6a70ce8380 100644 --- a/.github/workflows/core_contrib_test_0.yml +++ b/.github/workflows/core_contrib_test_0.yml @@ -1075,6 +1075,28 @@ jobs: - name: Run tests run: tox -e py38-test-instrumentation-psycopg2 -- -ra + py38-test-instrumentation-psycopg2-binary: + name: instrumentation-psycopg2-binary + runs-on: ubuntu-latest + steps: + - name: Checkout contrib repo @ SHA - ${{ env.CONTRIB_REPO_SHA }} + uses: actions/checkout@v4 + with: + repository: open-telemetry/opentelemetry-python-contrib + ref: ${{ env.CONTRIB_REPO_SHA }} + + - name: Set up Python 3.8 + uses: actions/setup-python@v5 + with: + python-version: "3.8" + architecture: "x64" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py38-test-instrumentation-psycopg2-binary -- -ra + py38-test-instrumentation-psycopg: name: instrumentation-psycopg runs-on: ubuntu-latest diff --git a/.github/workflows/test_1.yml b/.github/workflows/test_1.yml index c1712a5367..b27fe28466 100644 --- a/.github/workflows/test_1.yml +++ b/.github/workflows/test_1.yml @@ -1222,6 +1222,114 @@ jobs: - name: Run tests run: tox -e py313-test-instrumentation-psycopg2 -- -ra + py38-test-instrumentation-psycopg2-binary_ubuntu-latest: + name: instrumentation-psycopg2-binary 3.8 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.8 + uses: actions/setup-python@v5 + with: + python-version: "3.8" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py38-test-instrumentation-psycopg2-binary -- -ra + + py39-test-instrumentation-psycopg2-binary_ubuntu-latest: + name: instrumentation-psycopg2-binary 3.9 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.9 + uses: actions/setup-python@v5 + with: + python-version: "3.9" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py39-test-instrumentation-psycopg2-binary -- -ra + + py310-test-instrumentation-psycopg2-binary_ubuntu-latest: + name: instrumentation-psycopg2-binary 3.10 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.10 + uses: actions/setup-python@v5 + with: + python-version: "3.10" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py310-test-instrumentation-psycopg2-binary -- -ra + + py311-test-instrumentation-psycopg2-binary_ubuntu-latest: + name: instrumentation-psycopg2-binary 3.11 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py311-test-instrumentation-psycopg2-binary -- -ra + + py312-test-instrumentation-psycopg2-binary_ubuntu-latest: + name: 
instrumentation-psycopg2-binary 3.12 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.12 + uses: actions/setup-python@v5 + with: + python-version: "3.12" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py312-test-instrumentation-psycopg2-binary -- -ra + + py313-test-instrumentation-psycopg2-binary_ubuntu-latest: + name: instrumentation-psycopg2-binary 3.13 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.13 + uses: actions/setup-python@v5 + with: + python-version: "3.13" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py313-test-instrumentation-psycopg2-binary -- -ra + py38-test-instrumentation-psycopg_ubuntu-latest: name: instrumentation-psycopg 3.8 Ubuntu runs-on: ubuntu-latest @@ -4407,111 +4515,3 @@ jobs: - name: Run tests run: tox -e py38-test-util-http -- -ra - - py39-test-util-http_ubuntu-latest: - name: util-http 3.9 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.9 - uses: actions/setup-python@v5 - with: - python-version: "3.9" - - - name: Install tox - run: pip install tox-uv - - - name: Run tests - run: tox -e py39-test-util-http -- -ra - - py310-test-util-http_ubuntu-latest: - name: util-http 3.10 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.10 - uses: actions/setup-python@v5 - with: - python-version: "3.10" - - - name: Install tox - run: pip install tox-uv - - - name: Run tests - run: tox -e py310-test-util-http -- -ra - - py311-test-util-http_ubuntu-latest: - name: util-http 3.11 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.11 - uses: actions/setup-python@v5 - with: - python-version: "3.11" - - - name: Install tox - run: pip install tox-uv - - - name: Run tests - run: tox -e py311-test-util-http -- -ra - - py312-test-util-http_ubuntu-latest: - name: util-http 3.12 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.12 - uses: actions/setup-python@v5 - with: - python-version: "3.12" - - - name: Install tox - run: pip install tox-uv - - - name: Run tests - run: tox -e py312-test-util-http -- -ra - - py313-test-util-http_ubuntu-latest: - name: util-http 3.13 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.13 - uses: actions/setup-python@v5 - with: - python-version: "3.13" - - - name: Install tox - run: pip install tox-uv - - - name: Run tests - run: tox -e py313-test-util-http -- -ra - - pypy3-test-util-http_ubuntu-latest: - name: util-http pypy-3.8 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python pypy-3.8 - uses: actions/setup-python@v5 - with: - python-version: "pypy-3.8" - - - name: Install tox - run: pip install tox-uv - - - name: Run tests - run: tox -e pypy3-test-util-http -- -ra diff --git a/.github/workflows/test_2.yml b/.github/workflows/test_2.yml index bc52c4eba4..d9b622c5c3 100644 --- a/.github/workflows/test_2.yml +++ 
b/.github/workflows/test_2.yml @@ -16,6 +16,114 @@ env: jobs: + py39-test-util-http_ubuntu-latest: + name: util-http 3.9 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.9 + uses: actions/setup-python@v5 + with: + python-version: "3.9" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py39-test-util-http -- -ra + + py310-test-util-http_ubuntu-latest: + name: util-http 3.10 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.10 + uses: actions/setup-python@v5 + with: + python-version: "3.10" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py310-test-util-http -- -ra + + py311-test-util-http_ubuntu-latest: + name: util-http 3.11 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py311-test-util-http -- -ra + + py312-test-util-http_ubuntu-latest: + name: util-http 3.12 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.12 + uses: actions/setup-python@v5 + with: + python-version: "3.12" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py312-test-util-http -- -ra + + py313-test-util-http_ubuntu-latest: + name: util-http 3.13 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.13 + uses: actions/setup-python@v5 + with: + python-version: "3.13" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py313-test-util-http -- -ra + + pypy3-test-util-http_ubuntu-latest: + name: util-http pypy-3.8 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python pypy-3.8 + uses: actions/setup-python@v5 + with: + python-version: "pypy-3.8" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e pypy3-test-util-http -- -ra + py38-test-propagator-aws-xray-0_ubuntu-latest: name: propagator-aws-xray-0 3.8 Ubuntu runs-on: ubuntu-latest diff --git a/CHANGELOG.md b/CHANGELOG.md index 39e3fcdba8..3838bd844f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -39,6 +39,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ([#3129](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3129)) - `opentelemetry-util-http` Add `py.typed` file to enable PEP 561 ([#3127](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3127)) +- `opentelemetry-instrumentation-psycopg2` Add support for psycopg2-binary + ([#3186](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3186)) - `opentelemetry-opentelemetry-botocore` Add basic support for GenAI attributes for AWS Bedrock Converse API ([#3161](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3161)) diff --git a/instrumentation/README.md b/instrumentation/README.md index 75341dad9a..a229951b4b 100644 --- a/instrumentation/README.md +++ b/instrumentation/README.md @@ -32,7 +32,7 @@ | 
[opentelemetry-instrumentation-mysqlclient](./opentelemetry-instrumentation-mysqlclient) | mysqlclient < 3 | No | experimental | [opentelemetry-instrumentation-pika](./opentelemetry-instrumentation-pika) | pika >= 0.12.0 | No | experimental | [opentelemetry-instrumentation-psycopg](./opentelemetry-instrumentation-psycopg) | psycopg >= 3.1.0 | No | experimental -| [opentelemetry-instrumentation-psycopg2](./opentelemetry-instrumentation-psycopg2) | psycopg2 >= 2.7.3.1 | No | experimental +| [opentelemetry-instrumentation-psycopg2](./opentelemetry-instrumentation-psycopg2) | psycopg2 >= 2.7.3.1,psycopg2-binary >= 2.7.3.1 | No | experimental | [opentelemetry-instrumentation-pymemcache](./opentelemetry-instrumentation-pymemcache) | pymemcache >= 1.3.5, < 5 | No | experimental | [opentelemetry-instrumentation-pymongo](./opentelemetry-instrumentation-pymongo) | pymongo >= 3.1, < 5.0 | No | experimental | [opentelemetry-instrumentation-pymysql](./opentelemetry-instrumentation-pymysql) | PyMySQL < 2 | No | experimental diff --git a/instrumentation/opentelemetry-instrumentation-psycopg2/pyproject.toml b/instrumentation/opentelemetry-instrumentation-psycopg2/pyproject.toml index c8ce3e8dfa..54f5bf6f3e 100644 --- a/instrumentation/opentelemetry-instrumentation-psycopg2/pyproject.toml +++ b/instrumentation/opentelemetry-instrumentation-psycopg2/pyproject.toml @@ -34,6 +34,7 @@ dependencies = [ [project.optional-dependencies] instruments = [ "psycopg2 >= 2.7.3.1", + "psycopg2-binary >= 2.7.3.1", ] [project.entry-points.opentelemetry_instrumentor] diff --git a/instrumentation/opentelemetry-instrumentation-psycopg2/src/opentelemetry/instrumentation/psycopg2/__init__.py b/instrumentation/opentelemetry-instrumentation-psycopg2/src/opentelemetry/instrumentation/psycopg2/__init__.py index f03ad1de0d..022c59f031 100644 --- a/instrumentation/opentelemetry-instrumentation-psycopg2/src/opentelemetry/instrumentation/psycopg2/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-psycopg2/src/opentelemetry/instrumentation/psycopg2/__init__.py @@ -139,6 +139,7 @@ import logging import typing +from importlib.metadata import PackageNotFoundError, distribution from typing import Collection import psycopg2 @@ -149,7 +150,11 @@ from opentelemetry.instrumentation import dbapi from opentelemetry.instrumentation.instrumentor import BaseInstrumentor -from opentelemetry.instrumentation.psycopg2.package import _instruments +from opentelemetry.instrumentation.psycopg2.package import ( + _instruments, + _instruments_psycopg2, + _instruments_psycopg2_binary, +) from opentelemetry.instrumentation.psycopg2.version import __version__ _logger = logging.getLogger(__name__) @@ -167,6 +172,21 @@ class Psycopg2Instrumentor(BaseInstrumentor): _DATABASE_SYSTEM = "postgresql" def instrumentation_dependencies(self) -> Collection[str]: + # Determine which package of psycopg2 is installed + # Right now there are two packages, psycopg2 and psycopg2-binary + # The latter is a binary wheel package that does not require a compiler + try: + distribution("psycopg2") + return (_instruments_psycopg2,) + except PackageNotFoundError: + pass + + try: + distribution("psycopg2-binary") + return (_instruments_psycopg2_binary,) + except PackageNotFoundError: + pass + return _instruments def _instrument(self, **kwargs): diff --git a/instrumentation/opentelemetry-instrumentation-psycopg2/src/opentelemetry/instrumentation/psycopg2/package.py b/instrumentation/opentelemetry-instrumentation-psycopg2/src/opentelemetry/instrumentation/psycopg2/package.py 
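The dependency resolution above picks whichever psycopg2 distribution is actually installed before reporting instrumentation requirements. As an illustrative, standalone sketch of that detection pattern (the helper name here is made up; only the standard-library `importlib.metadata` is assumed, not part of the change itself):

```python
# Illustrative sketch: report which psycopg2 distribution is installed, if any.
# Mirrors the detection order used by instrumentation_dependencies() above.
from importlib.metadata import PackageNotFoundError, distribution
from typing import Optional


def detect_psycopg2_distribution() -> Optional[str]:
    for name in ("psycopg2", "psycopg2-binary"):
        try:
            distribution(name)  # raises PackageNotFoundError when absent
            return name
        except PackageNotFoundError:
            continue
    return None


if __name__ == "__main__":
    print(detect_psycopg2_distribution() or "no psycopg2 distribution found")
```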
index 9757a8df79..b1bf92901a 100644 --- a/instrumentation/opentelemetry-instrumentation-psycopg2/src/opentelemetry/instrumentation/psycopg2/package.py +++ b/instrumentation/opentelemetry-instrumentation-psycopg2/src/opentelemetry/instrumentation/psycopg2/package.py @@ -13,4 +13,10 @@ # limitations under the License. -_instruments = ("psycopg2 >= 2.7.3.1",) +_instruments_psycopg2 = "psycopg2 >= 2.7.3.1" +_instruments_psycopg2_binary = "psycopg2-binary >= 2.7.3.1" + +_instruments = ( + _instruments_psycopg2, + _instruments_psycopg2_binary, +) diff --git a/instrumentation/opentelemetry-instrumentation-psycopg2/test-requirements-binary.txt b/instrumentation/opentelemetry-instrumentation-psycopg2/test-requirements-binary.txt new file mode 100644 index 0000000000..80fa036b99 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-psycopg2/test-requirements-binary.txt @@ -0,0 +1,15 @@ +asgiref==3.8.1 +Deprecated==1.2.14 +iniconfig==2.0.0 +packaging==24.0 +pluggy==1.5.0 +psycopg2-binary==2.9.10 +py-cpuinfo==9.0.0 +pytest==7.4.4 +tomli==2.0.1 +typing_extensions==4.12.2 +wrapt==1.16.0 +zipp==3.19.2 +-e opentelemetry-instrumentation +-e instrumentation/opentelemetry-instrumentation-dbapi +-e instrumentation/opentelemetry-instrumentation-psycopg2 diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py index 071abe2ad7..cea9e3e11f 100644 --- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py +++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py @@ -144,6 +144,10 @@ "library": "psycopg2 >= 2.7.3.1", "instrumentation": "opentelemetry-instrumentation-psycopg2==0.51b0.dev", }, + { + "library": "psycopg2-binary >= 2.7.3.1", + "instrumentation": "opentelemetry-instrumentation-psycopg2==0.51b0.dev", + }, { "library": "pymemcache >= 1.3.5, < 5", "instrumentation": "opentelemetry-instrumentation-pymemcache==0.51b0.dev", diff --git a/tox.ini b/tox.ini index 70c6ae6a28..806c74b426 100644 --- a/tox.ini +++ b/tox.ini @@ -212,6 +212,7 @@ envlist = ; opentelemetry-instrumentation-psycopg2 py3{8,9,10,11,12,13}-test-instrumentation-psycopg2 + py3{8,9,10,11,12,13}-test-instrumentation-psycopg2-binary ; ext-psycopg2 intentionally excluded from pypy3 lint-instrumentation-psycopg2 @@ -574,6 +575,9 @@ deps = psycopg2: {[testenv]test_deps} psycopg2: -r {toxinidir}/instrumentation/opentelemetry-instrumentation-psycopg2/test-requirements.txt + psycopg2-binary: {[testenv]test_deps} + psycopg2-binary: -r {toxinidir}/instrumentation/opentelemetry-instrumentation-psycopg2/test-requirements-binary.txt + pymysql: {[testenv]test_deps} pymysql: -r {toxinidir}/instrumentation/opentelemetry-instrumentation-pymysql/test-requirements.txt @@ -813,6 +817,9 @@ commands = test-instrumentation-psycopg2: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-psycopg2/tests {posargs} lint-instrumentation-psycopg2: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-psycopg2" + ; Test only for psycopg2-binary instrumentation as the only difference between psycopg2 and psycopg2-binary is the install method + test-instrumentation-psycopg2-binary: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-psycopg2/tests {posargs} + test-instrumentation-pymemcache: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-pymemcache/tests {posargs} lint-instrumentation-pymemcache: sh -c "cd 
instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-pymemcache" From ec3c51dcd18fa747c567708d8aead21cd9081dca Mon Sep 17 00:00:00 2001 From: Aaron Abbott Date: Wed, 22 Jan 2025 13:32:34 -0500 Subject: [PATCH 05/16] Added Vertex AI spans for request parameters (#3192) * Added Vertex AI spans for request parameters * small fixes, get CI passing * Use standard OTel tracing error handling * move nested util * Actually use GAPIC client since thats what we use under the hood Also this is what LangChain uses * Comment out seed for now * Remove unnecessary dict.get() calls * Typing improvements to check that we support both v1 and v1beta1 * Add more teest cases for error conditions and fix span name bug * fix typing * Add todos for error.type --- .../CHANGELOG.md | 2 + .../instrumentation/vertexai/__init__.py | 28 ++- .../instrumentation/vertexai/patch.py | 121 ++++++++++++ .../instrumentation/vertexai/utils.py | 139 ++++++++++++++ .../cassettes/test_generate_content.yaml | 70 +++++++ .../test_generate_content_extra_params.yaml | 82 +++++++++ ..._generate_content_invalid_temperature.yaml | 59 ++++++ .../test_generate_content_missing_model.yaml | 56 ++++++ .../tests/conftest.py | 137 ++++++++++++-- .../tests/test_chat_completions.py | 173 ++++++++++++++++++ .../tests/test_placeholder.py | 20 -- tox.ini | 2 +- 12 files changed, 848 insertions(+), 41 deletions(-) create mode 100644 instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/utils.py create mode 100644 instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/cassettes/test_generate_content.yaml create mode 100644 instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/cassettes/test_generate_content_extra_params.yaml create mode 100644 instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/cassettes/test_generate_content_invalid_temperature.yaml create mode 100644 instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/cassettes/test_generate_content_missing_model.yaml create mode 100644 instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/test_chat_completions.py delete mode 100644 instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/test_placeholder.py diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai/CHANGELOG.md b/instrumentation-genai/opentelemetry-instrumentation-vertexai/CHANGELOG.md index 4d786c7840..4e43fbff19 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai/CHANGELOG.md +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai/CHANGELOG.md @@ -7,5 +7,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## Unreleased +- Added Vertex AI spans for request parameters + ([#3192](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3192)) - Initial VertexAI instrumentation ([#3123](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3123)) diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/__init__.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/__init__.py index 9437184ff0..40d1cb48ac 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/__init__.py +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/__init__.py @@ -41,9 +41,17 
@@ from typing import Any, Collection +from wrapt import ( + wrap_function_wrapper, # type: ignore[reportUnknownVariableType] +) + from opentelemetry._events import get_event_logger from opentelemetry.instrumentation.instrumentor import BaseInstrumentor from opentelemetry.instrumentation.vertexai.package import _instruments +from opentelemetry.instrumentation.vertexai.patch import ( + generate_content_create, +) +from opentelemetry.instrumentation.vertexai.utils import is_content_enabled from opentelemetry.semconv.schemas import Schemas from opentelemetry.trace import get_tracer @@ -55,20 +63,34 @@ def instrumentation_dependencies(self) -> Collection[str]: def _instrument(self, **kwargs: Any): """Enable VertexAI instrumentation.""" tracer_provider = kwargs.get("tracer_provider") - _tracer = get_tracer( + tracer = get_tracer( __name__, "", tracer_provider, schema_url=Schemas.V1_28_0.value, ) event_logger_provider = kwargs.get("event_logger_provider") - _event_logger = get_event_logger( + event_logger = get_event_logger( __name__, "", schema_url=Schemas.V1_28_0.value, event_logger_provider=event_logger_provider, ) - # TODO: implemented in later PR + + wrap_function_wrapper( + module="google.cloud.aiplatform_v1beta1.services.prediction_service.client", + name="PredictionServiceClient.generate_content", + wrapper=generate_content_create( + tracer, event_logger, is_content_enabled() + ), + ) + wrap_function_wrapper( + module="google.cloud.aiplatform_v1.services.prediction_service.client", + name="PredictionServiceClient.generate_content", + wrapper=generate_content_create( + tracer, event_logger, is_content_enabled() + ), + ) def _uninstrument(self, **kwargs: Any) -> None: """TODO: implemented in later PR""" diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/patch.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/patch.py index b0a6f42841..36a31045b5 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/patch.py +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/patch.py @@ -11,3 +11,124 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+ +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, + Callable, + MutableSequence, +) + +from opentelemetry._events import EventLogger +from opentelemetry.instrumentation.vertexai.utils import ( + GenerateContentParams, + get_genai_request_attributes, + get_span_name, +) +from opentelemetry.trace import SpanKind, Tracer + +if TYPE_CHECKING: + from google.cloud.aiplatform_v1.services.prediction_service import client + from google.cloud.aiplatform_v1.types import ( + content, + prediction_service, + ) + from google.cloud.aiplatform_v1beta1.services.prediction_service import ( + client as client_v1beta1, + ) + from google.cloud.aiplatform_v1beta1.types import ( + content as content_v1beta1, + ) + from google.cloud.aiplatform_v1beta1.types import ( + prediction_service as prediction_service_v1beta1, + ) + + +# Use parameter signature from +# https://github.com/googleapis/python-aiplatform/blob/v1.76.0/google/cloud/aiplatform_v1/services/prediction_service/client.py#L2088 +# to handle named vs positional args robustly +def _extract_params( + request: prediction_service.GenerateContentRequest + | prediction_service_v1beta1.GenerateContentRequest + | dict[Any, Any] + | None = None, + *, + model: str | None = None, + contents: MutableSequence[content.Content] + | MutableSequence[content_v1beta1.Content] + | None = None, + **_kwargs: Any, +) -> GenerateContentParams: + # Request vs the named parameters are mututally exclusive or the RPC will fail + if not request: + return GenerateContentParams( + model=model or "", + contents=contents, + ) + + if isinstance(request, dict): + return GenerateContentParams(**request) + + return GenerateContentParams( + model=request.model, + contents=request.contents, + system_instruction=request.system_instruction, + tools=request.tools, + tool_config=request.tool_config, + labels=request.labels, + safety_settings=request.safety_settings, + generation_config=request.generation_config, + ) + + +def generate_content_create( + tracer: Tracer, event_logger: EventLogger, capture_content: bool +): + """Wrap the `generate_content` method of the `GenerativeModel` class to trace it.""" + + def traced_method( + wrapped: Callable[ + ..., + prediction_service.GenerateContentResponse + | prediction_service_v1beta1.GenerateContentResponse, + ], + instance: client.PredictionServiceClient + | client_v1beta1.PredictionServiceClient, + args: Any, + kwargs: Any, + ): + params = _extract_params(*args, **kwargs) + span_attributes = get_genai_request_attributes(params) + + span_name = get_span_name(span_attributes) + with tracer.start_as_current_span( + name=span_name, + kind=SpanKind.CLIENT, + attributes=span_attributes, + ) as _span: + # TODO: emit request events + # if span.is_recording(): + # for message in kwargs.get("messages", []): + # event_logger.emit( + # message_to_event(message, capture_content) + # ) + + # TODO: set error.type attribute + # https://github.com/open-telemetry/semantic-conventions/blob/main/docs/gen-ai/gen-ai-spans.md + result = wrapped(*args, **kwargs) + # TODO: handle streaming + # if is_streaming(kwargs): + # return StreamWrapper( + # result, span, event_logger, capture_content + # ) + + # TODO: add response attributes and events + # if span.is_recording(): + # _set_response_attributes( + # span, result, event_logger, capture_content + # ) + return result + + return traced_method diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/utils.py 
b/instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/utils.py new file mode 100644 index 0000000000..96d7125028 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/utils.py @@ -0,0 +1,139 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import re +from dataclasses import dataclass +from os import environ +from typing import ( + TYPE_CHECKING, + Mapping, + Sequence, +) + +from opentelemetry.semconv._incubating.attributes import ( + gen_ai_attributes as GenAIAttributes, +) +from opentelemetry.util.types import AttributeValue + +if TYPE_CHECKING: + from google.cloud.aiplatform_v1.types import content, tool + from google.cloud.aiplatform_v1beta1.types import ( + content as content_v1beta1, + ) + from google.cloud.aiplatform_v1beta1.types import ( + tool as tool_v1beta1, + ) + + +@dataclass(frozen=True) +class GenerateContentParams: + model: str + contents: ( + Sequence[content.Content] | Sequence[content_v1beta1.Content] | None + ) = None + system_instruction: content.Content | content_v1beta1.Content | None = None + tools: Sequence[tool.Tool] | Sequence[tool_v1beta1.Tool] | None = None + tool_config: tool.ToolConfig | tool_v1beta1.ToolConfig | None = None + labels: Mapping[str, str] | None = None + safety_settings: ( + Sequence[content.SafetySetting] + | Sequence[content_v1beta1.SafetySetting] + | None + ) = None + generation_config: ( + content.GenerationConfig | content_v1beta1.GenerationConfig | None + ) = None + + +def get_genai_request_attributes( + params: GenerateContentParams, + operation_name: GenAIAttributes.GenAiOperationNameValues = GenAIAttributes.GenAiOperationNameValues.CHAT, +): + model = _get_model_name(params.model) + generation_config = params.generation_config + attributes: dict[str, AttributeValue] = { + GenAIAttributes.GEN_AI_OPERATION_NAME: operation_name.value, + GenAIAttributes.GEN_AI_SYSTEM: GenAIAttributes.GenAiSystemValues.VERTEX_AI.value, + GenAIAttributes.GEN_AI_REQUEST_MODEL: model, + } + + if not generation_config: + return attributes + + # Check for optional fields + # https://proto-plus-python.readthedocs.io/en/stable/fields.html#optional-fields + if "temperature" in generation_config: + attributes[GenAIAttributes.GEN_AI_REQUEST_TEMPERATURE] = ( + generation_config.temperature + ) + if "top_p" in generation_config: + attributes[GenAIAttributes.GEN_AI_REQUEST_TOP_P] = ( + generation_config.top_p + ) + if "max_output_tokens" in generation_config: + attributes[GenAIAttributes.GEN_AI_REQUEST_MAX_TOKENS] = ( + generation_config.max_output_tokens + ) + if "presence_penalty" in generation_config: + attributes[GenAIAttributes.GEN_AI_REQUEST_PRESENCE_PENALTY] = ( + generation_config.presence_penalty + ) + if "frequency_penalty" in generation_config: + attributes[GenAIAttributes.GEN_AI_REQUEST_FREQUENCY_PENALTY] = ( + generation_config.frequency_penalty + ) + # 
Uncomment once GEN_AI_REQUEST_SEED is released in 1.30 + # https://github.com/open-telemetry/semantic-conventions/pull/1710 + # if "seed" in generation_config: + # attributes[GenAIAttributes.GEN_AI_REQUEST_SEED] = ( + # generation_config.seed + # ) + if "stop_sequences" in generation_config: + attributes[GenAIAttributes.GEN_AI_REQUEST_STOP_SEQUENCES] = ( + generation_config.stop_sequences + ) + + return attributes + + +_MODEL_STRIP_RE = re.compile( + r"^projects/(.*)/locations/(.*)/publishers/google/models/" +) + + +def _get_model_name(model: str) -> str: + return _MODEL_STRIP_RE.sub("", model) + + +OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT = ( + "OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT" +) + + +def is_content_enabled() -> bool: + capture_content = environ.get( + OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT, "false" + ) + + return capture_content.lower() == "true" + + +def get_span_name(span_attributes: Mapping[str, AttributeValue]) -> str: + name = span_attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] + model = span_attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL] + if not model: + return f"{name}" + return f"{name} {model}" diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/cassettes/test_generate_content.yaml b/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/cassettes/test_generate_content.yaml new file mode 100644 index 0000000000..69856f9308 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/cassettes/test_generate_content.yaml @@ -0,0 +1,70 @@ +interactions: +- request: + body: |- + { + "contents": [ + { + "role": "user", + "parts": [ + { + "text": "Say this is a test" + } + ] + } + ] + } + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '141' + Content-Type: + - application/json + User-Agent: + - python-requests/2.32.3 + method: POST + uri: https://us-central1-aiplatform.googleapis.com/v1/projects/fake-project/locations/us-central1/publishers/google/models/gemini-1.5-flash-002:generateContent?%24alt=json%3Benum-encoding%3Dint + response: + body: + string: |- + { + "candidates": [ + { + "content": { + "role": "model", + "parts": [ + { + "text": "Okay, I understand. I'm ready for your test. 
Please proceed.\n" + } + ] + }, + "finishReason": 1, + "avgLogprobs": -0.005692833348324424 + } + ], + "usageMetadata": { + "promptTokenCount": 5, + "candidatesTokenCount": 19, + "totalTokenCount": 24 + }, + "modelVersion": "gemini-1.5-flash-002" + } + headers: + Content-Type: + - application/json; charset=UTF-8 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + content-length: + - '453' + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/cassettes/test_generate_content_extra_params.yaml b/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/cassettes/test_generate_content_extra_params.yaml new file mode 100644 index 0000000000..e6547166bc --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/cassettes/test_generate_content_extra_params.yaml @@ -0,0 +1,82 @@ +interactions: +- request: + body: |- + { + "contents": [ + { + "role": "user", + "parts": [ + { + "text": "Say this is a test" + } + ] + } + ], + "generationConfig": { + "temperature": 0.2, + "topP": 0.95, + "topK": 2.0, + "maxOutputTokens": 5, + "stopSequences": [ + "\n\n\n" + ], + "presencePenalty": -1.5, + "frequencyPenalty": 1.0, + "seed": 12345 + } + } + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '376' + Content-Type: + - application/json + User-Agent: + - python-requests/2.32.3 + method: POST + uri: https://us-central1-aiplatform.googleapis.com/v1/projects/fake-project/locations/us-central1/publishers/google/models/gemini-1.5-flash-002:generateContent?%24alt=json%3Benum-encoding%3Dint + response: + body: + string: |- + { + "candidates": [ + { + "content": { + "role": "model", + "parts": [ + { + "text": "Okay, I understand." 
+ } + ] + }, + "finishReason": 2, + "avgLogprobs": -0.006721805781126022 + } + ], + "usageMetadata": { + "promptTokenCount": 5, + "candidatesTokenCount": 5, + "totalTokenCount": 10 + }, + "modelVersion": "gemini-1.5-flash-002" + } + headers: + Content-Type: + - application/json; charset=UTF-8 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + content-length: + - '407' + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/cassettes/test_generate_content_invalid_temperature.yaml b/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/cassettes/test_generate_content_invalid_temperature.yaml new file mode 100644 index 0000000000..600635f9b2 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/cassettes/test_generate_content_invalid_temperature.yaml @@ -0,0 +1,59 @@ +interactions: +- request: + body: |- + { + "contents": [ + { + "role": "user", + "parts": [ + { + "text": "Say this is a test" + } + ] + } + ], + "generationConfig": { + "temperature": 1000.0 + } + } + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '196' + Content-Type: + - application/json + User-Agent: + - python-requests/2.32.3 + method: POST + uri: https://us-central1-aiplatform.googleapis.com/v1/projects/fake-project/locations/us-central1/publishers/google/models/gemini-1.5-flash-002:generateContent?%24alt=json%3Benum-encoding%3Dint + response: + body: + string: |- + { + "error": { + "code": 400, + "message": "Unable to submit request because it has a temperature value of 1000 but the supported range is from 0 (inclusive) to 2.0001 (exclusive). Update the value and try again.", + "status": "INVALID_ARGUMENT", + "details": [] + } + } + headers: + Content-Type: + - application/json; charset=UTF-8 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + content-length: + - '809' + status: + code: 400 + message: Bad Request +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/cassettes/test_generate_content_missing_model.yaml b/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/cassettes/test_generate_content_missing_model.yaml new file mode 100644 index 0000000000..efe3e152ce --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/cassettes/test_generate_content_missing_model.yaml @@ -0,0 +1,56 @@ +interactions: +- request: + body: |- + { + "contents": [ + { + "role": "user", + "parts": [ + { + "text": "Say this is a test" + } + ] + } + ] + } + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '141' + Content-Type: + - application/json + User-Agent: + - python-requests/2.32.3 + method: POST + uri: https://us-central1-aiplatform.googleapis.com/v1/projects/fake-project/locations/us-central1/publishers/google/models/gemini-does-not-exist:generateContent?%24alt=json%3Benum-encoding%3Dint + response: + body: + string: |- + { + "error": { + "code": 404, + "message": "Publisher Model `projects/otel-starter-project/locations/us-central1/publishers/google/models/gemini-does-not-exist` not found.", + "status": "NOT_FOUND", + "details": [] + } + } + headers: + Content-Type: + - application/json; charset=UTF-8 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + content-length: + - '672' + status: + code: 404 + message: Not Found 
+version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/conftest.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/conftest.py index 8337188ece..b76a108805 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/conftest.py +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/conftest.py @@ -1,22 +1,42 @@ """Unit tests configuration module.""" import json +import os +import re +from typing import Any, Mapping, MutableMapping import pytest +import vertexai import yaml - +from google.auth.credentials import AnonymousCredentials +from vcr import VCR +from vcr.record_mode import RecordMode +from vcr.request import Request + +from opentelemetry.instrumentation.vertexai import VertexAIInstrumentor +from opentelemetry.instrumentation.vertexai.utils import ( + OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT, +) from opentelemetry.sdk._events import EventLoggerProvider from opentelemetry.sdk._logs import LoggerProvider from opentelemetry.sdk._logs.export import ( InMemoryLogExporter, SimpleLogRecordProcessor, ) +from opentelemetry.sdk.metrics import ( + MeterProvider, +) +from opentelemetry.sdk.metrics.export import ( + InMemoryMetricReader, +) from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import SimpleSpanProcessor from opentelemetry.sdk.trace.export.in_memory_span_exporter import ( InMemorySpanExporter, ) +FAKE_PROJECT = "fake-project" + @pytest.fixture(scope="function", name="span_exporter") def fixture_span_exporter(): @@ -30,6 +50,12 @@ def fixture_log_exporter(): yield exporter +@pytest.fixture(scope="function", name="metric_reader") +def fixture_metric_reader(): + exporter = InMemoryMetricReader() + yield exporter + + @pytest.fixture(scope="function", name="tracer_provider") def fixture_tracer_provider(span_exporter): provider = TracerProvider() @@ -46,17 +72,103 @@ def fixture_event_logger_provider(log_exporter): return event_logger_provider +@pytest.fixture(scope="function", name="meter_provider") +def fixture_meter_provider(metric_reader): + return MeterProvider( + metric_readers=[metric_reader], + ) + + +@pytest.fixture(autouse=True) +def vertexai_init(vcr: VCR) -> None: + # When not recording (in CI), don't do any auth. That prevents trying to read application + # default credentials from the filesystem or metadata server and oauth token exchange. This + # is not the interesting part of our instrumentation to test. 
+ credentials = None + project = None + if vcr.record_mode == RecordMode.NONE: + credentials = AnonymousCredentials() + project = FAKE_PROJECT + vertexai.init( + api_transport="rest", credentials=credentials, project=project + ) + + +@pytest.fixture +def instrument_no_content( + tracer_provider, event_logger_provider, meter_provider +): + os.environ.update( + {OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT: "False"} + ) + + instrumentor = VertexAIInstrumentor() + instrumentor.instrument( + tracer_provider=tracer_provider, + event_logger_provider=event_logger_provider, + meter_provider=meter_provider, + ) + + yield instrumentor + os.environ.pop(OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT, None) + instrumentor.uninstrument() + + +@pytest.fixture +def instrument_with_content( + tracer_provider, event_logger_provider, meter_provider +): + os.environ.update( + {OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT: "True"} + ) + instrumentor = VertexAIInstrumentor() + instrumentor.instrument( + tracer_provider=tracer_provider, + event_logger_provider=event_logger_provider, + meter_provider=meter_provider, + ) + + yield instrumentor + os.environ.pop(OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT, None) + instrumentor.uninstrument() + + @pytest.fixture(scope="module") def vcr_config(): + filter_header_regexes = [ + r"X-.*", + "Server", + "Date", + "Expires", + "Authorization", + ] + + def filter_headers(headers: Mapping[str, str]) -> Mapping[str, str]: + return { + key: val + for key, val in headers.items() + if not any( + re.match(filter_re, key, re.IGNORECASE) + for filter_re in filter_header_regexes + ) + } + + def before_record_cb(request: Request): + request.headers = filter_headers(request.headers) + request.uri = re.sub( + r"/projects/[^/]+/", "/projects/fake-project/", request.uri + ) + return request + + def before_response_cb(response: MutableMapping[str, Any]): + response["headers"] = filter_headers(response["headers"]) + return response + return { - "filter_headers": [ - ("cookie", "test_cookie"), - ("authorization", "Bearer test_vertexai_api_key"), - ("vertexai-organization", "test_vertexai_org_id"), - ("vertexai-project", "test_vertexai_project_id"), - ], "decode_compressed_response": True, - "before_record_response": scrub_response_headers, + "before_record_request": before_record_cb, + "before_record_response": before_response_cb, + "ignore_hosts": ["oauth2.googleapis.com"], } @@ -125,12 +237,3 @@ def deserialize(cassette_string): def fixture_vcr(vcr): vcr.register_serializer("yaml", PrettyPrintJSONBody) return vcr - - -def scrub_response_headers(response): - """ - This scrubs sensitive response headers. Note they are case-sensitive! 
- """ - response["headers"]["vertexai-organization"] = "test_vertexai_org_id" - response["headers"]["Set-Cookie"] = "test_set_cookie" - return response diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/test_chat_completions.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/test_chat_completions.py new file mode 100644 index 0000000000..63a2e2c2d1 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/test_chat_completions.py @@ -0,0 +1,173 @@ +import pytest +from google.api_core.exceptions import BadRequest, NotFound +from vertexai.generative_models import ( + Content, + GenerationConfig, + GenerativeModel, + Part, +) + +from opentelemetry.instrumentation.vertexai import VertexAIInstrumentor +from opentelemetry.sdk.trace import ReadableSpan +from opentelemetry.sdk.trace.export.in_memory_span_exporter import ( + InMemorySpanExporter, +) +from opentelemetry.trace import StatusCode + + +@pytest.mark.vcr +def test_generate_content( + span_exporter: InMemorySpanExporter, + instrument_with_content: VertexAIInstrumentor, +): + model = GenerativeModel("gemini-1.5-flash-002") + model.generate_content( + [ + Content(role="user", parts=[Part.from_text("Say this is a test")]), + ] + ) + + spans = span_exporter.get_finished_spans() + assert len(spans) == 1 + assert spans[0].name == "chat gemini-1.5-flash-002" + assert dict(spans[0].attributes) == { + "gen_ai.operation.name": "chat", + "gen_ai.request.model": "gemini-1.5-flash-002", + "gen_ai.system": "vertex_ai", + } + + +@pytest.mark.vcr +def test_generate_content_empty_model( + span_exporter: InMemorySpanExporter, + instrument_with_content: VertexAIInstrumentor, +): + model = GenerativeModel("") + try: + model.generate_content( + [ + Content( + role="user", parts=[Part.from_text("Say this is a test")] + ) + ], + ) + except ValueError: + pass + + spans = span_exporter.get_finished_spans() + assert len(spans) == 1 + assert spans[0].name == "chat" + # Captures invalid params + assert dict(spans[0].attributes) == { + "gen_ai.operation.name": "chat", + "gen_ai.request.model": "", + "gen_ai.system": "vertex_ai", + } + assert_span_error(spans[0]) + + +@pytest.mark.vcr +def test_generate_content_missing_model( + span_exporter: InMemorySpanExporter, + instrument_with_content: VertexAIInstrumentor, +): + model = GenerativeModel("gemini-does-not-exist") + try: + model.generate_content( + [ + Content( + role="user", parts=[Part.from_text("Say this is a test")] + ) + ], + ) + except NotFound: + pass + + spans = span_exporter.get_finished_spans() + assert len(spans) == 1 + assert spans[0].name == "chat gemini-does-not-exist" + # Captures invalid params + assert dict(spans[0].attributes) == { + "gen_ai.operation.name": "chat", + "gen_ai.request.model": "gemini-does-not-exist", + "gen_ai.system": "vertex_ai", + } + assert_span_error(spans[0]) + + +@pytest.mark.vcr +def test_generate_content_invalid_temperature( + span_exporter: InMemorySpanExporter, + instrument_with_content: VertexAIInstrumentor, +): + model = GenerativeModel("gemini-1.5-flash-002") + try: + # Temperature out of range causes error + model.generate_content( + [ + Content( + role="user", parts=[Part.from_text("Say this is a test")] + ) + ], + generation_config=GenerationConfig(temperature=1000), + ) + except BadRequest: + pass + + spans = span_exporter.get_finished_spans() + assert len(spans) == 1 + assert spans[0].name == "chat gemini-1.5-flash-002" + assert dict(spans[0].attributes) == { + "gen_ai.operation.name": 
"chat", + "gen_ai.request.model": "gemini-1.5-flash-002", + "gen_ai.request.temperature": 1000.0, + "gen_ai.system": "vertex_ai", + } + assert_span_error(spans[0]) + + +@pytest.mark.vcr() +def test_generate_content_extra_params(span_exporter, instrument_no_content): + generation_config = GenerationConfig( + top_k=2, + top_p=0.95, + temperature=0.2, + stop_sequences=["\n\n\n"], + max_output_tokens=5, + presence_penalty=-1.5, + frequency_penalty=1.0, + seed=12345, + ) + model = GenerativeModel("gemini-1.5-flash-002") + model.generate_content( + [ + Content(role="user", parts=[Part.from_text("Say this is a test")]), + ], + generation_config=generation_config, + ) + + spans = span_exporter.get_finished_spans() + assert len(spans) == 1 + assert dict(spans[0].attributes) == { + "gen_ai.operation.name": "chat", + "gen_ai.request.frequency_penalty": 1.0, + "gen_ai.request.max_tokens": 5, + "gen_ai.request.model": "gemini-1.5-flash-002", + "gen_ai.request.presence_penalty": -1.5, + "gen_ai.request.stop_sequences": ("\n\n\n",), + "gen_ai.request.temperature": 0.20000000298023224, + "gen_ai.request.top_p": 0.949999988079071, + "gen_ai.system": "vertex_ai", + } + + +def assert_span_error(span: ReadableSpan) -> None: + # Sets error status + assert span.status.status_code == StatusCode.ERROR + + # TODO: check thate error.type is set + # https://github.com/open-telemetry/semantic-conventions/blob/main/docs/gen-ai/gen-ai-spans.md + + # Records exception event + error_events = [e for e in span.events if e.name == "exception"] + assert error_events != [] diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/test_placeholder.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/test_placeholder.py deleted file mode 100644 index c910bfa0bf..0000000000 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/test_placeholder.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# TODO: adapt tests from OpenLLMetry here along with tests from -# instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_chat_completions.py - - -def test_placeholder(): - assert True diff --git a/tox.ini b/tox.ini index 806c74b426..22e56a835f 100644 --- a/tox.ini +++ b/tox.ini @@ -802,7 +802,7 @@ commands = test-instrumentation-openai-v2: pytest {toxinidir}/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests {posargs} lint-instrumentation-openai-v2: sh -c "cd instrumentation-genai && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-openai-v2" - test-instrumentation-vertexai: pytest {toxinidir}/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests {posargs} + test-instrumentation-vertexai: pytest {toxinidir}/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests --vcr-record=none {posargs} lint-instrumentation-vertexai: sh -c "cd instrumentation-genai && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-vertexai" test-instrumentation-sio-pika: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-pika/tests {posargs} From 2756c1edff42db2984296e0c5dbe668e8de85aa8 Mon Sep 17 00:00:00 2001 From: Riccardo Magliocchetti Date: Thu, 23 Jan 2025 17:14:49 +0100 Subject: [PATCH 06/16] botocore: add basic handling for bedrock invoke.model (#3200) * Add basic handling for invoke.model * Add changelog a please pylint * Record converse cassettes against us-east-1 * Avoid double copy of streaming body --------- Co-authored-by: Adrian Cole <64215+codefromthecrypt@users.noreply.github.com> --- CHANGELOG.md | 2 + .../bedrock-runtime/zero-code/invoke_model.py | 25 +++ .../botocore/extensions/bedrock.py | 197 +++++++++++++++++- .../tests/bedrock_utils.py | 75 ++++++- .../cassettes/test_converse_with_content.yaml | 64 ++---- .../test_converse_with_invalid_model.yaml | 33 +-- ...nvoke_model_with_content[amazon.nova].yaml | 58 ++++++ ...voke_model_with_content[amazon.titan].yaml | 57 +++++ ..._model_with_content[anthropic.claude].yaml | 58 ++++++ .../test_invoke_model_with_invalid_model.yaml | 51 +++++ .../tests/conftest.py | 71 +------ .../tests/test_botocore_bedrock.py | 130 +++++++++++- 12 files changed, 664 insertions(+), 157 deletions(-) create mode 100644 instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/invoke_model.py create mode 100644 instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_content[amazon.nova].yaml create mode 100644 instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_content[amazon.titan].yaml create mode 100644 instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_content[anthropic.claude].yaml create mode 100644 instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_invalid_model.yaml diff --git a/CHANGELOG.md b/CHANGELOG.md index 3838bd844f..72c372ecb9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -43,6 +43,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ([#3186](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3186)) - `opentelemetry-opentelemetry-botocore` Add basic support for GenAI attributes for AWS Bedrock Converse API ([#3161](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3161)) +- `opentelemetry-opentelemetry-botocore` Add basic support for GenAI attributes for AWS Bedrock InvokeModel API + 
([#3200](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3200)) ### Fixed diff --git a/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/invoke_model.py b/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/invoke_model.py new file mode 100644 index 0000000000..f023e4aac7 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/invoke_model.py @@ -0,0 +1,25 @@ +import json +import os + +import boto3 + + +def main(): + client = boto3.client("bedrock-runtime") + response = client.invoke_model( + modelId=os.getenv("CHAT_MODEL", "amazon.titan-text-lite-v1"), + body=json.dumps( + { + "inputText": "Write a short poem on OpenTelemetry.", + "textGenerationConfig": {}, + }, + ), + ) + + body = response["body"].read() + response_data = json.loads(body.decode("utf-8")) + print(response_data["results"][0]["outputText"]) + + +if __name__ == "__main__": + main() diff --git a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py index fe826da603..66021d34ff 100644 --- a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py +++ b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py @@ -18,9 +18,13 @@ from __future__ import annotations +import io +import json import logging from typing import Any +from botocore.response import StreamingBody + from opentelemetry.instrumentation.botocore.extensions.types import ( _AttributeMapT, _AwsSdkExtension, @@ -58,7 +62,7 @@ class _BedrockRuntimeExtension(_AwsSdkExtension): Amazon Bedrock Runtime. 
""" - _HANDLED_OPERATIONS = {"Converse"} + _HANDLED_OPERATIONS = {"Converse", "InvokeModel"} def extract_attributes(self, attributes: _AttributeMapT): if self._call_context.operation not in self._HANDLED_OPERATIONS: @@ -73,6 +77,7 @@ def extract_attributes(self, attributes: _AttributeMapT): GenAiOperationNameValues.CHAT.value ) + # Converse if inference_config := self._call_context.params.get( "inferenceConfig" ): @@ -97,6 +102,84 @@ def extract_attributes(self, attributes: _AttributeMapT): inference_config.get("stopSequences"), ) + # InvokeModel + # Get the request body if it exists + body = self._call_context.params.get("body") + if body: + try: + request_body = json.loads(body) + + if "amazon.titan" in model_id: + # titan interface is a text completion one + attributes[GEN_AI_OPERATION_NAME] = ( + GenAiOperationNameValues.TEXT_COMPLETION.value + ) + self._extract_titan_attributes( + attributes, request_body + ) + elif "amazon.nova" in model_id: + self._extract_nova_attributes(attributes, request_body) + elif "anthropic.claude" in model_id: + self._extract_claude_attributes( + attributes, request_body + ) + except json.JSONDecodeError: + _logger.debug("Error: Unable to parse the body as JSON") + + def _extract_titan_attributes(self, attributes, request_body): + config = request_body.get("textGenerationConfig", {}) + self._set_if_not_none( + attributes, GEN_AI_REQUEST_TEMPERATURE, config.get("temperature") + ) + self._set_if_not_none( + attributes, GEN_AI_REQUEST_TOP_P, config.get("topP") + ) + self._set_if_not_none( + attributes, GEN_AI_REQUEST_MAX_TOKENS, config.get("maxTokenCount") + ) + self._set_if_not_none( + attributes, + GEN_AI_REQUEST_STOP_SEQUENCES, + config.get("stopSequences"), + ) + + def _extract_nova_attributes(self, attributes, request_body): + config = request_body.get("inferenceConfig", {}) + self._set_if_not_none( + attributes, GEN_AI_REQUEST_TEMPERATURE, config.get("temperature") + ) + self._set_if_not_none( + attributes, GEN_AI_REQUEST_TOP_P, config.get("topP") + ) + self._set_if_not_none( + attributes, GEN_AI_REQUEST_MAX_TOKENS, config.get("max_new_tokens") + ) + self._set_if_not_none( + attributes, + GEN_AI_REQUEST_STOP_SEQUENCES, + config.get("stopSequences"), + ) + + def _extract_claude_attributes(self, attributes, request_body): + self._set_if_not_none( + attributes, + GEN_AI_REQUEST_MAX_TOKENS, + request_body.get("max_tokens"), + ) + self._set_if_not_none( + attributes, + GEN_AI_REQUEST_TEMPERATURE, + request_body.get("temperature"), + ) + self._set_if_not_none( + attributes, GEN_AI_REQUEST_TOP_P, request_body.get("top_p") + ) + self._set_if_not_none( + attributes, + GEN_AI_REQUEST_STOP_SEQUENCES, + request_body.get("stop_sequences"), + ) + @staticmethod def _set_if_not_none(attributes, key, value): if value is not None: @@ -115,13 +198,8 @@ def before_service_call(self, span: Span): if operation_name and request_model: span.update_name(f"{operation_name} {request_model}") - def on_success(self, span: Span, result: dict[str, Any]): - if self._call_context.operation not in self._HANDLED_OPERATIONS: - return - - if not span.is_recording(): - return - + # pylint: disable=no-self-use + def _converse_on_success(self, span: Span, result: dict[str, Any]): if usage := result.get("usage"): if input_tokens := usage.get("inputTokens"): span.set_attribute( @@ -140,6 +218,109 @@ def on_success(self, span: Span, result: dict[str, Any]): [stop_reason], ) + def _invoke_model_on_success( + self, span: Span, result: dict[str, Any], model_id: str + ): + original_body = None + try: 
+ original_body = result["body"] + body_content = original_body.read() + + # Replenish stream for downstream application use + new_stream = io.BytesIO(body_content) + result["body"] = StreamingBody(new_stream, len(body_content)) + + response_body = json.loads(body_content.decode("utf-8")) + if "amazon.titan" in model_id: + self._handle_amazon_titan_response(span, response_body) + elif "amazon.nova" in model_id: + self._handle_amazon_nova_response(span, response_body) + elif "anthropic.claude" in model_id: + self._handle_anthropic_claude_response(span, response_body) + + except json.JSONDecodeError: + _logger.debug("Error: Unable to parse the response body as JSON") + except Exception as exc: # pylint: disable=broad-exception-caught + _logger.debug("Error processing response: %s", exc) + finally: + if original_body is not None: + original_body.close() + + def on_success(self, span: Span, result: dict[str, Any]): + if self._call_context.operation not in self._HANDLED_OPERATIONS: + return + + if not span.is_recording(): + return + + # Converse + self._converse_on_success(span, result) + + model_id = self._call_context.params.get(_MODEL_ID_KEY) + if not model_id: + return + + # InvokeModel + if "body" in result and isinstance(result["body"], StreamingBody): + self._invoke_model_on_success(span, result, model_id) + + # pylint: disable=no-self-use + def _handle_amazon_titan_response( + self, span: Span, response_body: dict[str, Any] + ): + if "inputTextTokenCount" in response_body: + span.set_attribute( + GEN_AI_USAGE_INPUT_TOKENS, response_body["inputTextTokenCount"] + ) + if "results" in response_body and response_body["results"]: + result = response_body["results"][0] + if "tokenCount" in result: + span.set_attribute( + GEN_AI_USAGE_OUTPUT_TOKENS, result["tokenCount"] + ) + if "completionReason" in result: + span.set_attribute( + GEN_AI_RESPONSE_FINISH_REASONS, + [result["completionReason"]], + ) + + # pylint: disable=no-self-use + def _handle_amazon_nova_response( + self, span: Span, response_body: dict[str, Any] + ): + if "usage" in response_body: + usage = response_body["usage"] + if "inputTokens" in usage: + span.set_attribute( + GEN_AI_USAGE_INPUT_TOKENS, usage["inputTokens"] + ) + if "outputTokens" in usage: + span.set_attribute( + GEN_AI_USAGE_OUTPUT_TOKENS, usage["outputTokens"] + ) + if "stopReason" in response_body: + span.set_attribute( + GEN_AI_RESPONSE_FINISH_REASONS, [response_body["stopReason"]] + ) + + # pylint: disable=no-self-use + def _handle_anthropic_claude_response( + self, span: Span, response_body: dict[str, Any] + ): + if usage := response_body.get("usage"): + if "input_tokens" in usage: + span.set_attribute( + GEN_AI_USAGE_INPUT_TOKENS, usage["input_tokens"] + ) + if "output_tokens" in usage: + span.set_attribute( + GEN_AI_USAGE_OUTPUT_TOKENS, usage["output_tokens"] + ) + if "stop_reason" in response_body: + span.set_attribute( + GEN_AI_RESPONSE_FINISH_REASONS, [response_body["stop_reason"]] + ) + def on_error(self, span: Span, exception: _BotoClientErrorT): if self._call_context.operation not in self._HANDLED_OPERATIONS: return diff --git a/instrumentation/opentelemetry-instrumentation-botocore/tests/bedrock_utils.py b/instrumentation/opentelemetry-instrumentation-botocore/tests/bedrock_utils.py index 6d2415432f..460d3a4fb5 100644 --- a/instrumentation/opentelemetry-instrumentation-botocore/tests/bedrock_utils.py +++ b/instrumentation/opentelemetry-instrumentation-botocore/tests/bedrock_utils.py @@ -14,14 +14,83 @@ from __future__ import annotations +import json 
from typing import Any +from botocore.response import StreamingBody + from opentelemetry.sdk.trace import ReadableSpan from opentelemetry.semconv._incubating.attributes import ( gen_ai_attributes as GenAIAttributes, ) +# pylint: disable=too-many-branches, too-many-locals +def assert_completion_attributes_from_streaming_body( + span: ReadableSpan, + request_model: str, + response: StreamingBody | None, + operation_name: str = "chat", + request_top_p: int | None = None, + request_temperature: int | None = None, + request_max_tokens: int | None = None, + request_stop_sequences: list[str] | None = None, +): + input_tokens = None + output_tokens = None + finish_reason = None + if response is not None: + original_body = response["body"] + body_content = original_body.read() + response = json.loads(body_content.decode("utf-8")) + assert response + + if "amazon.titan" in request_model: + input_tokens = response.get("inputTextTokenCount") + results = response.get("results") + if results: + first_result = results[0] + output_tokens = first_result.get("tokenCount") + finish_reason = (first_result["completionReason"],) + elif "amazon.nova" in request_model: + if usage := response.get("usage"): + input_tokens = usage["inputTokens"] + output_tokens = usage["outputTokens"] + else: + input_tokens, output_tokens = None, None + + if "stopReason" in response: + finish_reason = (response["stopReason"],) + else: + finish_reason = None + elif "anthropic.claude" in request_model: + if usage := response.get("usage"): + input_tokens = usage["input_tokens"] + output_tokens = usage["output_tokens"] + else: + input_tokens, output_tokens = None, None + + if "stop_reason" in response: + finish_reason = (response["stop_reason"],) + else: + finish_reason = None + + return assert_all_attributes( + span, + request_model, + input_tokens, + output_tokens, + finish_reason, + operation_name, + request_top_p, + request_temperature, + request_max_tokens, + tuple(request_stop_sequences) + if request_stop_sequences is not None + else request_stop_sequences, + ) + + def assert_completion_attributes( span: ReadableSpan, request_model: str, @@ -38,7 +107,7 @@ def assert_completion_attributes( else: input_tokens, output_tokens = None, None - if response: + if response and "stopReason" in response: finish_reason = (response["stopReason"],) else: finish_reason = None @@ -60,10 +129,10 @@ def assert_completion_attributes( def assert_equal_or_not_present(value, attribute_name, span): - if value: + if value is not None: assert value == span.attributes[attribute_name] else: - assert attribute_name not in span.attributes + assert attribute_name not in span.attributes, attribute_name def assert_all_attributes( diff --git a/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_with_content.yaml b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_with_content.yaml index 8060f02076..f9a6f76e96 100644 --- a/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_with_content.yaml +++ b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_with_content.yaml @@ -1,26 +1,8 @@ interactions: - request: - body: |- - { - "messages": [ - { - "role": "user", - "content": [ - { - "text": "Say this is a test" - } - ] - } - ], - "inferenceConfig": { - "maxTokens": 10, - "temperature": 0.8, - "topP": 1, - "stopSequences": [ - "|" - ] - } - } + body: '{"messages": [{"role": "user", "content": [{"text": "Say this is a test"}]}], + 
"inferenceConfig": {"maxTokens": 10, "temperature": 0.8, "topP": 1, "stopSequences": + ["|"]}}' headers: Content-Length: - '170' @@ -34,59 +16,39 @@ interactions: aG9uIGNmZy9yZXRyeS1tb2RlI2xlZ2FjeSBCb3RvY29yZS8xLjM1LjU2 X-Amz-Date: - !!binary | - MjAyNDEyMzFUMTMyMDQxWg== + MjAyNTAxMjJUMTYwODQwWg== X-Amz-Security-Token: - test_aws_security_token X-Amzn-Trace-Id: - !!binary | - Um9vdD0xLWY1MWY4NGM1LTNiZjk4YzY0YWMyNmJhNTk1OWJjODgxNjtQYXJlbnQ9YjNmOGZhM2Mz - MDc1NGEzZjtTYW1wbGVkPTE= + Um9vdD0xLTZjNTNiNTMyLTI0MDMzZTUwYzQ0M2JkODY2YTVhODhmMztQYXJlbnQ9MWM4ZDk4NmE2 + Zjk1Y2Y0NTtTYW1wbGVkPTE= amz-sdk-invocation-id: - !!binary | - OTIyMjczMzItY2I5ZS00NGM1LTliZGUtYjU0NmJmODkxYmEy + MmRkMzAxNjUtYTdmOC00MjAzLWJlOGItZmE1ZWEzYmFjOGUy amz-sdk-request: - !!binary | YXR0ZW1wdD0x authorization: - Bearer test_aws_authorization method: POST - uri: https://bedrock-runtime.eu-central-1.amazonaws.com/model/amazon.titan-text-lite-v1/converse + uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/amazon.titan-text-lite-v1/converse response: body: - string: |- - { - "metrics": { - "latencyMs": 811 - }, - "output": { - "message": { - "content": [ - { - "text": "I am happy to assist you today" - } - ], - "role": "assistant" - } - }, - "stopReason": "max_tokens", - "usage": { - "inputTokens": 8, - "outputTokens": 10, - "totalTokens": 18 - } - } + string: '{"metrics":{"latencyMs":742},"output":{"message":{"content":[{"text":"Hey + there! Is there anything else"}],"role":"assistant"}},"stopReason":"max_tokens","usage":{"inputTokens":8,"outputTokens":10,"totalTokens":18}}' headers: Connection: - keep-alive Content-Length: - - '212' + - '215' Content-Type: - application/json Date: - - Tue, 31 Dec 2024 13:20:42 GMT + - Wed, 22 Jan 2025 16:08:41 GMT Set-Cookie: test_set_cookie x-amzn-RequestId: - - 63dfbcb2-3536-4906-b10d-e5b126b3c0ae + - 9fe3b968-40b3-400c-a48d-96fdf682557c status: code: 200 message: OK diff --git a/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_with_invalid_model.yaml b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_with_invalid_model.yaml index ecbfb6bbd0..37c2d08ed0 100644 --- a/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_with_invalid_model.yaml +++ b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_with_invalid_model.yaml @@ -1,18 +1,6 @@ interactions: - request: - body: |- - { - "messages": [ - { - "role": "user", - "content": [ - { - "text": "Say this is a test" - } - ] - } - ] - } + body: '{"messages": [{"role": "user", "content": [{"text": "Say this is a test"}]}]}' headers: Content-Length: - '77' @@ -26,29 +14,26 @@ interactions: aG9uIGNmZy9yZXRyeS1tb2RlI2xlZ2FjeSBCb3RvY29yZS8xLjM1LjU2 X-Amz-Date: - !!binary | - MjAyNTAxMTVUMTEwMTQ3Wg== + MjAyNTAxMjJUMTYwODQxWg== X-Amz-Security-Token: - test_aws_security_token X-Amzn-Trace-Id: - !!binary | - Um9vdD0xLWIzM2JhNTkxLTdkYmQ0ZDZmYTBmZTdmYzc2MTExOThmNztQYXJlbnQ9NzRmNmQ1NTEz - MzkzMzUxNTtTYW1wbGVkPTE= + Um9vdD0xLTY4MzBlNjVhLTY4Y2JlMzA5ZTI2ZDA1ZjA4ZDZkY2M1YjtQYXJlbnQ9NjdlMDRlNjRj + NGZhOTI3MDtTYW1wbGVkPTE= amz-sdk-invocation-id: - !!binary | - NTQ5MmQ0NTktNzhkNi00ZWY4LTlmMDMtZTA5ODhkZGRiZDI5 + N2VhMWVmYzktMzlkYS00NDU1LWJiYTctMDNmYTM1ZWUyODU2 amz-sdk-request: - !!binary | YXR0ZW1wdD0x authorization: - Bearer test_aws_authorization method: POST - uri: https://bedrock-runtime.eu-central-1.amazonaws.com/model/does-not-exist/converse + uri: 
https://bedrock-runtime.us-east-1.amazonaws.com/model/does-not-exist/converse response: body: - string: |- - { - "message": "The provided model identifier is invalid." - } + string: '{"message":"The provided model identifier is invalid."}' headers: Connection: - keep-alive @@ -57,12 +42,12 @@ interactions: Content-Type: - application/json Date: - - Wed, 15 Jan 2025 11:01:47 GMT + - Wed, 22 Jan 2025 16:08:41 GMT Set-Cookie: test_set_cookie x-amzn-ErrorType: - ValidationException:http://internal.amazon.com/coral/com.amazon.bedrock/ x-amzn-RequestId: - - d425bf99-8a4e-4d83-8d77-a48410dd82b2 + - 9ecb3c28-f72f-4350-8746-97c02140ced1 status: code: 400 message: Bad Request diff --git a/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_content[amazon.nova].yaml b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_content[amazon.nova].yaml new file mode 100644 index 0000000000..331c597c54 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_content[amazon.nova].yaml @@ -0,0 +1,58 @@ +interactions: +- request: + body: '{"messages": [{"role": "user", "content": [{"text": "Say this is a test"}]}], + "inferenceConfig": {"max_new_tokens": 10, "temperature": 0.8, "topP": 1, "stopSequences": + ["|"]}, "schemaVersion": "messages-v1"}' + headers: + Content-Length: + - '207' + User-Agent: + - !!binary | + Qm90bzMvMS4zNS41NiBtZC9Cb3RvY29yZSMxLjM1LjU2IHVhLzIuMCBvcy9saW51eCM2LjEuMC0x + MDM0LW9lbSBtZC9hcmNoI3g4Nl82NCBsYW5nL3B5dGhvbiMzLjEwLjEyIG1kL3B5aW1wbCNDUHl0 + aG9uIGNmZy9yZXRyeS1tb2RlI2xlZ2FjeSBCb3RvY29yZS8xLjM1LjU2 + X-Amz-Date: + - !!binary | + MjAyNTAxMjJUMTUyNDA0Wg== + X-Amz-Security-Token: + - test_aws_security_token + X-Amzn-Trace-Id: + - !!binary | + Um9vdD0xLTY0ZGIzYWIxLTc2YWUzYmUxYmQ0NzI4Mzg1ZjdmOTEzZTtQYXJlbnQ9ZGRmYTdlZjI4 + NWNiYTIxNTtTYW1wbGVkPTE= + amz-sdk-invocation-id: + - !!binary | + ZDZlMGIyOTUtYjM5Yi00NGU3LThiMmItZjgyODM2OTlkZTZk + amz-sdk-request: + - !!binary | + YXR0ZW1wdD0x + authorization: + - Bearer test_aws_authorization + method: POST + uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/amazon.nova-micro-v1%3A0/invoke + response: + body: + string: '{"output":{"message":{"content":[{"text":"It sounds like you might + be in the middle of"}],"role":"assistant"}},"stopReason":"max_tokens","usage":{"inputTokens":5,"outputTokens":10,"totalTokens":15}}' + headers: + Connection: + - keep-alive + Content-Length: + - '198' + Content-Type: + - application/json + Date: + - Wed, 22 Jan 2025 15:24:05 GMT + Set-Cookie: test_set_cookie + X-Amzn-Bedrock-Input-Token-Count: + - '5' + X-Amzn-Bedrock-Invocation-Latency: + - '237' + X-Amzn-Bedrock-Output-Token-Count: + - '10' + x-amzn-RequestId: + - 32f3134e-fc64-4db5-94bf-0279159cf79d + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_content[amazon.titan].yaml b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_content[amazon.titan].yaml new file mode 100644 index 0000000000..8eee055a28 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_content[amazon.titan].yaml @@ -0,0 +1,57 @@ +interactions: +- request: + body: '{"inputText": "Say this is a test", "textGenerationConfig": {"maxTokenCount": + 10, "temperature": 0.8, "topP": 1, "stopSequences": ["|"]}}' + headers: + Content-Length: + - '137' 
+ User-Agent: + - !!binary | + Qm90bzMvMS4zNS41NiBtZC9Cb3RvY29yZSMxLjM1LjU2IHVhLzIuMCBvcy9saW51eCM2LjEuMC0x + MDM0LW9lbSBtZC9hcmNoI3g4Nl82NCBsYW5nL3B5dGhvbiMzLjEwLjEyIG1kL3B5aW1wbCNDUHl0 + aG9uIGNmZy9yZXRyeS1tb2RlI2xlZ2FjeSBCb3RvY29yZS8xLjM1LjU2 + X-Amz-Date: + - !!binary | + MjAyNTAxMjJUMTUyNDA1Wg== + X-Amz-Security-Token: + - test_aws_security_token + X-Amzn-Trace-Id: + - !!binary | + Um9vdD0xLWZmMzM4ODA0LWMwMzYyNzgzNjczNjAzMWI0ZTZlZTIwNTtQYXJlbnQ9MmJjZmVlZGE5 + NWVjZWUyYztTYW1wbGVkPTE= + amz-sdk-invocation-id: + - !!binary | + YmZjOGJiMjEtY2Q2MS00MDNmLWE2NzEtZmQ4YmMzNzBkOTJl + amz-sdk-request: + - !!binary | + YXR0ZW1wdD0x + authorization: + - Bearer test_aws_authorization + method: POST + uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/amazon.titan-text-lite-v1/invoke + response: + body: + string: '{"inputTextTokenCount":5,"results":[{"tokenCount":9,"outputText":" + comment\nHello! How are you?","completionReason":"FINISH"}]}' + headers: + Connection: + - keep-alive + Content-Length: + - '127' + Content-Type: + - application/json + Date: + - Wed, 22 Jan 2025 15:24:06 GMT + Set-Cookie: test_set_cookie + X-Amzn-Bedrock-Input-Token-Count: + - '5' + X-Amzn-Bedrock-Invocation-Latency: + - '1104' + X-Amzn-Bedrock-Output-Token-Count: + - '9' + x-amzn-RequestId: + - ef788ecb-b5ed-404e-ace7-de59741cded5 + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_content[anthropic.claude].yaml b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_content[anthropic.claude].yaml new file mode 100644 index 0000000000..ab67c2dc4a --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_content[anthropic.claude].yaml @@ -0,0 +1,58 @@ +interactions: +- request: + body: '{"messages": [{"role": "user", "content": [{"text": "Say this is a test", + "type": "text"}]}], "anthropic_version": "bedrock-2023-05-31", "max_tokens": + 10, "temperature": 0.8, "top_p": 1, "stop_sequences": ["|"]}' + headers: + Content-Length: + - '211' + User-Agent: + - !!binary | + Qm90bzMvMS4zNS41NiBtZC9Cb3RvY29yZSMxLjM1LjU2IHVhLzIuMCBvcy9saW51eCM2LjEuMC0x + MDM0LW9lbSBtZC9hcmNoI3g4Nl82NCBsYW5nL3B5dGhvbiMzLjEwLjEyIG1kL3B5aW1wbCNDUHl0 + aG9uIGNmZy9yZXRyeS1tb2RlI2xlZ2FjeSBCb3RvY29yZS8xLjM1LjU2 + X-Amz-Date: + - !!binary | + MjAyNTAxMjJUMTUyNDA2Wg== + X-Amz-Security-Token: + - test_aws_security_token + X-Amzn-Trace-Id: + - !!binary | + Um9vdD0xLWQ2MDZiNDAzLWFhYzE1Y2I3ODBiOTkwMmIxNGU1NWM4ZjtQYXJlbnQ9YjJmMzRlMThk + ZWE4NjdkMztTYW1wbGVkPTE= + amz-sdk-invocation-id: + - !!binary | + YTlhN2I5YzEtNmEyNy00MDFjLTljMWUtM2EyN2YxZGZhMjQ4 + amz-sdk-request: + - !!binary | + YXR0ZW1wdD0x + authorization: + - Bearer test_aws_authorization + method: POST + uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/anthropic.claude-v2/invoke + response: + body: + string: '{"id":"msg_bdrk_01FJozYaVhprPHUzRZ2uVcMg","type":"message","role":"assistant","model":"claude-2.0","content":[{"type":"text","text":"OK, + I heard you say \"Say this is"}],"stop_reason":"max_tokens","stop_sequence":null,"usage":{"input_tokens":14,"output_tokens":10}}' + headers: + Connection: + - keep-alive + Content-Length: + - '265' + Content-Type: + - application/json + Date: + - Wed, 22 Jan 2025 15:24:07 GMT + Set-Cookie: test_set_cookie + X-Amzn-Bedrock-Input-Token-Count: + - '14' + X-Amzn-Bedrock-Invocation-Latency: + - '595' + X-Amzn-Bedrock-Output-Token-Count: + - '10' + 
x-amzn-RequestId: + - 5057dca6-bd9d-4e1e-9093-2bbbac1a19b4 + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_invalid_model.yaml b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_invalid_model.yaml new file mode 100644 index 0000000000..fc1ba0425e --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_invalid_model.yaml @@ -0,0 +1,51 @@ +interactions: +- request: + body: null + headers: + Content-Length: + - '0' + User-Agent: + - !!binary | + Qm90bzMvMS4zNS41NiBtZC9Cb3RvY29yZSMxLjM1LjU2IHVhLzIuMCBvcy9saW51eCM2LjEuMC0x + MDM0LW9lbSBtZC9hcmNoI3g4Nl82NCBsYW5nL3B5dGhvbiMzLjEwLjEyIG1kL3B5aW1wbCNDUHl0 + aG9uIGNmZy9yZXRyeS1tb2RlI2xlZ2FjeSBCb3RvY29yZS8xLjM1LjU2 + X-Amz-Date: + - !!binary | + MjAyNTAxMjJUMTUyNDA3Wg== + X-Amz-Security-Token: + - test_aws_security_token + X-Amzn-Trace-Id: + - !!binary | + Um9vdD0xLWVmZWZjYTdkLTM0OTI0ZjRmYTVlMDJmOTRhODFiY2M3NjtQYXJlbnQ9YWZiYmEwYjRh + MmU1NTQ0NDtTYW1wbGVkPTE= + amz-sdk-invocation-id: + - !!binary | + ODI0ZDAwZDgtMmE1Yy00Mzk4LWIwYTItOWY5ZmNlYjQ2MGNh + amz-sdk-request: + - !!binary | + YXR0ZW1wdD0x + authorization: + - Bearer test_aws_authorization + method: POST + uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/does-not-exist/invoke + response: + body: + string: '{"message":"The provided model identifier is invalid."}' + headers: + Connection: + - keep-alive + Content-Length: + - '55' + Content-Type: + - application/json + Date: + - Wed, 22 Jan 2025 15:24:08 GMT + Set-Cookie: test_set_cookie + x-amzn-ErrorType: + - ValidationException:http://internal.amazon.com/coral/com.amazon.bedrock/ + x-amzn-RequestId: + - 9739ef10-1ae7-4694-ba63-3a39e7ca02c1 + status: + code: 400 + message: Bad Request +version: 1 diff --git a/instrumentation/opentelemetry-instrumentation-botocore/tests/conftest.py b/instrumentation/opentelemetry-instrumentation-botocore/tests/conftest.py index 271c540da7..73aa055de8 100644 --- a/instrumentation/opentelemetry-instrumentation-botocore/tests/conftest.py +++ b/instrumentation/opentelemetry-instrumentation-botocore/tests/conftest.py @@ -1,11 +1,9 @@ """Unit tests configuration module.""" -import json import os import boto3 import pytest -import yaml from opentelemetry.instrumentation.botocore import BotocoreInstrumentor from opentelemetry.instrumentation.botocore.environment_variables import ( @@ -66,7 +64,7 @@ def environment(): if not os.getenv("AWS_SESSION_TOKEN"): os.environ["AWS_SESSION_TOKEN"] = "test_aws_session_token" if not os.getenv("AWS_DEFAULT_REGION"): - os.environ["AWS_DEFAULT_REGION"] = "eu-central-1" + os.environ["AWS_DEFAULT_REGION"] = "us-east-1" @pytest.fixture(scope="module") @@ -115,73 +113,6 @@ def instrument_with_content(tracer_provider, event_logger_provider): instrumentor.uninstrument() -class LiteralBlockScalar(str): - """Formats the string as a literal block scalar, preserving whitespace and - without interpreting escape characters""" - - -def literal_block_scalar_presenter(dumper, data): - """Represents a scalar string as a literal block, via '|' syntax""" - return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="|") - - -yaml.add_representer(LiteralBlockScalar, literal_block_scalar_presenter) - - -def process_string_value(string_value): - """Pretty-prints JSON or returns long strings as a LiteralBlockScalar""" - try: - json_data = json.loads(string_value) - return 
LiteralBlockScalar(json.dumps(json_data, indent=2)) - except (ValueError, TypeError): - if len(string_value) > 80: - return LiteralBlockScalar(string_value) - return string_value - - -def convert_body_to_literal(data): - """Searches the data for body strings, attempting to pretty-print JSON""" - if isinstance(data, dict): - for key, value in data.items(): - # Handle response body case (e.g., response.body.string) - if key == "body" and isinstance(value, dict) and "string" in value: - value["string"] = process_string_value(value["string"]) - - # Handle request body case (e.g., request.body) - elif key == "body" and isinstance(value, str): - data[key] = process_string_value(value) - - else: - convert_body_to_literal(value) - - elif isinstance(data, list): - for idx, choice in enumerate(data): - data[idx] = convert_body_to_literal(choice) - - return data - - -class PrettyPrintJSONBody: - """This makes request and response body recordings more readable.""" - - @staticmethod - def serialize(cassette_dict): - cassette_dict = convert_body_to_literal(cassette_dict) - return yaml.dump( - cassette_dict, default_flow_style=False, allow_unicode=True - ) - - @staticmethod - def deserialize(cassette_string): - return yaml.load(cassette_string, Loader=yaml.Loader) - - -@pytest.fixture(scope="module", autouse=True) -def fixture_vcr(vcr): - vcr.register_serializer("yaml", PrettyPrintJSONBody) - return vcr - - def scrub_response_headers(response): """ This scrubs sensitive response headers. Note they are case-sensitive! diff --git a/instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_bedrock.py b/instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_bedrock.py index 8de7721bc9..9ee625eb3e 100644 --- a/instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_bedrock.py +++ b/instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_bedrock.py @@ -14,6 +14,8 @@ from __future__ import annotations +import json + import boto3 import pytest @@ -22,7 +24,10 @@ ) from opentelemetry.trace.status import StatusCode -from .bedrock_utils import assert_completion_attributes +from .bedrock_utils import ( + assert_completion_attributes, + assert_completion_attributes_from_streaming_body, +) BOTO3_VERSION = tuple(int(x) for x in boto3.__version__.split(".")) @@ -100,3 +105,126 @@ def test_converse_with_invalid_model( logs = log_exporter.get_finished_logs() assert len(logs) == 0 + + +def get_invoke_model_body( + llm_model, + max_tokens=None, + temperature=None, + top_p=None, + stop_sequences=None, +): + def set_if_not_none(config, key, value): + if value is not None: + config[key] = value + + prompt = "Say this is a test" + if llm_model == "amazon.nova-micro-v1:0": + config = {} + set_if_not_none(config, "max_new_tokens", max_tokens) + set_if_not_none(config, "temperature", temperature) + set_if_not_none(config, "topP", top_p) + set_if_not_none(config, "stopSequences", stop_sequences) + body = { + "messages": [{"role": "user", "content": [{"text": prompt}]}], + "inferenceConfig": config, + "schemaVersion": "messages-v1", + } + elif llm_model == "amazon.titan-text-lite-v1": + config = {} + set_if_not_none(config, "maxTokenCount", max_tokens) + set_if_not_none(config, "temperature", temperature) + set_if_not_none(config, "topP", top_p) + set_if_not_none(config, "stopSequences", stop_sequences) + body = {"inputText": prompt, "textGenerationConfig": config} + elif llm_model == "anthropic.claude-v2": + body = { + "messages": [ + {"role": "user", 
"content": [{"text": prompt, "type": "text"}]} + ], + "anthropic_version": "bedrock-2023-05-31", + } + set_if_not_none(body, "max_tokens", max_tokens) + set_if_not_none(body, "temperature", temperature) + set_if_not_none(body, "top_p", top_p) + set_if_not_none(body, "stop_sequences", stop_sequences) + else: + raise ValueError(f"No config for {llm_model}") + + return json.dumps(body) + + +def get_model_name_from_family(llm_model): + llm_model_name = { + "amazon.titan": "amazon.titan-text-lite-v1", + "amazon.nova": "amazon.nova-micro-v1:0", + "anthropic.claude": "anthropic.claude-v2", + } + return llm_model_name[llm_model] + + +@pytest.mark.parametrize( + "model_family", + ["amazon.nova", "amazon.titan", "anthropic.claude"], +) +@pytest.mark.vcr() +def test_invoke_model_with_content( + span_exporter, + log_exporter, + bedrock_runtime_client, + instrument_with_content, + model_family, +): + llm_model_value = get_model_name_from_family(model_family) + max_tokens, temperature, top_p, stop_sequences = 10, 0.8, 1, ["|"] + body = get_invoke_model_body( + llm_model_value, max_tokens, temperature, top_p, stop_sequences + ) + response = bedrock_runtime_client.invoke_model( + body=body, + modelId=llm_model_value, + ) + + (span,) = span_exporter.get_finished_spans() + assert_completion_attributes_from_streaming_body( + span, + llm_model_value, + response, + "text_completion" if model_family == "amazon.titan" else "chat", + top_p, + temperature, + max_tokens, + stop_sequences, + ) + + logs = log_exporter.get_finished_logs() + assert len(logs) == 0 + + +@pytest.mark.vcr() +def test_invoke_model_with_invalid_model( + span_exporter, + log_exporter, + bedrock_runtime_client, + instrument_with_content, +): + llm_model_value = "does-not-exist" + with pytest.raises(bedrock_runtime_client.exceptions.ClientError): + bedrock_runtime_client.invoke_model( + body=b"", + modelId=llm_model_value, + ) + + (span,) = span_exporter.get_finished_spans() + assert_completion_attributes_from_streaming_body( + span, + llm_model_value, + None, + "chat", + ) + + assert span.status.status_code == StatusCode.ERROR + assert span.attributes[ERROR_TYPE] == "ValidationException" + + logs = log_exporter.get_finished_logs() + assert len(logs) == 0 From 0bb1c42a78733765852da4d3e4eb9dda77f972c6 Mon Sep 17 00:00:00 2001 From: Riccardo Magliocchetti Date: Mon, 27 Jan 2025 10:01:07 +0100 Subject: [PATCH 07/16] botocore: add basic tracing for bedrock ConverseStream (#3204) * Add tracing for ConverseStream * Add converse stream example --- CHANGELOG.md | 2 + .../bedrock-runtime/zero-code/README.rst | 2 + .../zero-code/converse_stream.py | 26 +++++ .../instrumentation/botocore/__init__.py | 4 + .../botocore/extensions/bedrock.py | 32 +++++- .../botocore/extensions/bedrock_utils.py | 74 +++++++++++++ .../botocore/extensions/types.py | 8 ++ .../tests/bedrock_utils.py | 30 +++++- .../test_converse_stream_with_content.yaml | 69 ++++++++++++ ...st_converse_stream_with_invalid_model.yaml | 54 ++++++++++ .../tests/test_botocore_bedrock.py | 100 +++++++++++++++++- 11 files changed, 395 insertions(+), 6 deletions(-) create mode 100644 instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/converse_stream.py create mode 100644 instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock_utils.py create mode 100644 instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_stream_with_content.yaml create mode 100644 
instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_stream_with_invalid_model.yaml diff --git a/CHANGELOG.md b/CHANGELOG.md index 72c372ecb9..80da336aee 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -45,6 +45,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ([#3161](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3161)) - `opentelemetry-opentelemetry-botocore` Add basic support for GenAI attributes for AWS Bedrock InvokeModel API ([#3200](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3200)) +- `opentelemetry-opentelemetry-botocore` Add basic support for GenAI attributes for AWS Bedrock ConverseStream API + ([#3204](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3204)) ### Fixed diff --git a/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/README.rst b/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/README.rst index 37e1db9b30..cdd678c765 100644 --- a/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/README.rst +++ b/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/README.rst @@ -18,6 +18,8 @@ Available examples ------------------ - `converse.py` uses `bedrock-runtime` `Converse API _`. +- `converse_stream.py` uses `bedrock-runtime` `ConverseStream API _`. +- `invoke_model.py` uses `bedrock-runtime` `InvokeModel API _`. Setup ----- diff --git a/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/converse_stream.py b/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/converse_stream.py new file mode 100644 index 0000000000..6bc0b33fdf --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/converse_stream.py @@ -0,0 +1,26 @@ +import os + +import boto3 + + +def main(): + client = boto3.client("bedrock-runtime") + stream = client.converse_stream( + modelId=os.getenv("CHAT_MODEL", "amazon.titan-text-lite-v1"), + messages=[ + { + "role": "user", + "content": [{"text": "Write a short poem on OpenTelemetry."}], + }, + ], + ) + + response = "" + for event in stream["stream"]: + if "contentBlockDelta" in event: + response += event["contentBlockDelta"]["delta"]["text"] + print(response) + + +if __name__ == "__main__": + main() diff --git a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/__init__.py b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/__init__.py index 0481b248aa..b5598a3cf7 100644 --- a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/__init__.py @@ -188,11 +188,15 @@ def _patched_api_call(self, original_func, instance, args, kwargs): } _safe_invoke(extension.extract_attributes, attributes) + end_span_on_exit = extension.should_end_span_on_exit() with self._tracer.start_as_current_span( call_context.span_name, kind=call_context.span_kind, attributes=attributes, + # tracing streaming services require to close the span manually + # at a later time after the stream has been consumed + end_on_exit=end_span_on_exit, ) as span: _safe_invoke(extension.before_service_call, span) self._call_request_hook(span, 
call_context) diff --git a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py index 66021d34ff..fb664bb1e4 100644 --- a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py +++ b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py @@ -23,8 +23,12 @@ import logging from typing import Any +from botocore.eventstream import EventStream from botocore.response import StreamingBody +from opentelemetry.instrumentation.botocore.extensions.bedrock_utils import ( + ConverseStreamWrapper, +) from opentelemetry.instrumentation.botocore.extensions.types import ( _AttributeMapT, _AwsSdkExtension, @@ -62,7 +66,14 @@ class _BedrockRuntimeExtension(_AwsSdkExtension): Amazon Bedrock Runtime. """ - _HANDLED_OPERATIONS = {"Converse", "InvokeModel"} + _HANDLED_OPERATIONS = {"Converse", "ConverseStream", "InvokeModel"} + _DONT_CLOSE_SPAN_ON_END_OPERATIONS = {"ConverseStream"} + + def should_end_span_on_exit(self): + return ( + self._call_context.operation + not in self._DONT_CLOSE_SPAN_ON_END_OPERATIONS + ) def extract_attributes(self, attributes: _AttributeMapT): if self._call_context.operation not in self._HANDLED_OPERATIONS: @@ -77,7 +88,7 @@ def extract_attributes(self, attributes: _AttributeMapT): GenAiOperationNameValues.CHAT.value ) - # Converse + # Converse / ConverseStream if inference_config := self._call_context.params.get( "inferenceConfig" ): @@ -251,6 +262,20 @@ def on_success(self, span: Span, result: dict[str, Any]): return if not span.is_recording(): + if not self.should_end_span_on_exit(): + span.end() + return + + # ConverseStream + if "stream" in result and isinstance(result["stream"], EventStream): + + def stream_done_callback(response): + self._converse_on_success(span, response) + span.end() + + result["stream"] = ConverseStreamWrapper( + result["stream"], stream_done_callback + ) return # Converse @@ -328,3 +353,6 @@ def on_error(self, span: Span, exception: _BotoClientErrorT): span.set_status(Status(StatusCode.ERROR, str(exception))) if span.is_recording(): span.set_attribute(ERROR_TYPE, type(exception).__qualname__) + + if not self.should_end_span_on_exit(): + span.end() diff --git a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock_utils.py b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock_utils.py new file mode 100644 index 0000000000..55d90a2b9f --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock_utils.py @@ -0,0 +1,74 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Includes work from: +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +from __future__ import annotations + +from botocore.eventstream import EventStream +from wrapt import ObjectProxy + + +# pylint: disable=abstract-method +class ConverseStreamWrapper(ObjectProxy): + """Wrapper for botocore.eventstream.EventStream""" + + def __init__( + self, + stream: EventStream, + stream_done_callback, + ): + super().__init__(stream) + + self._stream_done_callback = stream_done_callback + # accumulating things in the same shape of non-streaming version + # {"usage": {"inputTokens": 0, "outputTokens": 0}, "stopReason": "finish"} + self._response = {} + + def __iter__(self): + for event in self.__wrapped__: + self._process_event(event) + yield event + + def _process_event(self, event): + if "messageStart" in event: + # {'messageStart': {'role': 'assistant'}} + pass + + if "contentBlockDelta" in event: + # {'contentBlockDelta': {'delta': {'text': "Hello"}, 'contentBlockIndex': 0}} + pass + + if "contentBlockStop" in event: + # {'contentBlockStop': {'contentBlockIndex': 0}} + pass + + if "messageStop" in event: + # {'messageStop': {'stopReason': 'end_turn'}} + if stop_reason := event["messageStop"].get("stopReason"): + self._response["stopReason"] = stop_reason + + if "metadata" in event: + # {'metadata': {'usage': {'inputTokens': 12, 'outputTokens': 15, 'totalTokens': 27}, 'metrics': {'latencyMs': 2980}}} + if usage := event["metadata"].get("usage"): + self._response["usage"] = {} + if input_tokens := usage.get("inputTokens"): + self._response["usage"]["inputTokens"] = input_tokens + + if output_tokens := usage.get("outputTokens"): + self._response["usage"]["outputTokens"] = output_tokens + + self._stream_done_callback(self._response) diff --git a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/types.py b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/types.py index a3c73af65c..2927c67e93 100644 --- a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/types.py +++ b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/types.py @@ -101,6 +101,14 @@ def should_trace_service_call(self) -> bool: # pylint:disable=no-self-use """ return True + def should_end_span_on_exit(self) -> bool: # pylint:disable=no-self-use + """Returns if the span should be closed automatically on exit + + Extensions might override this function to disable automatic closing + of the span if they need to close it at a later time themselves. + """ + return True + def extract_attributes(self, attributes: _AttributeMapT): """Callback which gets invoked before the span is created. 
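Taken together, the pieces above follow one pattern: when `should_end_span_on_exit()` returns False the span is left open when the instrumented call returns, the returned `EventStream` is wrapped, and the wrapper ends the span from a callback once the stream has been consumed. The following is a minimal, self-contained sketch of that pattern only; `_RecordingSpan`, `_StreamWrapper`, and the hard-coded event dicts are illustrative stand-ins and are not part of the botocore instrumentation.

class _RecordingSpan:
    """Stand-in for an OpenTelemetry span; only records whether end() was called."""

    def __init__(self):
        self.ended = False

    def end(self):
        self.ended = True


class _StreamWrapper:
    """Wraps an iterable of Bedrock-style events and fires a callback once exhausted."""

    def __init__(self, stream, done_callback):
        self._stream = stream
        self._done_callback = done_callback
        self._response = {}

    def __iter__(self):
        for event in self._stream:
            # accumulate the same shape as the non-streaming Converse response
            if "messageStop" in event:
                self._response["stopReason"] = event["messageStop"].get("stopReason")
            if "metadata" in event:
                self._response["usage"] = event["metadata"].get("usage", {})
            yield event
        self._done_callback(self._response)


span = _RecordingSpan()
events = [
    {"contentBlockDelta": {"delta": {"text": "Hello"}, "contentBlockIndex": 0}},
    {"messageStop": {"stopReason": "max_tokens"}},
    {"metadata": {"usage": {"inputTokens": 8, "outputTokens": 10, "totalTokens": 18}}},
]

wrapped = _StreamWrapper(events, lambda response: span.end())
for _ in wrapped:  # only consuming the stream triggers the callback that ends the span
    pass
assert span.ended

In this sketch the callback fires after the last event is yielded, whereas the real wrapper fires it while processing the final metadata event; either way the span lifetime is tied to stream consumption rather than to the API call returning.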
diff --git a/instrumentation/opentelemetry-instrumentation-botocore/tests/bedrock_utils.py b/instrumentation/opentelemetry-instrumentation-botocore/tests/bedrock_utils.py index 460d3a4fb5..1467817e2e 100644 --- a/instrumentation/opentelemetry-instrumentation-botocore/tests/bedrock_utils.py +++ b/instrumentation/opentelemetry-instrumentation-botocore/tests/bedrock_utils.py @@ -91,7 +91,7 @@ def assert_completion_attributes_from_streaming_body( ) -def assert_completion_attributes( +def assert_converse_completion_attributes( span: ReadableSpan, request_model: str, response: dict[str, Any] | None, @@ -128,6 +128,34 @@ def assert_completion_attributes( ) +def assert_converse_stream_completion_attributes( + span: ReadableSpan, + request_model: str, + input_tokens: int | None = None, + output_tokens: int | None = None, + finish_reason: tuple[str] | None = None, + operation_name: str = "chat", + request_top_p: int | None = None, + request_temperature: int | None = None, + request_max_tokens: int | None = None, + request_stop_sequences: list[str] | None = None, +): + return assert_all_attributes( + span, + request_model, + input_tokens, + output_tokens, + finish_reason, + operation_name, + request_top_p, + request_temperature, + request_max_tokens, + tuple(request_stop_sequences) + if request_stop_sequences is not None + else request_stop_sequences, + ) + + def assert_equal_or_not_present(value, attribute_name, span): if value is not None: assert value == span.attributes[attribute_name] diff --git a/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_stream_with_content.yaml b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_stream_with_content.yaml new file mode 100644 index 0000000000..96976f1e7c --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_stream_with_content.yaml @@ -0,0 +1,69 @@ +interactions: +- request: + body: '{"messages": [{"role": "user", "content": [{"text": "Say this is a test"}]}], + "inferenceConfig": {"maxTokens": 10, "temperature": 0.8, "topP": 1, "stopSequences": + ["|"]}}' + headers: + Content-Length: + - '170' + Content-Type: + - !!binary | + YXBwbGljYXRpb24vanNvbg== + User-Agent: + - !!binary | + Qm90bzMvMS4zNS41NiBtZC9Cb3RvY29yZSMxLjM1LjU2IHVhLzIuMCBvcy9saW51eCM2LjEuMC0x + MDM0LW9lbSBtZC9hcmNoI3g4Nl82NCBsYW5nL3B5dGhvbiMzLjEwLjEyIG1kL3B5aW1wbCNDUHl0 + aG9uIGNmZy9yZXRyeS1tb2RlI2xlZ2FjeSBCb3RvY29yZS8xLjM1LjU2 + X-Amz-Date: + - !!binary | + MjAyNTAxMjNUMDk1MTU2Wg== + X-Amz-Security-Token: + - test_aws_security_token + X-Amzn-Trace-Id: + - !!binary | + Um9vdD0xLTA0YmY4MjVjLTAxMTY5NjdhYWM1NmIxM2RlMDI1N2QwMjtQYXJlbnQ9MDdkM2U3N2Rl + OGFjMzJhNDtTYW1wbGVkPTE= + amz-sdk-invocation-id: + - !!binary | + ZGQ1MTZiNTEtOGU1Yi00NGYyLTk5MzMtZjAwYzBiOGFkYWYw + amz-sdk-request: + - !!binary | + YXR0ZW1wdD0x + authorization: + - Bearer test_aws_authorization + method: POST + uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/amazon.titan-text-lite-v1/converse-stream + response: + body: + string: !!binary | + AAAAlAAAAFLEwW5hCzpldmVudC10eXBlBwAMbWVzc2FnZVN0YXJ0DTpjb250ZW50LXR5cGUHABBh + cHBsaWNhdGlvbi9qc29uDTptZXNzYWdlLXR5cGUHAAVldmVudHsicCI6ImFiY2RlZmdoaWprbG1u + b3BxcnN0dXZ3Iiwicm9sZSI6ImFzc2lzdGFudCJ9P+wfRAAAAMQAAABXjLhVJQs6ZXZlbnQtdHlw + ZQcAEWNvbnRlbnRCbG9ja0RlbHRhDTpjb250ZW50LXR5cGUHABBhcHBsaWNhdGlvbi9qc29uDTpt + ZXNzYWdlLXR5cGUHAAVldmVudHsiY29udGVudEJsb2NrSW5kZXgiOjAsImRlbHRhIjp7InRleHQi + 
OiJIaSEgSG93IGNhbiBJIGhlbHAgeW91In0sInAiOiJhYmNkZWZnaGlqa2xtbm9wcXJzdHUifeBJ + 9mIAAACJAAAAVlvc+UsLOmV2ZW50LXR5cGUHABBjb250ZW50QmxvY2tTdG9wDTpjb250ZW50LXR5 + cGUHABBhcHBsaWNhdGlvbi9qc29uDTptZXNzYWdlLXR5cGUHAAVldmVudHsiY29udGVudEJsb2Nr + SW5kZXgiOjAsInAiOiJhYmNkZSJ95xzwrwAAAKcAAABRu0n9jQs6ZXZlbnQtdHlwZQcAC21lc3Nh + Z2VTdG9wDTpjb250ZW50LXR5cGUHABBhcHBsaWNhdGlvbi9qc29uDTptZXNzYWdlLXR5cGUHAAVl + dmVudHsicCI6ImFiY2RlZmdoaWprbG1ub3BxcnN0dXZ3eHl6QUJDREVGR0hJSiIsInN0b3BSZWFz + b24iOiJtYXhfdG9rZW5zIn1LR3pNAAAAygAAAE5X40OECzpldmVudC10eXBlBwAIbWV0YWRhdGEN + OmNvbnRlbnQtdHlwZQcAEGFwcGxpY2F0aW9uL2pzb24NOm1lc3NhZ2UtdHlwZQcABWV2ZW50eyJt + ZXRyaWNzIjp7ImxhdGVuY3lNcyI6NjA4fSwicCI6ImFiY2RlZmdoaWprIiwidXNhZ2UiOnsiaW5w + dXRUb2tlbnMiOjgsIm91dHB1dFRva2VucyI6MTAsInRvdGFsVG9rZW5zIjoxOH19iiQr+w== + headers: + Connection: + - keep-alive + Content-Type: + - application/vnd.amazon.eventstream + Date: + - Thu, 23 Jan 2025 09:51:56 GMT + Set-Cookie: test_set_cookie + Transfer-Encoding: + - chunked + x-amzn-RequestId: + - 2b74a5d3-615a-4f81-b00f-f0b10a618e23 + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_stream_with_invalid_model.yaml b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_stream_with_invalid_model.yaml new file mode 100644 index 0000000000..59929a1bc7 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_stream_with_invalid_model.yaml @@ -0,0 +1,54 @@ +interactions: +- request: + body: '{"messages": [{"role": "user", "content": [{"text": "Say this is a test"}]}]}' + headers: + Content-Length: + - '77' + Content-Type: + - !!binary | + YXBwbGljYXRpb24vanNvbg== + User-Agent: + - !!binary | + Qm90bzMvMS4zNS41NiBtZC9Cb3RvY29yZSMxLjM1LjU2IHVhLzIuMCBvcy9saW51eCM2LjEuMC0x + MDM0LW9lbSBtZC9hcmNoI3g4Nl82NCBsYW5nL3B5dGhvbiMzLjEwLjEyIG1kL3B5aW1wbCNDUHl0 + aG9uIGNmZy9yZXRyeS1tb2RlI2xlZ2FjeSBCb3RvY29yZS8xLjM1LjU2 + X-Amz-Date: + - !!binary | + MjAyNTAxMjNUMDk1MTU3Wg== + X-Amz-Security-Token: + - test_aws_security_token + X-Amzn-Trace-Id: + - !!binary | + Um9vdD0xLTI5NzA1OTZhLTEyZWI5NDk2ODA1ZjZhYzE5YmU3ODM2NztQYXJlbnQ9Y2M0OTA0YWE2 + ZjQ2NmYxYTtTYW1wbGVkPTE= + amz-sdk-invocation-id: + - !!binary | + MjQzZWY2ZDgtNGJhNy00YTVlLWI0MGEtYThiNDE2ZDIzYjhk + amz-sdk-request: + - !!binary | + YXR0ZW1wdD0x + authorization: + - Bearer test_aws_authorization + method: POST + uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/does-not-exist/converse-stream + response: + body: + string: '{"message":"The provided model identifier is invalid."}' + headers: + Connection: + - keep-alive + Content-Length: + - '55' + Content-Type: + - application/json + Date: + - Thu, 23 Jan 2025 09:51:57 GMT + Set-Cookie: test_set_cookie + x-amzn-ErrorType: + - ValidationException:http://internal.amazon.com/coral/com.amazon.bedrock/ + x-amzn-RequestId: + - 358b122c-d045-4d8f-a5bb-b0bd8cf6ee59 + status: + code: 400 + message: Bad Request +version: 1 diff --git a/instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_bedrock.py b/instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_bedrock.py index 9ee625eb3e..ce3b4375e9 100644 --- a/instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_bedrock.py +++ b/instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_bedrock.py @@ -25,8 +25,9 @@ from opentelemetry.trace.status import StatusCode from .bedrock_utils import ( - 
assert_completion_attributes, assert_completion_attributes_from_streaming_body, + assert_converse_completion_attributes, + assert_converse_stream_completion_attributes, ) BOTO3_VERSION = tuple(int(x) for x in boto3.__version__.split(".")) @@ -58,7 +59,7 @@ def test_converse_with_content( ) (span,) = span_exporter.get_finished_spans() - assert_completion_attributes( + assert_converse_completion_attributes( span, llm_model_value, response, @@ -93,7 +94,7 @@ def test_converse_with_invalid_model( ) (span,) = span_exporter.get_finished_spans() - assert_completion_attributes( + assert_converse_completion_attributes( span, llm_model_value, None, @@ -107,6 +108,99 @@ def test_converse_with_invalid_model( assert len(logs) == 0 +@pytest.mark.skipif( + BOTO3_VERSION < (1, 35, 56), reason="ConverseStream API not available" +) +@pytest.mark.vcr() +def test_converse_stream_with_content( + span_exporter, + log_exporter, + bedrock_runtime_client, + instrument_with_content, +): + # pylint:disable=too-many-locals + messages = [{"role": "user", "content": [{"text": "Say this is a test"}]}] + + llm_model_value = "amazon.titan-text-lite-v1" + max_tokens, temperature, top_p, stop_sequences = 10, 0.8, 1, ["|"] + response = bedrock_runtime_client.converse_stream( + messages=messages, + modelId=llm_model_value, + inferenceConfig={ + "maxTokens": max_tokens, + "temperature": temperature, + "topP": top_p, + "stopSequences": stop_sequences, + }, + ) + + # consume the stream in order to have it traced + finish_reason = None + input_tokens, output_tokens = None, None + text = "" + for event in response["stream"]: + if "contentBlockDelta" in event: + text += event["contentBlockDelta"]["delta"]["text"] + if "messageStop" in event: + finish_reason = (event["messageStop"]["stopReason"],) + if "metadata" in event: + usage = event["metadata"]["usage"] + input_tokens = usage["inputTokens"] + output_tokens = usage["outputTokens"] + + assert text + + (span,) = span_exporter.get_finished_spans() + assert_converse_stream_completion_attributes( + span, + llm_model_value, + input_tokens, + output_tokens, + finish_reason, + "chat", + top_p, + temperature, + max_tokens, + stop_sequences, + ) + + logs = log_exporter.get_finished_logs() + assert len(logs) == 0 + + +@pytest.mark.skipif( + BOTO3_VERSION < (1, 35, 56), reason="ConverseStream API not available" +) +@pytest.mark.vcr() +def test_converse_stream_with_invalid_model( + span_exporter, + log_exporter, + bedrock_runtime_client, + instrument_with_content, +): + messages = [{"role": "user", "content": [{"text": "Say this is a test"}]}] + + llm_model_value = "does-not-exist" + with pytest.raises(bedrock_runtime_client.exceptions.ValidationException): + bedrock_runtime_client.converse_stream( + messages=messages, + modelId=llm_model_value, + ) + + (span,) = span_exporter.get_finished_spans() + assert_converse_stream_completion_attributes( + span, + llm_model_value, + operation_name="chat", + ) + + assert span.status.status_code == StatusCode.ERROR + assert span.attributes[ERROR_TYPE] == "ValidationException" + + logs = log_exporter.get_finished_logs() + assert len(logs) == 0 + + def get_invoke_model_body( llm_model, max_tokens=None, From 5478a0b77a1df4c74d72e6c080afa52f99e0bee8 Mon Sep 17 00:00:00 2001 From: guillaumep Date: Mon, 27 Jan 2025 10:12:21 -0500 Subject: [PATCH 08/16] Add pymssql instrumentation (#394) * Add pymssql instrumentation * Run tox -e generate * tox -e generate-workflows --------- Co-authored-by: Riccardo Magliocchetti --- .github/component_owners.yml | 3 + 
.github/workflows/core_contrib_test_0.yml | 22 ++ .github/workflows/lint_0.yml | 18 ++ .github/workflows/test_1.yml | 216 +++++++++--------- .github/workflows/test_2.yml | 108 +++++++++ CHANGELOG.md | 4 +- docs-requirements.txt | 1 + docs/instrumentation/pymssql/pymssql.rst | 7 + instrumentation/README.md | 1 + .../instrumentation/dbapi/__init__.py | 12 +- .../tests/test_dbapi_integration.py | 114 +++++---- .../LICENSE | 201 ++++++++++++++++ .../README.rst | 21 ++ .../pyproject.toml | 56 +++++ .../instrumentation/pymssql/__init__.py | 208 +++++++++++++++++ .../instrumentation/pymssql/package.py | 16 ++ .../instrumentation/pymssql/version.py | 15 ++ .../test-requirements.txt | 10 + .../tests/__init__.py | 0 .../tests/test_pymssql_integration.py | 184 +++++++++++++++ .../pyproject.toml | 1 + .../instrumentation/bootstrap_gen.py | 4 + tox.ini | 13 ++ 23 files changed, 1080 insertions(+), 155 deletions(-) create mode 100644 docs/instrumentation/pymssql/pymssql.rst create mode 100644 instrumentation/opentelemetry-instrumentation-pymssql/LICENSE create mode 100644 instrumentation/opentelemetry-instrumentation-pymssql/README.rst create mode 100644 instrumentation/opentelemetry-instrumentation-pymssql/pyproject.toml create mode 100644 instrumentation/opentelemetry-instrumentation-pymssql/src/opentelemetry/instrumentation/pymssql/__init__.py create mode 100644 instrumentation/opentelemetry-instrumentation-pymssql/src/opentelemetry/instrumentation/pymssql/package.py create mode 100644 instrumentation/opentelemetry-instrumentation-pymssql/src/opentelemetry/instrumentation/pymssql/version.py create mode 100644 instrumentation/opentelemetry-instrumentation-pymssql/test-requirements.txt create mode 100644 instrumentation/opentelemetry-instrumentation-pymssql/tests/__init__.py create mode 100644 instrumentation/opentelemetry-instrumentation-pymssql/tests/test_pymssql_integration.py diff --git a/.github/component_owners.yml b/.github/component_owners.yml index 5cbb6aa402..81a9647788 100644 --- a/.github/component_owners.yml +++ b/.github/component_owners.yml @@ -61,6 +61,9 @@ components: instrumentation/opentelemetry-instrumentation-psycopg: - federicobond + instrumentation/opentelemetry-instrumentation-pymssql: + - guillaumep + instrumentation/opentelemetry-instrumentation-aiokafka: - dimastbk diff --git a/.github/workflows/core_contrib_test_0.yml b/.github/workflows/core_contrib_test_0.yml index 6a70ce8380..a8336e093f 100644 --- a/.github/workflows/core_contrib_test_0.yml +++ b/.github/workflows/core_contrib_test_0.yml @@ -1273,6 +1273,28 @@ jobs: - name: Run tests run: tox -e py38-test-instrumentation-pymysql -- -ra + py38-test-instrumentation-pymssql: + name: instrumentation-pymssql + runs-on: ubuntu-latest + steps: + - name: Checkout contrib repo @ SHA - ${{ env.CONTRIB_REPO_SHA }} + uses: actions/checkout@v4 + with: + repository: open-telemetry/opentelemetry-python-contrib + ref: ${{ env.CONTRIB_REPO_SHA }} + + - name: Set up Python 3.8 + uses: actions/setup-python@v5 + with: + python-version: "3.8" + architecture: "x64" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py38-test-instrumentation-pymssql -- -ra + py38-test-instrumentation-pyramid: name: instrumentation-pyramid runs-on: ubuntu-latest diff --git a/.github/workflows/lint_0.yml b/.github/workflows/lint_0.yml index 34db823570..cc7cfd536f 100644 --- a/.github/workflows/lint_0.yml +++ b/.github/workflows/lint_0.yml @@ -664,6 +664,24 @@ jobs: - name: Run tests run: tox -e lint-instrumentation-pymysql + 
lint-instrumentation-pymssql: + name: instrumentation-pymssql + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.13 + uses: actions/setup-python@v5 + with: + python-version: "3.13" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e lint-instrumentation-pymssql + lint-instrumentation-pyramid: name: instrumentation-pyramid runs-on: ubuntu-latest diff --git a/.github/workflows/test_1.yml b/.github/workflows/test_1.yml index b27fe28466..f2686cfe91 100644 --- a/.github/workflows/test_1.yml +++ b/.github/workflows/test_1.yml @@ -2338,6 +2338,114 @@ jobs: - name: Run tests run: tox -e pypy3-test-instrumentation-pymysql -- -ra + py38-test-instrumentation-pymssql_ubuntu-latest: + name: instrumentation-pymssql 3.8 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.8 + uses: actions/setup-python@v5 + with: + python-version: "3.8" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py38-test-instrumentation-pymssql -- -ra + + py39-test-instrumentation-pymssql_ubuntu-latest: + name: instrumentation-pymssql 3.9 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.9 + uses: actions/setup-python@v5 + with: + python-version: "3.9" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py39-test-instrumentation-pymssql -- -ra + + py310-test-instrumentation-pymssql_ubuntu-latest: + name: instrumentation-pymssql 3.10 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.10 + uses: actions/setup-python@v5 + with: + python-version: "3.10" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py310-test-instrumentation-pymssql -- -ra + + py311-test-instrumentation-pymssql_ubuntu-latest: + name: instrumentation-pymssql 3.11 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py311-test-instrumentation-pymssql -- -ra + + py312-test-instrumentation-pymssql_ubuntu-latest: + name: instrumentation-pymssql 3.12 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.12 + uses: actions/setup-python@v5 + with: + python-version: "3.12" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py312-test-instrumentation-pymssql -- -ra + + py313-test-instrumentation-pymssql_ubuntu-latest: + name: instrumentation-pymssql 3.13 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.13 + uses: actions/setup-python@v5 + with: + python-version: "3.13" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py313-test-instrumentation-pymssql -- -ra + py38-test-instrumentation-pyramid_ubuntu-latest: name: instrumentation-pyramid 3.8 Ubuntu runs-on: ubuntu-latest @@ -4407,111 +4515,3 @@ jobs: - name: Run tests run: tox -e py311-test-instrumentation-httpx-1 
-- -ra - - py312-test-instrumentation-httpx-0_ubuntu-latest: - name: instrumentation-httpx-0 3.12 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.12 - uses: actions/setup-python@v5 - with: - python-version: "3.12" - - - name: Install tox - run: pip install tox-uv - - - name: Run tests - run: tox -e py312-test-instrumentation-httpx-0 -- -ra - - py312-test-instrumentation-httpx-1_ubuntu-latest: - name: instrumentation-httpx-1 3.12 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.12 - uses: actions/setup-python@v5 - with: - python-version: "3.12" - - - name: Install tox - run: pip install tox-uv - - - name: Run tests - run: tox -e py312-test-instrumentation-httpx-1 -- -ra - - py313-test-instrumentation-httpx-1_ubuntu-latest: - name: instrumentation-httpx-1 3.13 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.13 - uses: actions/setup-python@v5 - with: - python-version: "3.13" - - - name: Install tox - run: pip install tox-uv - - - name: Run tests - run: tox -e py313-test-instrumentation-httpx-1 -- -ra - - pypy3-test-instrumentation-httpx-0_ubuntu-latest: - name: instrumentation-httpx-0 pypy-3.8 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python pypy-3.8 - uses: actions/setup-python@v5 - with: - python-version: "pypy-3.8" - - - name: Install tox - run: pip install tox-uv - - - name: Run tests - run: tox -e pypy3-test-instrumentation-httpx-0 -- -ra - - pypy3-test-instrumentation-httpx-1_ubuntu-latest: - name: instrumentation-httpx-1 pypy-3.8 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python pypy-3.8 - uses: actions/setup-python@v5 - with: - python-version: "pypy-3.8" - - - name: Install tox - run: pip install tox-uv - - - name: Run tests - run: tox -e pypy3-test-instrumentation-httpx-1 -- -ra - - py38-test-util-http_ubuntu-latest: - name: util-http 3.8 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.8 - uses: actions/setup-python@v5 - with: - python-version: "3.8" - - - name: Install tox - run: pip install tox-uv - - - name: Run tests - run: tox -e py38-test-util-http -- -ra diff --git a/.github/workflows/test_2.yml b/.github/workflows/test_2.yml index d9b622c5c3..96ebfba82a 100644 --- a/.github/workflows/test_2.yml +++ b/.github/workflows/test_2.yml @@ -16,6 +16,114 @@ env: jobs: + py312-test-instrumentation-httpx-0_ubuntu-latest: + name: instrumentation-httpx-0 3.12 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.12 + uses: actions/setup-python@v5 + with: + python-version: "3.12" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py312-test-instrumentation-httpx-0 -- -ra + + py312-test-instrumentation-httpx-1_ubuntu-latest: + name: instrumentation-httpx-1 3.12 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.12 + uses: actions/setup-python@v5 + with: + python-version: "3.12" + + - name: 
Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py312-test-instrumentation-httpx-1 -- -ra + + py313-test-instrumentation-httpx-1_ubuntu-latest: + name: instrumentation-httpx-1 3.13 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.13 + uses: actions/setup-python@v5 + with: + python-version: "3.13" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py313-test-instrumentation-httpx-1 -- -ra + + pypy3-test-instrumentation-httpx-0_ubuntu-latest: + name: instrumentation-httpx-0 pypy-3.8 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python pypy-3.8 + uses: actions/setup-python@v5 + with: + python-version: "pypy-3.8" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e pypy3-test-instrumentation-httpx-0 -- -ra + + pypy3-test-instrumentation-httpx-1_ubuntu-latest: + name: instrumentation-httpx-1 pypy-3.8 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python pypy-3.8 + uses: actions/setup-python@v5 + with: + python-version: "pypy-3.8" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e pypy3-test-instrumentation-httpx-1 -- -ra + + py38-test-util-http_ubuntu-latest: + name: util-http 3.8 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.8 + uses: actions/setup-python@v5 + with: + python-version: "3.8" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py38-test-util-http -- -ra + py39-test-util-http_ubuntu-latest: name: util-http 3.9 Ubuntu runs-on: ubuntu-latest diff --git a/CHANGELOG.md b/CHANGELOG.md index 80da336aee..8f78a8d50f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -47,6 +47,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ([#3200](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3200)) - `opentelemetry-opentelemetry-botocore` Add basic support for GenAI attributes for AWS Bedrock ConverseStream API ([#3204](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3204)) +- `opentelemetry-instrumentation-pymssql` Add pymssql instrumentation + ([#394](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/394)) ### Fixed @@ -69,7 +71,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `opentelemetry-instrumentation-dbapi` including sqlcomment in `db.statement` span attribute value is now opt-in ([#3115](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3115)) - ### Breaking changes - `opentelemetry-instrumentation-dbapi` including sqlcomment in `db.statement` span attribute value is now opt-in @@ -77,7 +78,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `opentelemetry-instrumentation-psycopg2`, `opentelemetry-instrumentation-psycopg`, `opentelemetry-instrumentation-mysqlclient`, `opentelemetry-instrumentation-pymysql`: including sqlcomment in `db.statement` span attribute value is now opt-in ([#3121](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3121)) - ## Version 1.29.0/0.50b0 (2024-12-11) ### Added diff --git a/docs-requirements.txt b/docs-requirements.txt index 
d547e806a3..0e36426fe3 100644 --- a/docs-requirements.txt +++ b/docs-requirements.txt @@ -35,6 +35,7 @@ psycopg~=3.1.17 pika>=0.12.0 pymongo~=4.6.3 PyMySQL~=1.1.1 +pymssql~=2.3.2 pyramid>=1.7 redis>=2.6 remoulade>=0.50 diff --git a/docs/instrumentation/pymssql/pymssql.rst b/docs/instrumentation/pymssql/pymssql.rst new file mode 100644 index 0000000000..0b1b589cb9 --- /dev/null +++ b/docs/instrumentation/pymssql/pymssql.rst @@ -0,0 +1,7 @@ +OpenTelemetry pymssql Instrumentation +===================================== + +.. automodule:: opentelemetry.instrumentation.pymssql + :members: + :undoc-members: + :show-inheritance: diff --git a/instrumentation/README.md b/instrumentation/README.md index a229951b4b..dcfcf46edd 100644 --- a/instrumentation/README.md +++ b/instrumentation/README.md @@ -35,6 +35,7 @@ | [opentelemetry-instrumentation-psycopg2](./opentelemetry-instrumentation-psycopg2) | psycopg2 >= 2.7.3.1,psycopg2-binary >= 2.7.3.1 | No | experimental | [opentelemetry-instrumentation-pymemcache](./opentelemetry-instrumentation-pymemcache) | pymemcache >= 1.3.5, < 5 | No | experimental | [opentelemetry-instrumentation-pymongo](./opentelemetry-instrumentation-pymongo) | pymongo >= 3.1, < 5.0 | No | experimental +| [opentelemetry-instrumentation-pymssql](./opentelemetry-instrumentation-pymssql) | pymssql >= 2.1.5, < 3 | No | experimental | [opentelemetry-instrumentation-pymysql](./opentelemetry-instrumentation-pymysql) | PyMySQL < 2 | No | experimental | [opentelemetry-instrumentation-pyramid](./opentelemetry-instrumentation-pyramid) | pyramid >= 1.7 | Yes | experimental | [opentelemetry-instrumentation-redis](./opentelemetry-instrumentation-redis) | redis >= 2.6 | No | experimental diff --git a/instrumentation/opentelemetry-instrumentation-dbapi/src/opentelemetry/instrumentation/dbapi/__init__.py b/instrumentation/opentelemetry-instrumentation-dbapi/src/opentelemetry/instrumentation/dbapi/__init__.py index 27aafc7308..c7b1dee3b2 100644 --- a/instrumentation/opentelemetry-instrumentation-dbapi/src/opentelemetry/instrumentation/dbapi/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-dbapi/src/opentelemetry/instrumentation/dbapi/__init__.py @@ -204,10 +204,12 @@ def instrument_connection( commenter_options: dict[str, Any] | None = None, connect_module: Callable[..., Any] | None = None, enable_attribute_commenter: bool = False, + db_api_integration_factory: type[DatabaseApiIntegration] | None = None, ) -> TracedConnectionProxy[ConnectionT]: """Enable instrumentation in a database connection. Args: + name: The instrumentation module name. connection: The connection to instrument. database_system: An identifier for the database management system (DBMS) product being used. @@ -220,6 +222,10 @@ def instrument_connection( commenter_options: Configurations for tags to be appended at the sql query. connect_module: Module name where connect method is available. enable_attribute_commenter: Flag to enable/disable sqlcomment inclusion in `db.statement` span attribute. Only available if enable_commenter=True. + db_api_integration_factory: A class or factory function to use as a + replacement for :class:`DatabaseApiIntegration`. Can be used to + obtain connection attributes from the connect method instead of + from the connection itself (as done by the pymssql instrumentor). Returns: An instrumented connection.
@@ -228,7 +234,11 @@ def instrument_connection( _logger.warning("Connection already instrumented") return connection - db_integration = DatabaseApiIntegration( + db_api_integration_factory = ( + db_api_integration_factory or DatabaseApiIntegration + ) + + db_integration = db_api_integration_factory( name, database_system, connection_attributes=connection_attributes, diff --git a/instrumentation/opentelemetry-instrumentation-dbapi/tests/test_dbapi_integration.py b/instrumentation/opentelemetry-instrumentation-dbapi/tests/test_dbapi_integration.py index 3d531fb791..97d53f33ec 100644 --- a/instrumentation/opentelemetry-instrumentation-dbapi/tests/test_dbapi_integration.py +++ b/instrumentation/opentelemetry-instrumentation-dbapi/tests/test_dbapi_integration.py @@ -46,7 +46,9 @@ def test_span_succeeded(self): "user": "user", } db_integration = dbapi.DatabaseApiIntegration( - "testname", "testcomponent", connection_attributes + "instrumenting_module_test_name", + "testcomponent", + connection_attributes, ) mock_connection = db_integration.wrapped_connection( mock_connect, {}, connection_props @@ -78,7 +80,7 @@ def test_span_succeeded(self): def test_span_name(self): db_integration = dbapi.DatabaseApiIntegration( - "testname", "testcomponent", {} + "instrumenting_module_test_name", "testcomponent", {} ) mock_connection = db_integration.wrapped_connection( mock_connect, {}, {} @@ -117,7 +119,7 @@ def test_span_succeeded_with_capture_of_statement_parameters(self): "user": "user", } db_integration = dbapi.DatabaseApiIntegration( - "testname", + "instrumenting_module_test_name", "testcomponent", connection_attributes, capture_parameters=True, @@ -169,7 +171,9 @@ def test_span_not_recording(self): mock_span = mock.Mock() mock_span.is_recording.return_value = False db_integration = dbapi.DatabaseApiIntegration( - "testname", "testcomponent", connection_attributes + "instrumenting_module_test_name", + "testcomponent", + connection_attributes, ) mock_connection = db_integration.wrapped_connection( mock_connect, {}, connection_props @@ -183,7 +187,7 @@ def test_span_not_recording(self): def test_span_failed(self): db_integration = dbapi.DatabaseApiIntegration( - self.tracer, "testcomponent" + "instrumenting_module_test_name", "testcomponent" ) mock_connection = db_integration.wrapped_connection( mock_connect, {}, {} @@ -207,7 +211,9 @@ def test_custom_tracer_provider_dbapi(self): tracer_provider, exporter = result db_integration = dbapi.DatabaseApiIntegration( - self.tracer, "testcomponent", tracer_provider=tracer_provider + "instrumenting_module_test_name", + "testcomponent", + tracer_provider=tracer_provider, ) mock_connection = db_integration.wrapped_connection( mock_connect, {}, {} @@ -224,7 +230,7 @@ def test_custom_tracer_provider_dbapi(self): def test_no_op_tracer_provider(self): db_integration = dbapi.DatabaseApiIntegration( - self.tracer, + "instrumenting_module_test_name", "testcomponent", tracer_provider=trace_api.NoOpTracerProvider(), ) @@ -239,7 +245,7 @@ def test_no_op_tracer_provider(self): def test_executemany(self): db_integration = dbapi.DatabaseApiIntegration( - "testname", "testcomponent" + "instrumenting_module_test_name", "testcomponent" ) mock_connection = db_integration.wrapped_connection( mock_connect, {}, {} @@ -263,7 +269,7 @@ def test_executemany_comment(self): connect_module.paramstyle = "test" db_integration = dbapi.DatabaseApiIntegration( - "testname", + "instrumenting_module_test_name", "postgresql", enable_commenter=True, commenter_options={"db_driver": False, 
"dbapi_level": False}, @@ -296,7 +302,7 @@ def test_executemany_comment_stmt_enabled(self): connect_module.paramstyle = "test" db_integration = dbapi.DatabaseApiIntegration( - "testname", + "instrumenting_module_test_name", "postgresql", enable_commenter=True, commenter_options={"db_driver": False, "dbapi_level": False}, @@ -333,7 +339,7 @@ def __getattr__(self, name): connect_module = MockConnectModule() db_integration = dbapi.DatabaseApiIntegration( - "testname", + "instrumenting_module_test_name", "postgresql", enable_commenter=True, connect_module=connect_module, @@ -369,7 +375,7 @@ def __getattr__(self, name): connect_module = MockConnectModule() db_integration = dbapi.DatabaseApiIntegration( - "testname", + "instrumenting_module_test_name", "postgresql", enable_commenter=True, connect_module=connect_module, @@ -404,7 +410,7 @@ def test_executemany_comment_stmt_enabled_matches_db_statement_attribute( connect_module.paramstyle = "test" db_integration = dbapi.DatabaseApiIntegration( - "testname", + "instrumenting_module_test_name", "postgresql", enable_commenter=True, commenter_options={"db_driver": False, "dbapi_level": False}, @@ -445,7 +451,7 @@ def test_compatible_build_version_psycopg_psycopg2_libpq(self): connect_module.paramstyle = "test" db_integration = dbapi.DatabaseApiIntegration( - "testname", + "instrumenting_module_test_name", "postgresql", enable_commenter=True, commenter_options={"db_driver": False, "dbapi_level": False}, @@ -481,7 +487,7 @@ def test_compatible_build_version_psycopg_psycopg2_libpq_stmt_enabled( connect_module.paramstyle = "test" db_integration = dbapi.DatabaseApiIntegration( - "testname", + "instrumenting_module_test_name", "postgresql", enable_commenter=True, commenter_options={"db_driver": False, "dbapi_level": False}, @@ -515,7 +521,7 @@ def test_executemany_psycopg2_integration_comment(self): connect_module.paramstyle = "test" db_integration = dbapi.DatabaseApiIntegration( - "testname", + "instrumenting_module_test_name", "postgresql", enable_commenter=True, commenter_options={"db_driver": True, "dbapi_level": False}, @@ -548,7 +554,7 @@ def test_executemany_psycopg2_integration_comment_stmt_enabled(self): connect_module.paramstyle = "test" db_integration = dbapi.DatabaseApiIntegration( - "testname", + "instrumenting_module_test_name", "postgresql", enable_commenter=True, commenter_options={"db_driver": True, "dbapi_level": False}, @@ -583,7 +589,7 @@ def test_executemany_psycopg_integration_comment(self): connect_module.paramstyle = "test" db_integration = dbapi.DatabaseApiIntegration( - "testname", + "instrumenting_module_test_name", "postgresql", enable_commenter=True, commenter_options={"db_driver": True, "dbapi_level": False}, @@ -617,7 +623,7 @@ def test_executemany_psycopg_integration_comment_stmt_enabled(self): connect_module.paramstyle = "test" db_integration = dbapi.DatabaseApiIntegration( - "testname", + "instrumenting_module_test_name", "postgresql", enable_commenter=True, commenter_options={"db_driver": True, "dbapi_level": False}, @@ -650,7 +656,7 @@ def test_executemany_mysqlconnector_integration_comment(self): connect_module.paramstyle = "test" db_integration = dbapi.DatabaseApiIntegration( - "testname", + "instrumenting_module_test_name", "mysql", enable_commenter=True, commenter_options={"db_driver": True, "dbapi_level": False}, @@ -683,7 +689,7 @@ def test_executemany_mysqlconnector_integration_comment_stmt_enabled(self): connect_module.paramstyle = "test" db_integration = dbapi.DatabaseApiIntegration( - "testname", + 
"instrumenting_module_test_name", "mysql", enable_commenter=True, commenter_options={"db_driver": True, "dbapi_level": False}, @@ -726,7 +732,7 @@ def test_executemany_mysqlclient_integration_comment( ) db_integration = dbapi.DatabaseApiIntegration( - "testname", + "instrumenting_module_test_name", "mysql", enable_commenter=True, commenter_options={"db_driver": True, "dbapi_level": False}, @@ -768,7 +774,7 @@ def test_executemany_mysqlclient_integration_comment_stmt_enabled( ) db_integration = dbapi.DatabaseApiIntegration( - "testname", + "instrumenting_module_test_name", "mysql", enable_commenter=True, commenter_options={"db_driver": True, "dbapi_level": False}, @@ -803,7 +809,7 @@ def test_executemany_pymysql_integration_comment(self): connect_module.get_client_info = mock.MagicMock(return_value="123") db_integration = dbapi.DatabaseApiIntegration( - "testname", + "instrumenting_module_test_name", "mysql", enable_commenter=True, commenter_options={"db_driver": True, "dbapi_level": False}, @@ -837,7 +843,7 @@ def test_executemany_pymysql_integration_comment_stmt_enabled(self): connect_module.get_client_info = mock.MagicMock(return_value="123") db_integration = dbapi.DatabaseApiIntegration( - "testname", + "instrumenting_module_test_name", "mysql", enable_commenter=True, commenter_options={"db_driver": True, "dbapi_level": False}, @@ -872,7 +878,7 @@ def test_executemany_flask_integration_comment(self): connect_module.paramstyle = "test" db_integration = dbapi.DatabaseApiIntegration( - "testname", + "instrumenting_module_test_name", "postgresql", enable_commenter=True, commenter_options={"db_driver": False, "dbapi_level": False}, @@ -916,7 +922,7 @@ def test_executemany_flask_integration_comment_stmt_enabled(self): connect_module.paramstyle = "test" db_integration = dbapi.DatabaseApiIntegration( - "testname", + "instrumenting_module_test_name", "postgresql", enable_commenter=True, commenter_options={"db_driver": False, "dbapi_level": False}, @@ -953,7 +959,7 @@ def test_executemany_flask_integration_comment_stmt_enabled(self): def test_callproc(self): db_integration = dbapi.DatabaseApiIntegration( - "testname", "testcomponent" + "instrumenting_module_test_name", "testcomponent" ) mock_connection = db_integration.wrapped_connection( mock_connect, {}, {} @@ -987,15 +993,19 @@ def test_unwrap_connect(self, mock_dbapi): self.assertIsInstance(connection, mock.Mock) def test_instrument_connection(self): - connection = mock.Mock() + mocked_conn = MockConnection("dbname", "999", "dbhost", "dbuser") # Avoid get_attributes failing because can't concatenate mock - connection.database = "-" - connection2 = dbapi.instrument_connection(self.tracer, connection, "-") - self.assertIs(connection2.__wrapped__, connection) + connection2 = dbapi.instrument_connection( + "instrumenting_module_test_name", mocked_conn, "dbname" + ) + self.assertIs(connection2.__wrapped__, mocked_conn) @mock.patch("opentelemetry.instrumentation.dbapi.DatabaseApiIntegration") def test_instrument_connection_kwargs_defaults(self, mock_dbapiint): - dbapi.instrument_connection(self.tracer, mock.Mock(), "foo") + mocked_conn = MockConnection("dbname", "999", "dbhost", "dbuser") + dbapi.instrument_connection( + "instrumenting_module_test_name", mocked_conn, "foo" + ) kwargs = mock_dbapiint.call_args[1] self.assertEqual(kwargs["connection_attributes"], None) self.assertEqual(kwargs["version"], "") @@ -1008,11 +1018,12 @@ def test_instrument_connection_kwargs_defaults(self, mock_dbapiint): 
@mock.patch("opentelemetry.instrumentation.dbapi.DatabaseApiIntegration") def test_instrument_connection_kwargs_provided(self, mock_dbapiint): + mocked_conn = MockConnection("dbname", "999", "dbhost", "dbuser") mock_tracer_provider = mock.MagicMock() mock_connect_module = mock.MagicMock() dbapi.instrument_connection( - self.tracer, - mock.Mock(), + "instrumenting_module_test_name", + mocked_conn, "foo", connection_attributes={"foo": "bar"}, version="test", @@ -1033,20 +1044,35 @@ def test_instrument_connection_kwargs_provided(self, mock_dbapiint): self.assertIs(kwargs["connect_module"], mock_connect_module) self.assertEqual(kwargs["enable_attribute_commenter"], True) + def test_instrument_connection_db_api_integration_factory(self): + mocked_conn = MockConnection("dbname", "999", "dbhost", "dbuser") + + class DBApiIntegrationTestClass(dbapi.DatabaseApiIntegration): + pass + + conn = dbapi.instrument_connection( + "instrumenting_module_test_name", + mocked_conn, + "dbsystem", + db_api_integration_factory=DBApiIntegrationTestClass, + ) + self.assertIsInstance( + conn._self_db_api_integration, DBApiIntegrationTestClass + ) + def test_uninstrument_connection(self): - connection = mock.Mock() - # Set connection.database to avoid a failure because mock can't - # be concatenated - connection.database = "-" - connection2 = dbapi.instrument_connection(self.tracer, connection, "-") - self.assertIs(connection2.__wrapped__, connection) + mocked_conn = MockConnection("dbname", "999", "dbhost", "dbuser") + connection2 = dbapi.instrument_connection( + "instrumenting_module_test_name", mocked_conn, "-" + ) + self.assertIs(connection2.__wrapped__, mocked_conn) connection3 = dbapi.uninstrument_connection(connection2) - self.assertIs(connection3, connection) + self.assertIs(connection3, mocked_conn) with self.assertLogs(level=logging.WARNING): - connection4 = dbapi.uninstrument_connection(connection) - self.assertIs(connection4, connection) + connection4 = dbapi.uninstrument_connection(mocked_conn) + self.assertIs(connection4, mocked_conn) # pylint: disable=unused-argument diff --git a/instrumentation/opentelemetry-instrumentation-pymssql/LICENSE b/instrumentation/opentelemetry-instrumentation-pymssql/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-pymssql/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/instrumentation/opentelemetry-instrumentation-pymssql/README.rst b/instrumentation/opentelemetry-instrumentation-pymssql/README.rst new file mode 100644 index 0000000000..b885d6b5c5 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-pymssql/README.rst @@ -0,0 +1,21 @@ +OpenTelemetry pymssql Instrumentation +===================================== + +|pypi| + +.. 
|pypi| image:: https://badge.fury.io/py/opentelemetry-instrumentation-pymssql.svg + :target: https://pypi.org/project/opentelemetry-instrumentation-pymssql/ + +Installation +------------ + +:: + + pip install opentelemetry-instrumentation-pymssql + + +References +---------- +* `OpenTelemetry pymssql Instrumentation `_ +* `OpenTelemetry Project `_ +* `OpenTelemetry Python Examples `_ diff --git a/instrumentation/opentelemetry-instrumentation-pymssql/pyproject.toml b/instrumentation/opentelemetry-instrumentation-pymssql/pyproject.toml new file mode 100644 index 0000000000..5360007a5c --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-pymssql/pyproject.toml @@ -0,0 +1,56 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "opentelemetry-instrumentation-pymssql" +dynamic = ["version"] +description = "OpenTelemetry pymssql instrumentation" +readme = "README.rst" +license = "Apache-2.0" +requires-python = ">=3.8" +authors = [ + { name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" }, +] +classifiers = [ + "Development Status :: 4 - Beta", + "Intended Audience :: Developers", + "License :: OSI Approved :: Apache Software License", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", +] +dependencies = [ + "opentelemetry-api ~= 1.12", + "opentelemetry-instrumentation == 0.51b0.dev", + "opentelemetry-instrumentation-dbapi == 0.51b0.dev", +] + +[project.optional-dependencies] +instruments = [ + "pymssql >= 2.1.5, < 3", +] + +[project.entry-points.opentelemetry_instrumentor] +pymssql = "opentelemetry.instrumentation.pymssql:PyMSSQLInstrumentor" + +[project.urls] +Homepage = "https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-pymssql" +Repository = "https://github.com/open-telemetry/opentelemetry-python-contrib" + +[tool.hatch.version] +path = "src/opentelemetry/instrumentation/pymssql/version.py" + +[tool.hatch.build.targets.sdist] +include = [ + "/src", + "/tests", +] + +[tool.hatch.build.targets.wheel] +packages = ["src/opentelemetry"] diff --git a/instrumentation/opentelemetry-instrumentation-pymssql/src/opentelemetry/instrumentation/pymssql/__init__.py b/instrumentation/opentelemetry-instrumentation-pymssql/src/opentelemetry/instrumentation/pymssql/__init__.py new file mode 100644 index 0000000000..2f2ca5f4a2 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-pymssql/src/opentelemetry/instrumentation/pymssql/__init__.py @@ -0,0 +1,208 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+ +""" +The integration with pymssql supports the `pymssql`_ library and can be enabled +by using ``PyMSSQLInstrumentor``. + +.. _pymssql: https://pypi.org/project/pymssql/ + +Usage +----- + +.. code:: python + + import pymssql + from opentelemetry.instrumentation.pymssql import PyMSSQLInstrumentor + + PyMSSQLInstrumentor().instrument() + + cnx = pymssql.connect(database="MSSQL_Database") + cursor = cnx.cursor() + cursor.execute("INSERT INTO test (testField) VALUES (123)" + cnx.commit() + cursor.close() + cnx.close() + +.. code:: python + + import pymssql + from opentelemetry.instrumentation.pymssql import PyMSSQLInstrumentor + + # Alternatively, use instrument_connection for an individual connection + cnx = pymssql.connect(database="MSSQL_Database") + instrumented_cnx = PyMSSQLInstrumentor().instrument_connection(cnx) + cursor = instrumented_cnx.cursor() + cursor.execute("INSERT INTO test (testField) VALUES (123)" + instrumented_cnx.commit() + cursor.close() + instrumented_cnx.close() + +API +--- +The `instrument` method accepts the following keyword args: + +tracer_provider (TracerProvider) - an optional tracer provider + +For example: + +.. code: python + + import pymssql + from opentelemetry.instrumentation.pymssql import PyMSSQLInstrumentor + from opentelemetry.trace import NoOpTracerProvider + + PyMSSQLInstrumentor().instrument(tracer_provider=NoOpTracerProvider()) +""" + +from __future__ import annotations + +from typing import Any, Callable, Collection, NamedTuple + +import pymssql + +from opentelemetry.instrumentation import dbapi +from opentelemetry.instrumentation.instrumentor import BaseInstrumentor +from opentelemetry.instrumentation.pymssql.package import _instruments +from opentelemetry.instrumentation.pymssql.version import __version__ + +_DATABASE_SYSTEM = "mssql" + + +class _PyMSSQLConnectMethodArgsTuple(NamedTuple): + server: str | None = None + user: str | None = None + password: str | None = None + database: str | None = None + timeout: int | None = None + login_timeout: int | None = None + charset: str | None = None + as_dict: bool | None = None + host: str | None = None + appname: str | None = None + port: str | None = None + conn_properties: str | None = None + autocommit: bool | None = None + tds_version: str | None = None + + +class _PyMSSQLDatabaseApiIntegration(dbapi.DatabaseApiIntegration): + def wrapped_connection( + self, + connect_method: Callable[..., Any], + args: tuple[Any, Any], + kwargs: dict[Any, Any], + ): + """Add object proxy to connection object.""" + connection = connect_method(*args, **kwargs) + connect_method_args = _PyMSSQLConnectMethodArgsTuple(*args) + + self.name = self.database_system + self.database = kwargs.get("database") or connect_method_args.database + + user = kwargs.get("user") or connect_method_args.user + if user is not None: + self.span_attributes["db.user"] = user + + port = kwargs.get("port") or connect_method_args.port + host = kwargs.get("server") or connect_method_args.server + if host is None: + host = kwargs.get("host") or connect_method_args.host + if host is not None: + # The host string can include the port, separated by either a coma or + # a column + for sep in (":", ","): + if sep in host: + tokens = host.rsplit(sep) + host = tokens[0] + if len(tokens) > 1: + port = tokens[1] + if host is not None: + self.span_attributes["net.peer.name"] = host + if port is not None: + self.span_attributes["net.peer.port"] = port + + charset = kwargs.get("charset") or connect_method_args.charset + if charset is not None: + 
self.span_attributes["db.charset"] = charset + + tds_version = ( + kwargs.get("tds_version") or connect_method_args.tds_version + ) + if tds_version is not None: + self.span_attributes["db.protocol.tds.version"] = tds_version + + return dbapi.get_traced_connection_proxy(connection, self) + + +class PyMSSQLInstrumentor(BaseInstrumentor): + def instrumentation_dependencies(self) -> Collection[str]: + return _instruments + + def _instrument(self, **kwargs): + """Integrate with the pymssql library. + https://github.com/pymssql/pymssql/ + """ + tracer_provider = kwargs.get("tracer_provider") + + dbapi.wrap_connect( + __name__, + pymssql, + "connect", + _DATABASE_SYSTEM, + version=__version__, + tracer_provider=tracer_provider, + # pymssql does not keep the connection attributes in its connection object; + # instead, we get the attributes from the connect method (which is done + # via PyMSSQLDatabaseApiIntegration.wrapped_connection) + db_api_integration_factory=_PyMSSQLDatabaseApiIntegration, + ) + + def _uninstrument(self, **kwargs): + """ "Disable pymssql instrumentation""" + dbapi.unwrap_connect(pymssql, "connect") + + @staticmethod + def instrument_connection(connection, tracer_provider=None): + """Enable instrumentation in a pymssql connection. + + Args: + connection: The connection to instrument. + tracer_provider: The optional tracer provider to use. If omitted + the current globally configured one is used. + + Returns: + An instrumented connection. + """ + + return dbapi.instrument_connection( + __name__, + connection, + _DATABASE_SYSTEM, + version=__version__, + tracer_provider=tracer_provider, + db_api_integration_factory=_PyMSSQLDatabaseApiIntegration, + ) + + @staticmethod + def uninstrument_connection(connection): + """Disable instrumentation in a pymssql connection. + + Args: + connection: The connection to uninstrument. + + Returns: + An uninstrumented connection. + """ + return dbapi.uninstrument_connection(connection) diff --git a/instrumentation/opentelemetry-instrumentation-pymssql/src/opentelemetry/instrumentation/pymssql/package.py b/instrumentation/opentelemetry-instrumentation-pymssql/src/opentelemetry/instrumentation/pymssql/package.py new file mode 100644 index 0000000000..92bc9e0193 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-pymssql/src/opentelemetry/instrumentation/pymssql/package.py @@ -0,0 +1,16 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +_instruments = ("pymssql >= 2.1.5, < 3",) diff --git a/instrumentation/opentelemetry-instrumentation-pymssql/src/opentelemetry/instrumentation/pymssql/version.py b/instrumentation/opentelemetry-instrumentation-pymssql/src/opentelemetry/instrumentation/pymssql/version.py new file mode 100644 index 0000000000..6e2923f0db --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-pymssql/src/opentelemetry/instrumentation/pymssql/version.py @@ -0,0 +1,15 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__version__ = "0.51b0.dev" diff --git a/instrumentation/opentelemetry-instrumentation-pymssql/test-requirements.txt b/instrumentation/opentelemetry-instrumentation-pymssql/test-requirements.txt new file mode 100644 index 0000000000..19e4bb3e3f --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-pymssql/test-requirements.txt @@ -0,0 +1,10 @@ +exceptiongroup==1.2.2 +iniconfig==2.0.0 +packaging==24.2 +pluggy==1.5.0 +pymssql==2.3.1 +pytest==7.4.4 +tomli==2.2.1 +-e opentelemetry-instrumentation +-e instrumentation/opentelemetry-instrumentation-dbapi +-e instrumentation/opentelemetry-instrumentation-pymssql diff --git a/instrumentation/opentelemetry-instrumentation-pymssql/tests/__init__.py b/instrumentation/opentelemetry-instrumentation-pymssql/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/instrumentation/opentelemetry-instrumentation-pymssql/tests/test_pymssql_integration.py b/instrumentation/opentelemetry-instrumentation-pymssql/tests/test_pymssql_integration.py new file mode 100644 index 0000000000..ccd522b165 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-pymssql/tests/test_pymssql_integration.py @@ -0,0 +1,184 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from unittest.mock import Mock, patch + +import pymssql # type: ignore + +import opentelemetry.instrumentation.pymssql +from opentelemetry.instrumentation.pymssql import PyMSSQLInstrumentor +from opentelemetry.sdk import resources +from opentelemetry.test.test_base import TestBase + + +def mock_connect(*args, **kwargs): + class MockConnection: + def cursor(self): + # pylint: disable=no-self-use + return Mock() + + return MockConnection() + + +class TestPyMSSQLIntegration(TestBase): + def tearDown(self): + super().tearDown() + with self.disable_logging(): + PyMSSQLInstrumentor().uninstrument() + + def _execute_query_and_get_span(self, cnx): + cursor = cnx.cursor() + query = "SELECT * FROM test" + cursor.execute(query) + + spans_list = self.memory_exporter.get_finished_spans() + self.assertEqual(len(spans_list), 1) + span = spans_list[0] + + # Check version and name in span's instrumentation info + self.assertEqualSpanInstrumentationScope( + span, opentelemetry.instrumentation.pymssql + ) + return span + + @patch("pymssql.connect", new=mock_connect) + # pylint: disable=unused-argument + def test_instrumentor(self): + PyMSSQLInstrumentor().instrument() + + cnx = pymssql.connect( # pylint: disable=no-member + host="dbserver.local:1433", + database="testdb", + user="dbuser", + password="dbpassw0rd", + charset="UTF-8", + tds_version="7.1", + ) + span = self._execute_query_and_get_span(cnx) + + self.assertEqual(span.attributes["db.system"], "mssql") + self.assertEqual(span.attributes["db.name"], "testdb") + self.assertEqual(span.attributes["db.statement"], "SELECT * FROM test") + self.assertEqual(span.attributes["db.user"], "dbuser") + self.assertEqual(span.attributes["net.peer.name"], "dbserver.local") + self.assertEqual(span.attributes["net.peer.port"], "1433") + self.assertEqual(span.attributes["db.charset"], "UTF-8") + self.assertEqual(span.attributes["db.protocol.tds.version"], "7.1") + + # check that no spans are generated after uninstrument + PyMSSQLInstrumentor().uninstrument() + + cnx = pymssql.connect(database="test") # pylint: disable=no-member + cursor = cnx.cursor() + query = "SELECT * FROM test" + cursor.execute(query) + + spans_list = self.memory_exporter.get_finished_spans() + self.assertEqual(len(spans_list), 1) + + @patch("pymssql.connect", new=mock_connect) + # pylint: disable=unused-argument + def test_instrumentor_server_param(self): + PyMSSQLInstrumentor().instrument() + + # `server` can be used instead of `host` + cnx = pymssql.connect(server="dbserver.local:1433", database="testdb") # pylint: disable=no-member + span = self._execute_query_and_get_span(cnx) + + self.assertEqual(span.attributes["net.peer.name"], "dbserver.local") + self.assertEqual(span.attributes["net.peer.port"], "1433") + + @patch("pymssql.connect", new=mock_connect) + # pylint: disable=unused-argument + def test_instrumentor_port_param(self): + PyMSSQLInstrumentor().instrument() + + # port can be specified as a parameter + cnx = pymssql.connect( # pylint: disable=no-member + server="dbserver.local", port="1433", database="testdb" + ) + span = self._execute_query_and_get_span(cnx) + + self.assertEqual(span.attributes["net.peer.name"], "dbserver.local") + self.assertEqual(span.attributes["net.peer.port"], "1433") + + @patch("pymssql.connect", new=mock_connect) + # pylint: disable=unused-argument + def test_instrumentor_windows_server(self): + PyMSSQLInstrumentor().instrument() + + # Windows server names can include special characters + cnx = pymssql.connect(server=r"(local)\SQLEXPRESS", 
database="testdb") # pylint: disable=no-member + span = self._execute_query_and_get_span(cnx) + + self.assertEqual( + span.attributes["net.peer.name"], r"(local)\SQLEXPRESS" + ) + + @patch("pymssql.connect", new=mock_connect) + # pylint: disable=unused-argument + def test_custom_tracer_provider(self): + resource = resources.Resource.create({}) + result = self.create_tracer_provider(resource=resource) + tracer_provider, exporter = result + + PyMSSQLInstrumentor().instrument(tracer_provider=tracer_provider) + + cnx = pymssql.connect(database="test") # pylint: disable=no-member + cursor = cnx.cursor() + query = "SELECT * FROM test" + cursor.execute(query) + + spans_list = exporter.get_finished_spans() + self.assertEqual(len(spans_list), 1) + span = spans_list[0] + + self.assertIs(span.resource, resource) + + @patch("pymssql.connect", new=mock_connect) + # pylint: disable=unused-argument + def test_instrument_connection(self): + cnx = pymssql.connect(database="test") # pylint: disable=no-member + query = "SELECT * FROM test" + cursor = cnx.cursor() + cursor.execute(query) + + spans_list = self.memory_exporter.get_finished_spans() + self.assertEqual(len(spans_list), 0) + + cnx = PyMSSQLInstrumentor().instrument_connection(cnx) + cursor = cnx.cursor() + cursor.execute(query) + + spans_list = self.memory_exporter.get_finished_spans() + self.assertEqual(len(spans_list), 1) + + @patch("pymssql.connect", new=mock_connect) + # pylint: disable=unused-argument + def test_uninstrument_connection(self): + PyMSSQLInstrumentor().instrument() + cnx = pymssql.connect(database="test") # pylint: disable=no-member + query = "SELECT * FROM test" + cursor = cnx.cursor() + cursor.execute(query) + + spans_list = self.memory_exporter.get_finished_spans() + self.assertEqual(len(spans_list), 1) + + cnx = PyMSSQLInstrumentor().uninstrument_connection(cnx) + cursor = cnx.cursor() + cursor.execute(query) + + spans_list = self.memory_exporter.get_finished_spans() + self.assertEqual(len(spans_list), 1) diff --git a/opentelemetry-contrib-instrumentations/pyproject.toml b/opentelemetry-contrib-instrumentations/pyproject.toml index e19c0d69d8..9c0ab886d5 100644 --- a/opentelemetry-contrib-instrumentations/pyproject.toml +++ b/opentelemetry-contrib-instrumentations/pyproject.toml @@ -64,6 +64,7 @@ dependencies = [ "opentelemetry-instrumentation-psycopg2==0.51b0.dev", "opentelemetry-instrumentation-pymemcache==0.51b0.dev", "opentelemetry-instrumentation-pymongo==0.51b0.dev", + "opentelemetry-instrumentation-pymssql==0.51b0.dev", "opentelemetry-instrumentation-pymysql==0.51b0.dev", "opentelemetry-instrumentation-pyramid==0.51b0.dev", "opentelemetry-instrumentation-redis==0.51b0.dev", diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py index cea9e3e11f..f28ebfb41b 100644 --- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py +++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py @@ -156,6 +156,10 @@ "library": "pymongo >= 3.1, < 5.0", "instrumentation": "opentelemetry-instrumentation-pymongo==0.51b0.dev", }, + { + "library": "pymssql >= 2.1.5, < 3", + "instrumentation": "opentelemetry-instrumentation-pymssql==0.51b0.dev", + }, { "library": "PyMySQL < 2", "instrumentation": "opentelemetry-instrumentation-pymysql==0.51b0.dev", diff --git a/tox.ini b/tox.ini index 22e56a835f..b45b6b4d4c 100644 --- a/tox.ini +++ b/tox.ini @@ -243,6 +243,12 @@ 
envlist = pypy3-test-instrumentation-pymysql lint-instrumentation-pymysql + ; opentelemetry-instrumentation-pymssql + py3{8,9,10,11,12,13}-test-instrumentation-pymssql + ; pymssql has no support for pypy: see https://github.com/pymssql/pymssql/pull/517 + ; pypy3-test-instrumentation-pymssql + lint-instrumentation-pymssql + ; opentelemetry-instrumentation-pyramid ; TODO: add py313 when supported by pyramid py3{8,9,10,11,12}-test-instrumentation-pyramid @@ -581,6 +587,9 @@ deps = pymysql: {[testenv]test_deps} pymysql: -r {toxinidir}/instrumentation/opentelemetry-instrumentation-pymysql/test-requirements.txt + pymssql: {[testenv]test_deps} + pymssql: -r {toxinidir}/instrumentation/opentelemetry-instrumentation-pymssql/test-requirements.txt + pyramid: {[testenv]test_deps} pyramid: -r {toxinidir}/instrumentation/opentelemetry-instrumentation-pyramid/test-requirements.txt @@ -829,6 +838,9 @@ commands = test-instrumentation-pymysql: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-pymysql/tests {posargs} lint-instrumentation-pymysql: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-pymysql" + test-instrumentation-pymssql: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-pymssql/tests {posargs} + lint-instrumentation-pymssql: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-pymssql" + test-instrumentation-pyramid: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-pyramid/tests {posargs} lint-instrumentation-pyramid: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-pyramid" @@ -944,6 +956,7 @@ deps = -e {toxinidir}/instrumentation/opentelemetry-instrumentation-psycopg2 -e {toxinidir}/instrumentation/opentelemetry-instrumentation-pymongo -e {toxinidir}/instrumentation/opentelemetry-instrumentation-pymysql + -e {toxinidir}/instrumentation/opentelemetry-instrumentation-pymssql -e {toxinidir}/instrumentation/opentelemetry-instrumentation-sqlalchemy -e {toxinidir}/instrumentation/opentelemetry-instrumentation-aiopg -e {toxinidir}/instrumentation/opentelemetry-instrumentation-redis From 93e6fcfa26e8ab15d428830b91803a94b2e718b3 Mon Sep 17 00:00:00 2001 From: Riccardo Magliocchetti Date: Tue, 28 Jan 2025 09:55:28 +0100 Subject: [PATCH 09/16] botocore: add basic tracing for Bedrock InvokeModelWithStreamResponse (#3206) * Add basic tracing for InvokeModelWithResponseStream * Add changelog and please pylint --- CHANGELOG.md | 2 + .../bedrock-runtime/zero-code/README.rst | 1 + .../zero-code/invoke_model_stream.py | 51 +++++++ .../botocore/extensions/bedrock.py | 27 +++- .../botocore/extensions/bedrock_utils.py | 142 ++++++++++++++++- .../tests/bedrock_utils.py | 2 +- ...el_with_response_stream_invalid_model.yaml | 51 +++++++ ...onse_stream_with_content[amazon.nova].yaml | 144 ++++++++++++++++++ ...nse_stream_with_content[amazon.titan].yaml | 61 ++++++++ ...stream_with_content[anthropic.claude].yaml | 124 +++++++++++++++ .../tests/test_botocore_bedrock.py | 124 ++++++++++++++- 11 files changed, 720 insertions(+), 9 deletions(-) create mode 100644 instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/invoke_model_stream.py create mode 100644 instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_response_stream_invalid_model.yaml create mode 100644 
instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_response_stream_with_content[amazon.nova].yaml create mode 100644 instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_response_stream_with_content[amazon.titan].yaml create mode 100644 instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_response_stream_with_content[anthropic.claude].yaml diff --git a/CHANGELOG.md b/CHANGELOG.md index 8f78a8d50f..543d7dab4d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -47,6 +47,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ([#3200](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3200)) - `opentelemetry-opentelemetry-botocore` Add basic support for GenAI attributes for AWS Bedrock ConverseStream API ([#3204](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3204)) +- `opentelemetry-opentelemetry-botocore` Add basic support for GenAI attributes for AWS Bedrock InvokeModelWithResponseStream API + ([#3206](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3206)) - `opentelemetry-instrumentation-pymssql` Add pymssql instrumentation ([#394](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/394)) diff --git a/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/README.rst b/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/README.rst index cdd678c765..abecb0aa88 100644 --- a/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/README.rst +++ b/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/README.rst @@ -20,6 +20,7 @@ Available examples - `converse.py` uses `bedrock-runtime` `Converse API _`. - `converse_stream.py` uses `bedrock-runtime` `ConverseStream API _`. - `invoke_model.py` uses `bedrock-runtime` `InvokeModel API _`. +- `invoke_model_stream.py` uses `bedrock-runtime` `InvokeModelWithResponseStream API _`. Setup ----- diff --git a/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/invoke_model_stream.py b/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/invoke_model_stream.py new file mode 100644 index 0000000000..deca2c9fb3 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/invoke_model_stream.py @@ -0,0 +1,51 @@ +import json +import os + +import boto3 + + +def main(): + chat_model = os.getenv("CHAT_MODEL", "amazon.titan-text-lite-v1") + prompt = "Write a short poem on OpenTelemetry."
+ if "amazon.titan" in chat_model: + body = { + "inputText": prompt, + "textGenerationConfig": {}, + } + elif "amazon.nova" in chat_model: + body = { + "messages": [{"role": "user", "content": [{"text": prompt}]}], + "schemaVersion": "messages-v1", + } + elif "anthropic.claude" in chat_model: + body = { + "messages": [ + {"role": "user", "content": [{"text": prompt, "type": "text"}]} + ], + "anthropic_version": "bedrock-2023-05-31", + "max_tokens": 200, + } + else: + raise ValueError() + client = boto3.client("bedrock-runtime") + response = client.invoke_model_with_response_stream( + modelId=chat_model, + body=json.dumps(body), + ) + + answer = "" + for event in response["body"]: + json_bytes = event.get("chunk", {}).get("bytes", b"") + decoded = json_bytes.decode("utf-8") + chunk = json.loads(decoded) + if "outputText" in chunk: + answer += chunk["outputText"] + elif "completion" in chunk: + answer += chunk["completion"] + elif "contentBlockDelta" in chunk: + answer += chunk["contentBlockDelta"]["delta"]["text"] + print(answer) + + +if __name__ == "__main__": + main() diff --git a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py index fb664bb1e4..186029eadf 100644 --- a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py +++ b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py @@ -28,6 +28,7 @@ from opentelemetry.instrumentation.botocore.extensions.bedrock_utils import ( ConverseStreamWrapper, + InvokeModelWithResponseStreamWrapper, ) from opentelemetry.instrumentation.botocore.extensions.types import ( _AttributeMapT, @@ -66,8 +67,16 @@ class _BedrockRuntimeExtension(_AwsSdkExtension): Amazon Bedrock Runtime. 
""" - _HANDLED_OPERATIONS = {"Converse", "ConverseStream", "InvokeModel"} - _DONT_CLOSE_SPAN_ON_END_OPERATIONS = {"ConverseStream"} + _HANDLED_OPERATIONS = { + "Converse", + "ConverseStream", + "InvokeModel", + "InvokeModelWithResponseStream", + } + _DONT_CLOSE_SPAN_ON_END_OPERATIONS = { + "ConverseStream", + "InvokeModelWithResponseStream", + } def should_end_span_on_exit(self): return ( @@ -288,6 +297,20 @@ def stream_done_callback(response): # InvokeModel if "body" in result and isinstance(result["body"], StreamingBody): self._invoke_model_on_success(span, result, model_id) + return + + # InvokeModelWithResponseStream + if "body" in result and isinstance(result["body"], EventStream): + + def invoke_model_stream_done_callback(response): + # the callback gets data formatted as the simpler converse API + self._converse_on_success(span, response) + span.end() + + result["body"] = InvokeModelWithResponseStreamWrapper( + result["body"], invoke_model_stream_done_callback, model_id + ) + return # pylint: disable=no-self-use def _handle_amazon_titan_response( diff --git a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock_utils.py b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock_utils.py index 55d90a2b9f..55f0fb0757 100644 --- a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock_utils.py +++ b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock_utils.py @@ -18,6 +18,8 @@ from __future__ import annotations +import json + from botocore.eventstream import EventStream from wrapt import ObjectProxy @@ -46,20 +48,21 @@ def __iter__(self): def _process_event(self, event): if "messageStart" in event: # {'messageStart': {'role': 'assistant'}} - pass + return if "contentBlockDelta" in event: # {'contentBlockDelta': {'delta': {'text': "Hello"}, 'contentBlockIndex': 0}} - pass + return if "contentBlockStop" in event: # {'contentBlockStop': {'contentBlockIndex': 0}} - pass + return if "messageStop" in event: # {'messageStop': {'stopReason': 'end_turn'}} if stop_reason := event["messageStop"].get("stopReason"): self._response["stopReason"] = stop_reason + return if "metadata" in event: # {'metadata': {'usage': {'inputTokens': 12, 'outputTokens': 15, 'totalTokens': 27}, 'metrics': {'latencyMs': 2980}}} @@ -72,3 +75,136 @@ def _process_event(self, event): self._response["usage"]["outputTokens"] = output_tokens self._stream_done_callback(self._response) + return + + +# pylint: disable=abstract-method +class InvokeModelWithResponseStreamWrapper(ObjectProxy): + """Wrapper for botocore.eventstream.EventStream""" + + def __init__( + self, + stream: EventStream, + stream_done_callback, + model_id: str, + ): + super().__init__(stream) + + self._stream_done_callback = stream_done_callback + self._model_id = model_id + + # accumulating things in the same shape of the Converse API + # {"usage": {"inputTokens": 0, "outputTokens": 0}, "stopReason": "finish"} + self._response = {} + + def __iter__(self): + for event in self.__wrapped__: + self._process_event(event) + yield event + + def _process_event(self, event): + if "chunk" not in event: + return + + json_bytes = event["chunk"].get("bytes", b"") + decoded = json_bytes.decode("utf-8") + try: + chunk = json.loads(decoded) + except json.JSONDecodeError: + return + + if "amazon.titan" in self._model_id: + 
self._process_amazon_titan_chunk(chunk) + elif "amazon.nova" in self._model_id: + self._process_amazon_nova_chunk(chunk) + elif "anthropic.claude" in self._model_id: + self._process_anthropic_claude_chunk(chunk) + + def _process_invocation_metrics(self, invocation_metrics): + self._response["usage"] = {} + if input_tokens := invocation_metrics.get("inputTokenCount"): + self._response["usage"]["inputTokens"] = input_tokens + + if output_tokens := invocation_metrics.get("outputTokenCount"): + self._response["usage"]["outputTokens"] = output_tokens + + def _process_amazon_titan_chunk(self, chunk): + if (stop_reason := chunk.get("completionReason")) is not None: + self._response["stopReason"] = stop_reason + + if invocation_metrics := chunk.get("amazon-bedrock-invocationMetrics"): + # "amazon-bedrock-invocationMetrics":{ + # "inputTokenCount":9,"outputTokenCount":128,"invocationLatency":3569,"firstByteLatency":2180 + # } + self._process_invocation_metrics(invocation_metrics) + self._stream_done_callback(self._response) + + def _process_amazon_nova_chunk(self, chunk): + if "messageStart" in chunk: + # {'messageStart': {'role': 'assistant'}} + return + + if "contentBlockDelta" in chunk: + # {'contentBlockDelta': {'delta': {'text': "Hello"}, 'contentBlockIndex': 0}} + return + + if "contentBlockStop" in chunk: + # {'contentBlockStop': {'contentBlockIndex': 0}} + return + + if "messageStop" in chunk: + # {'messageStop': {'stopReason': 'end_turn'}} + if stop_reason := chunk["messageStop"].get("stopReason"): + self._response["stopReason"] = stop_reason + return + + if "metadata" in chunk: + # {'metadata': {'usage': {'inputTokens': 8, 'outputTokens': 117}, 'metrics': {}, 'trace': {}}} + if usage := chunk["metadata"].get("usage"): + self._response["usage"] = {} + if input_tokens := usage.get("inputTokens"): + self._response["usage"]["inputTokens"] = input_tokens + + if output_tokens := usage.get("outputTokens"): + self._response["usage"]["outputTokens"] = output_tokens + + self._stream_done_callback(self._response) + return + + def _process_anthropic_claude_chunk(self, chunk): + # pylint: disable=too-many-return-statements + if not (message_type := chunk.get("type")): + return + + if message_type == "message_start": + # {'type': 'message_start', 'message': {'id': 'id', 'type': 'message', 'role': 'assistant', 'model': 'claude-2.0', 'content': [], 'stop_reason': None, 'stop_sequence': None, 'usage': {'input_tokens': 18, 'output_tokens': 1}}} + return + + if message_type == "content_block_start": + # {'type': 'content_block_start', 'index': 0, 'content_block': {'type': 'text', 'text': ''}} + return + + if message_type == "content_block_delta": + # {'type': 'content_block_delta', 'index': 0, 'delta': {'type': 'text_delta', 'text': 'Here'}} + return + + if message_type == "content_block_stop": + # {'type': 'content_block_stop', 'index': 0} + return + + if message_type == "message_delta": + # {'type': 'message_delta', 'delta': {'stop_reason': 'end_turn', 'stop_sequence': None}, 'usage': {'output_tokens': 123}} + if ( + stop_reason := chunk.get("delta", {}).get("stop_reason") + ) is not None: + self._response["stopReason"] = stop_reason + return + + if message_type == "message_stop": + # {'type': 'message_stop', 'amazon-bedrock-invocationMetrics': {'inputTokenCount': 18, 'outputTokenCount': 123, 'invocationLatency': 5250, 'firstByteLatency': 290}} + if invocation_metrics := chunk.get( + "amazon-bedrock-invocationMetrics" + ): + self._process_invocation_metrics(invocation_metrics) + 
self._stream_done_callback(self._response) + return diff --git a/instrumentation/opentelemetry-instrumentation-botocore/tests/bedrock_utils.py b/instrumentation/opentelemetry-instrumentation-botocore/tests/bedrock_utils.py index 1467817e2e..f3d7f9e5c6 100644 --- a/instrumentation/opentelemetry-instrumentation-botocore/tests/bedrock_utils.py +++ b/instrumentation/opentelemetry-instrumentation-botocore/tests/bedrock_utils.py @@ -128,7 +128,7 @@ def assert_converse_completion_attributes( ) -def assert_converse_stream_completion_attributes( +def assert_stream_completion_attributes( span: ReadableSpan, request_model: str, input_tokens: int | None = None, diff --git a/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_response_stream_invalid_model.yaml b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_response_stream_invalid_model.yaml new file mode 100644 index 0000000000..1571adc412 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_response_stream_invalid_model.yaml @@ -0,0 +1,51 @@ +interactions: +- request: + body: null + headers: + Content-Length: + - '0' + User-Agent: + - !!binary | + Qm90bzMvMS4zNS41NiBtZC9Cb3RvY29yZSMxLjM1LjU2IHVhLzIuMCBvcy9saW51eCM2LjEuMC0x + MDM0LW9lbSBtZC9hcmNoI3g4Nl82NCBsYW5nL3B5dGhvbiMzLjEwLjEyIG1kL3B5aW1wbCNDUHl0 + aG9uIGNmZy9yZXRyeS1tb2RlI2xlZ2FjeSBCb3RvY29yZS8xLjM1LjU2 + X-Amz-Date: + - !!binary | + MjAyNTAxMjRUMTM0NDM5Wg== + X-Amz-Security-Token: + - test_aws_security_token + X-Amzn-Trace-Id: + - !!binary | + Um9vdD0xLTFlMjljM2Y1LTU2MzZhOWI4MmViYTYxOTFiOTcwOTI2YTtQYXJlbnQ9NzA1NzBlZjUy + YzJkZjliYjtTYW1wbGVkPTE= + amz-sdk-invocation-id: + - !!binary | + ZDg2MjFlMzAtNTk3Yi00ZWM3LWJlNGEtMThkMDQwZTRhMzcw + amz-sdk-request: + - !!binary | + YXR0ZW1wdD0x + authorization: + - Bearer test_aws_authorization + method: POST + uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/does-not-exist/invoke-with-response-stream + response: + body: + string: '{"message":"The provided model identifier is invalid."}' + headers: + Connection: + - keep-alive + Content-Length: + - '55' + Content-Type: + - application/json + Date: + - Fri, 24 Jan 2025 13:44:40 GMT + Set-Cookie: test_set_cookie + x-amzn-ErrorType: + - ValidationException:http://internal.amazon.com/coral/com.amazon.bedrock/ + x-amzn-RequestId: + - 6460a108-875d-4e26-bcdf-f03c4c815f74 + status: + code: 400 + message: Bad Request +version: 1 diff --git a/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_response_stream_with_content[amazon.nova].yaml b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_response_stream_with_content[amazon.nova].yaml new file mode 100644 index 0000000000..99283f5726 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_response_stream_with_content[amazon.nova].yaml @@ -0,0 +1,144 @@ +interactions: +- request: + body: '{"messages": [{"role": "user", "content": [{"text": "Say this is a test"}]}], + "inferenceConfig": {"max_new_tokens": 10, "temperature": 0.8, "topP": 1, "stopSequences": + ["|"]}, "schemaVersion": "messages-v1"}' + headers: + Content-Length: + - '207' + User-Agent: + - !!binary | + Qm90bzMvMS4zNS41NiBtZC9Cb3RvY29yZSMxLjM1LjU2IHVhLzIuMCBvcy9saW51eCM2LjEuMC0x + MDM0LW9lbSBtZC9hcmNoI3g4Nl82NCBsYW5nL3B5dGhvbiMzLjEwLjEyIG1kL3B5aW1wbCNDUHl0 + 
aG9uIGNmZy9yZXRyeS1tb2RlI2xlZ2FjeSBCb3RvY29yZS8xLjM1LjU2 + X-Amz-Date: + - !!binary | + MjAyNTAxMjRUMTM0NDM3Wg== + X-Amz-Security-Token: + - test_aws_security_token + X-Amzn-Trace-Id: + - !!binary | + Um9vdD0xLWE0YWY3ZjVkLTY5YmE5ZDNiNjg5YjM2OTRkYThmZDk5NDtQYXJlbnQ9OThiYjVhY2U3 + MDE2YzZiZTtTYW1wbGVkPTE= + amz-sdk-invocation-id: + - !!binary | + MmZkNDA5NjQtYTBiNS00NzAwLTljYjUtNjI5MWQ2OWU3YTFm + amz-sdk-request: + - !!binary | + YXR0ZW1wdD0x + authorization: + - Bearer test_aws_authorization + method: POST + uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/amazon.nova-micro-v1%3A0/invoke-with-response-stream + response: + body: + string: !!binary | + AAAA0QAAAEswuRGYCzpldmVudC10eXBlBwAFY2h1bmsNOmNvbnRlbnQtdHlwZQcAEGFwcGxpY2F0 + aW9uL2pzb24NOm1lc3NhZ2UtdHlwZQcABWV2ZW50eyJieXRlcyI6ImV5SnRaWE56WVdkbFUzUmhj + blFpT25zaWNtOXNaU0k2SW1GemMybHpkR0Z1ZENKOWZRPT0iLCJwIjoiYWJjZGVmZ2hpamtsbW5v + cHFyc3R1dnd4eXpBQkNERUZHSElKS0xNTk9QUVJTVFUifVUlBC4AAADcAAAAS8gp1SkLOmV2ZW50 + LXR5cGUHAAVjaHVuaw06Y29udGVudC10eXBlBwAQYXBwbGljYXRpb24vanNvbg06bWVzc2FnZS10 + eXBlBwAFZXZlbnR7ImJ5dGVzIjoiZXlKamIyNTBaVzUwUW14dlkydEVaV3gwWVNJNmV5SmtaV3gw + WVNJNmV5SjBaWGgwSWpvaVNYUWlmU3dpWTI5dWRHVnVkRUpzYjJOclNXNWtaWGdpT2pCOWZRPT0i + LCJwIjoiYWJjZGVmZ2hpamtsbW5vcHFyIn1vHubCAAAAywAAAEsa6Z67CzpldmVudC10eXBlBwAF + Y2h1bmsNOmNvbnRlbnQtdHlwZQcAEGFwcGxpY2F0aW9uL2pzb24NOm1lc3NhZ2UtdHlwZQcABWV2 + ZW50eyJieXRlcyI6ImV5SmpiMjUwWlc1MFFteHZZMnRUZEc5d0lqcDdJbU52Ym5SbGJuUkNiRzlq + YTBsdVpHVjRJam93ZlgwPSIsInAiOiJhYmNkZWZnaGlqa2xtbm9wcXJzdHV2d3h5ekFCQ0RFRkci + fTNzlqIAAADdAAAAS/VJ/JkLOmV2ZW50LXR5cGUHAAVjaHVuaw06Y29udGVudC10eXBlBwAQYXBw + bGljYXRpb24vanNvbg06bWVzc2FnZS10eXBlBwAFZXZlbnR7ImJ5dGVzIjoiZXlKamIyNTBaVzUw + UW14dlkydEVaV3gwWVNJNmV5SmtaV3gwWVNJNmV5SjBaWGgwSWpvaUlITnZkVzVrY3lKOUxDSmpi + MjUwWlc1MFFteHZZMnRKYm1SbGVDSTZNWDE5IiwicCI6ImFiY2RlZmdoaWprbG1ubyJ9PeTnmAAA + AK8AAABLdltPNgs6ZXZlbnQtdHlwZQcABWNodW5rDTpjb250ZW50LXR5cGUHABBhcHBsaWNhdGlv + bi9qc29uDTptZXNzYWdlLXR5cGUHAAVldmVudHsiYnl0ZXMiOiJleUpqYjI1MFpXNTBRbXh2WTJ0 + VGRHOXdJanA3SW1OdmJuUmxiblJDYkc5amEwbHVaR1Y0SWpveGZYMD0iLCJwIjoiYWJjZGUifUde + LEMAAADzAAAAS4u4bfwLOmV2ZW50LXR5cGUHAAVjaHVuaw06Y29udGVudC10eXBlBwAQYXBwbGlj + YXRpb24vanNvbg06bWVzc2FnZS10eXBlBwAFZXZlbnR7ImJ5dGVzIjoiZXlKamIyNTBaVzUwUW14 + dlkydEVaV3gwWVNJNmV5SmtaV3gwWVNJNmV5SjBaWGgwSWpvaUlHeHBhMlVpZlN3aVkyOXVkR1Z1 + ZEVKc2IyTnJTVzVrWlhnaU9qSjlmUT09IiwicCI6ImFiY2RlZmdoaWprbG1ub3BxcnN0dXZ3eHl6 + QUJDREVGR0hJSksifYXE3G0AAADCAAAASxf5/MoLOmV2ZW50LXR5cGUHAAVjaHVuaw06Y29udGVu + dC10eXBlBwAQYXBwbGljYXRpb24vanNvbg06bWVzc2FnZS10eXBlBwAFZXZlbnR7ImJ5dGVzIjoi + ZXlKamIyNTBaVzUwUW14dlkydFRkRzl3SWpwN0ltTnZiblJsYm5SQ2JHOWphMGx1WkdWNElqb3lm + WDA9IiwicCI6ImFiY2RlZmdoaWprbG1ub3BxcnN0dXZ3eCJ9qa04SwAAAM0AAABLlalrGws6ZXZl + bnQtdHlwZQcABWNodW5rDTpjb250ZW50LXR5cGUHABBhcHBsaWNhdGlvbi9qc29uDTptZXNzYWdl + LXR5cGUHAAVldmVudHsiYnl0ZXMiOiJleUpqYjI1MFpXNTBRbXh2WTJ0RVpXeDBZU0k2ZXlKa1pX + eDBZU0k2ZXlKMFpYaDBJam9pSUhsdmRTSjlMQ0pqYjI1MFpXNTBRbXh2WTJ0SmJtUmxlQ0k2TTMx + OSIsInAiOiJhYmMifRT7tlwAAADeAAAAS7LphkkLOmV2ZW50LXR5cGUHAAVjaHVuaw06Y29udGVu + dC10eXBlBwAQYXBwbGljYXRpb24vanNvbg06bWVzc2FnZS10eXBlBwAFZXZlbnR7ImJ5dGVzIjoi + ZXlKamIyNTBaVzUwUW14dlkydFRkRzl3SWpwN0ltTnZiblJsYm5SQ2JHOWphMGx1WkdWNElqb3pm + WDA9IiwicCI6ImFiY2RlZmdoaWprbG1ub3BxcnN0dXZ3eHl6QUJDREVGR0hJSktMTU5PUFFSU1RV + VldYWVoifZUya0cAAADwAAAAS8wYFywLOmV2ZW50LXR5cGUHAAVjaHVuaw06Y29udGVudC10eXBl + BwAQYXBwbGljYXRpb24vanNvbg06bWVzc2FnZS10eXBlBwAFZXZlbnR7ImJ5dGVzIjoiZXlKamIy + NTBaVzUwUW14dlkydEVaV3gwWVNJNmV5SmtaV3gwWVNJNmV5SjBaWGgwSWpvaUp5SjlMQ0pqYjI1 + 
MFpXNTBRbXh2WTJ0SmJtUmxlQ0k2TkgxOSIsInAiOiJhYmNkZWZnaGlqa2xtbm9wcXJzdHV2d3h5 + ekFCQ0RFRkdISUpLTE1OT1AifViTWGoAAAC0AAAAS2Fr6aULOmV2ZW50LXR5cGUHAAVjaHVuaw06 + Y29udGVudC10eXBlBwAQYXBwbGljYXRpb24vanNvbg06bWVzc2FnZS10eXBlBwAFZXZlbnR7ImJ5 + dGVzIjoiZXlKamIyNTBaVzUwUW14dlkydFRkRzl3SWpwN0ltTnZiblJsYm5SQ2JHOWphMGx1WkdW + NElqbzBmWDA9IiwicCI6ImFiY2RlZmdoaWoifbI54e4AAADyAAAAS7bYREwLOmV2ZW50LXR5cGUH + AAVjaHVuaw06Y29udGVudC10eXBlBwAQYXBwbGljYXRpb24vanNvbg06bWVzc2FnZS10eXBlBwAF + ZXZlbnR7ImJ5dGVzIjoiZXlKamIyNTBaVzUwUW14dlkydEVaV3gwWVNJNmV5SmtaV3gwWVNJNmV5 + SjBaWGgwSWpvaWRtVWlmU3dpWTI5dWRHVnVkRUpzYjJOclNXNWtaWGdpT2pWOWZRPT0iLCJwIjoi + YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNERUZHSElKS0xNTiJ9j3DJqQAAAMMAAABLKpnV + egs6ZXZlbnQtdHlwZQcABWNodW5rDTpjb250ZW50LXR5cGUHABBhcHBsaWNhdGlvbi9qc29uDTpt + ZXNzYWdlLXR5cGUHAAVldmVudHsiYnl0ZXMiOiJleUpqYjI1MFpXNTBRbXh2WTJ0VGRHOXdJanA3 + SW1OdmJuUmxiblJDYkc5amEwbHVaR1Y0SWpvMWZYMD0iLCJwIjoiYWJjZGVmZ2hpamtsbW5vcHFy + c3R1dnd4eSJ9F6CZmwAAAQcAAABL/VBIxAs6ZXZlbnQtdHlwZQcABWNodW5rDTpjb250ZW50LXR5 + cGUHABBhcHBsaWNhdGlvbi9qc29uDTptZXNzYWdlLXR5cGUHAAVldmVudHsiYnl0ZXMiOiJleUpq + YjI1MFpXNTBRbXh2WTJ0RVpXeDBZU0k2ZXlKa1pXeDBZU0k2ZXlKMFpYaDBJam9pSUdsemMzVmxa + Q0o5TENKamIyNTBaVzUwUW14dlkydEpibVJsZUNJNk5uMTkiLCJwIjoiYWJjZGVmZ2hpamtsbW5v + cHFyc3R1dnd4eXpBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0In2LnbEKAAAAtQAAAEtc + C8AVCzpldmVudC10eXBlBwAFY2h1bmsNOmNvbnRlbnQtdHlwZQcAEGFwcGxpY2F0aW9uL2pzb24N + Om1lc3NhZ2UtdHlwZQcABWV2ZW50eyJieXRlcyI6ImV5SmpiMjUwWlc1MFFteHZZMnRUZEc5d0lq + cDdJbU52Ym5SbGJuUkNiRzlqYTBsdVpHVjRJam8yZlgwPSIsInAiOiJhYmNkZWZnaGlqayJ92s5l + DQAAAN0AAABL9Un8mQs6ZXZlbnQtdHlwZQcABWNodW5rDTpjb250ZW50LXR5cGUHABBhcHBsaWNh + dGlvbi9qc29uDTptZXNzYWdlLXR5cGUHAAVldmVudHsiYnl0ZXMiOiJleUpqYjI1MFpXNTBRbXh2 + WTJ0RVpXeDBZU0k2ZXlKa1pXeDBZU0k2ZXlKMFpYaDBJam9pSUdFaWZTd2lZMjl1ZEdWdWRFSnNi + Mk5yU1c1a1pYZ2lPamQ5ZlE9PSIsInAiOiJhYmNkZWZnaGlqa2xtbm9wcXJzIn2i7NJQAAAA0AAA + AEsN2TgoCzpldmVudC10eXBlBwAFY2h1bmsNOmNvbnRlbnQtdHlwZQcAEGFwcGxpY2F0aW9uL2pz + b24NOm1lc3NhZ2UtdHlwZQcABWV2ZW50eyJieXRlcyI6ImV5SmpiMjUwWlc1MFFteHZZMnRUZEc5 + d0lqcDdJbU52Ym5SbGJuUkNiRzlqYTBsdVpHVjRJam8zZlgwPSIsInAiOiJhYmNkZWZnaGlqa2xt + bm9wcXJzdHV2d3h5ekFCQ0RFRkdISUpLTCJ9vFdU3wAAAP8AAABLTkiA/Qs6ZXZlbnQtdHlwZQcA + BWNodW5rDTpjb250ZW50LXR5cGUHABBhcHBsaWNhdGlvbi9qc29uDTptZXNzYWdlLXR5cGUHAAVl + dmVudHsiYnl0ZXMiOiJleUpqYjI1MFpXNTBRbXh2WTJ0RVpXeDBZU0k2ZXlKa1pXeDBZU0k2ZXlK + MFpYaDBJam9pSUhSbGMzUWlmU3dpWTI5dWRHVnVkRUpzYjJOclNXNWtaWGdpT2poOWZRPT0iLCJw + IjoiYWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNERUZHSElKS0xNTk9QUVJTVFVWVyJ9KX8k + OAAAAMwAAABLqMlCqws6ZXZlbnQtdHlwZQcABWNodW5rDTpjb250ZW50LXR5cGUHABBhcHBsaWNh + dGlvbi9qc29uDTptZXNzYWdlLXR5cGUHAAVldmVudHsiYnl0ZXMiOiJleUpqYjI1MFpXNTBRbXh2 + WTJ0VGRHOXdJanA3SW1OdmJuUmxiblJDYkc5amEwbHVaR1Y0SWpvNGZYMD0iLCJwIjoiYWJjZGVm + Z2hpamtsbW5vcHFyc3R1dnd4eXpBQkNERUZHSCJ9PV9naQAAAPAAAABLzBgXLAs6ZXZlbnQtdHlw + ZQcABWNodW5rDTpjb250ZW50LXR5cGUHABBhcHBsaWNhdGlvbi9qc29uDTptZXNzYWdlLXR5cGUH + AAVldmVudHsiYnl0ZXMiOiJleUpqYjI1MFpXNTBRbXh2WTJ0RVpXeDBZU0k2ZXlKa1pXeDBZU0k2 + ZXlKMFpYaDBJam9pSUhCeWIyMXdkQ0o5TENKamIyNTBaVzUwUW14dlkydEpibVJsZUNJNk9YMTki + LCJwIjoiYWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNERUZHSCJ9r7jZaQAAANEAAABLMLkR + mAs6ZXZlbnQtdHlwZQcABWNodW5rDTpjb250ZW50LXR5cGUHABBhcHBsaWNhdGlvbi9qc29uDTpt + ZXNzYWdlLXR5cGUHAAVldmVudHsiYnl0ZXMiOiJleUpqYjI1MFpXNTBRbXh2WTJ0VGRHOXdJanA3 + SW1OdmJuUmxiblJDYkc5amEwbHVaR1Y0SWpvNWZYMD0iLCJwIjoiYWJjZGVmZ2hpamtsbW5vcHFy + c3R1dnd4eXpBQkNERUZHSElKS0xNIn3bLXYKAAAA3QAAAEv1SfyZCzpldmVudC10eXBlBwAFY2h1 + bmsNOmNvbnRlbnQtdHlwZQcAEGFwcGxpY2F0aW9uL2pzb24NOm1lc3NhZ2UtdHlwZQcABWV2ZW50 + 
eyJieXRlcyI6ImV5SnRaWE56WVdkbFUzUnZjQ0k2ZXlKemRHOXdVbVZoYzI5dUlqb2liV0Y0WDNS + dmEyVnVjeUo5ZlE9PSIsInAiOiJhYmNkZWZnaGlqa2xtbm9wcXJzdHV2d3h5ekFCQ0RFRkdISUpL + TE1OT1BRUlNUVVZXWFkifTuIaJUAAAGLAAAAS4lGFVcLOmV2ZW50LXR5cGUHAAVjaHVuaw06Y29u + dGVudC10eXBlBwAQYXBwbGljYXRpb24vanNvbg06bWVzc2FnZS10eXBlBwAFZXZlbnR7ImJ5dGVz + IjoiZXlKdFpYUmhaR0YwWVNJNmV5SjFjMkZuWlNJNmV5SnBibkIxZEZSdmEyVnVjeUk2TlN3aWIz + VjBjSFYwVkc5clpXNXpJam94TUgwc0ltMWxkSEpwWTNNaU9udDlMQ0owY21GalpTSTZlMzE5TENK + aGJXRjZiMjR0WW1Wa2NtOWpheTFwYm5adlkyRjBhVzl1VFdWMGNtbGpjeUk2ZXlKcGJuQjFkRlJ2 + YTJWdVEyOTFiblFpT2pVc0ltOTFkSEIxZEZSdmEyVnVRMjkxYm5RaU9qRXdMQ0pwYm5adlkyRjBh + Vzl1VEdGMFpXNWplU0k2TVRjM0xDSm1hWEp6ZEVKNWRHVk1ZWFJsYm1ONUlqbzFPWDE5IiwicCI6 + ImFiY2RlZmdoaSJ9lvCfnQ== + headers: + Connection: + - keep-alive + Content-Type: + - application/vnd.amazon.eventstream + Date: + - Fri, 24 Jan 2025 13:44:37 GMT + Set-Cookie: test_set_cookie + Transfer-Encoding: + - chunked + X-Amzn-Bedrock-Content-Type: + - application/json + x-amzn-RequestId: + - 632c3a74-f6a4-43e5-b8ff-9c2f84daf7a6 + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_response_stream_with_content[amazon.titan].yaml b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_response_stream_with_content[amazon.titan].yaml new file mode 100644 index 0000000000..50f920bf3a --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_response_stream_with_content[amazon.titan].yaml @@ -0,0 +1,61 @@ +interactions: +- request: + body: '{"inputText": "Say this is a test", "textGenerationConfig": {"maxTokenCount": + 10, "temperature": 0.8, "topP": 1, "stopSequences": ["|"]}}' + headers: + Content-Length: + - '137' + User-Agent: + - !!binary | + Qm90bzMvMS4zNS41NiBtZC9Cb3RvY29yZSMxLjM1LjU2IHVhLzIuMCBvcy9saW51eCM2LjEuMC0x + MDM0LW9lbSBtZC9hcmNoI3g4Nl82NCBsYW5nL3B5dGhvbiMzLjEwLjEyIG1kL3B5aW1wbCNDUHl0 + aG9uIGNmZy9yZXRyeS1tb2RlI2xlZ2FjeSBCb3RvY29yZS8xLjM1LjU2 + X-Amz-Date: + - !!binary | + MjAyNTAxMjRUMTM0NDM3Wg== + X-Amz-Security-Token: + - test_aws_security_token + X-Amzn-Trace-Id: + - !!binary | + Um9vdD0xLTZmYzFlYzExLTczOTA3MmQwMDhhNzRhNjI3ZDg2NDI4ODtQYXJlbnQ9ZGQ0MjJhMjdl + MDdiMDU4NjtTYW1wbGVkPTE= + amz-sdk-invocation-id: + - !!binary | + OTNhOWIwYTctMGJmNi00NGI4LWJlNzItNDFiY2Y0NzdjNWQ5 + amz-sdk-request: + - !!binary | + YXR0ZW1wdD0x + authorization: + - Bearer test_aws_authorization + method: POST + uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/amazon.titan-text-lite-v1/invoke-with-response-stream + response: + body: + string: !!binary | + AAAB9QAAAEvPpEv5CzpldmVudC10eXBlBwAFY2h1bmsNOmNvbnRlbnQtdHlwZQcAEGFwcGxpY2F0 + aW9uL2pzb24NOm1lc3NhZ2UtdHlwZQcABWV2ZW50eyJieXRlcyI6ImV5SnZkWFJ3ZFhSVVpYaDBJ + am9pTGx4dVRXOXlaU0JwYm1admNtMWhkR2x2YmlCM2FXeHNJR0psSUc1bFpXUmxaQ0JpWldadmNt + VWdZU0J3Y205d1pYSWlMQ0pwYm1SbGVDSTZNQ3dpZEc5MFlXeFBkWFJ3ZFhSVVpYaDBWRzlyWlc1 + RGIzVnVkQ0k2TVRBc0ltTnZiWEJzWlhScGIyNVNaV0Z6YjI0aU9pSk1SVTVIVkVnaUxDSnBibkIx + ZEZSbGVIUlViMnRsYmtOdmRXNTBJam8xTENKaGJXRjZiMjR0WW1Wa2NtOWpheTFwYm5adlkyRjBh + Vzl1VFdWMGNtbGpjeUk2ZXlKcGJuQjFkRlJ2YTJWdVEyOTFiblFpT2pVc0ltOTFkSEIxZEZSdmEy + VnVRMjkxYm5RaU9qRXdMQ0pwYm5adlkyRjBhVzl1VEdGMFpXNWplU0k2TnpBNExDSm1hWEp6ZEVK + NWRHVk1ZWFJsYm1ONUlqbzNNRGQ5ZlE9PSIsInAiOiJhYmNkZWZnIn2cuo/H + headers: + Connection: + - keep-alive + Content-Type: + - application/vnd.amazon.eventstream + Date: + - Fri, 24 Jan 2025 13:44:38 GMT + Set-Cookie: 
test_set_cookie + Transfer-Encoding: + - chunked + X-Amzn-Bedrock-Content-Type: + - application/json + x-amzn-RequestId: + - 47aaeeb3-fa59-4aae-b480-01632279e2fa + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_response_stream_with_content[anthropic.claude].yaml b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_response_stream_with_content[anthropic.claude].yaml new file mode 100644 index 0000000000..03a48c80e7 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_response_stream_with_content[anthropic.claude].yaml @@ -0,0 +1,124 @@ +interactions: +- request: + body: '{"messages": [{"role": "user", "content": [{"text": "Say this is a test", + "type": "text"}]}], "anthropic_version": "bedrock-2023-05-31", "max_tokens": + 10, "temperature": 0.8, "top_p": 1, "stop_sequences": ["|"]}' + headers: + Content-Length: + - '211' + User-Agent: + - !!binary | + Qm90bzMvMS4zNS41NiBtZC9Cb3RvY29yZSMxLjM1LjU2IHVhLzIuMCBvcy9saW51eCM2LjEuMC0x + MDM0LW9lbSBtZC9hcmNoI3g4Nl82NCBsYW5nL3B5dGhvbiMzLjEwLjEyIG1kL3B5aW1wbCNDUHl0 + aG9uIGNmZy9yZXRyeS1tb2RlI2xlZ2FjeSBCb3RvY29yZS8xLjM1LjU2 + X-Amz-Date: + - !!binary | + MjAyNTAxMjRUMTM0NDM4Wg== + X-Amz-Security-Token: + - test_aws_security_token + X-Amzn-Trace-Id: + - !!binary | + Um9vdD0xLWY5MjBjYzFhLTI1ZGI4MTgwYTZiOGQ3YWQ0MDI0Zjg3YTtQYXJlbnQ9NDhlOTc1MmFm + NjZjMTA5ODtTYW1wbGVkPTE= + amz-sdk-invocation-id: + - !!binary | + NGQ1NzllYzAtZDIxZi00NTVhLTkxOGMtNDgxNjE3ZjliZjQx + amz-sdk-request: + - !!binary | + YXR0ZW1wdD0x + authorization: + - Bearer test_aws_authorization + method: POST + uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/anthropic.claude-v2/invoke-with-response-stream + response: + body: + string: !!binary | + AAABsQAAAEti17VwCzpldmVudC10eXBlBwAFY2h1bmsNOmNvbnRlbnQtdHlwZQcAEGFwcGxpY2F0 + aW9uL2pzb24NOm1lc3NhZ2UtdHlwZQcABWV2ZW50eyJieXRlcyI6ImV5SjBlWEJsSWpvaWJXVnpj + MkZuWlY5emRHRnlkQ0lzSW0xbGMzTmhaMlVpT25zaWFXUWlPaUp0YzJkZlltUnlhMTh3TVRaVWNW + Vk5aRU5GY0VWYU5YaDNURGhRYVdwSWJ6VWlMQ0owZVhCbElqb2liV1Z6YzJGblpTSXNJbkp2YkdV + aU9pSmhjM05wYzNSaGJuUWlMQ0p0YjJSbGJDSTZJbU5zWVhWa1pTMHlMakFpTENKamIyNTBaVzUw + SWpwYlhTd2ljM1J2Y0Y5eVpXRnpiMjRpT201MWJHd3NJbk4wYjNCZmMyVnhkV1Z1WTJVaU9tNTFi + R3dzSW5WellXZGxJanA3SW1sdWNIVjBYM1J2YTJWdWN5STZNVFFzSW05MWRIQjFkRjkwYjJ0bGJu + TWlPakY5ZlgwPSIsInAiOiJhYmNkZWZnaGlqayJ9gkyXHwAAAOIAAABL1jjTzgs6ZXZlbnQtdHlw + ZQcABWNodW5rDTpjb250ZW50LXR5cGUHABBhcHBsaWNhdGlvbi9qc29uDTptZXNzYWdlLXR5cGUH + AAVldmVudHsiYnl0ZXMiOiJleUowZVhCbElqb2lZMjl1ZEdWdWRGOWliRzlqYTE5emRHRnlkQ0lz + SW1sdVpHVjRJam93TENKamIyNTBaVzUwWDJKc2IyTnJJanA3SW5SNWNHVWlPaUowWlhoMElpd2lk + R1Y0ZENJNklpSjlmUT09IiwicCI6ImFiY2QifScLG7kAAAEOAAAAS/BAKrULOmV2ZW50LXR5cGUH + AAVjaHVuaw06Y29udGVudC10eXBlBwAQYXBwbGljYXRpb24vanNvbg06bWVzc2FnZS10eXBlBwAF + ZXZlbnR7ImJ5dGVzIjoiZXlKMGVYQmxJam9pWTI5dWRHVnVkRjlpYkc5amExOWtaV3gwWVNJc0lt + bHVaR1Y0SWpvd0xDSmtaV3gwWVNJNmV5SjBlWEJsSWpvaWRHVjRkRjlrWld4MFlTSXNJblJsZUhR + aU9pSlBhMkY1SW4xOSIsInAiOiJhYmNkZWZnaGlqa2xtbm9wcXJzdHV2d3h5ekFCQ0RFRkdISUpL + TE1OT1BRUlNUVVYifZyNweMAAADxAAAAS/F4PpwLOmV2ZW50LXR5cGUHAAVjaHVuaw06Y29udGVu + dC10eXBlBwAQYXBwbGljYXRpb24vanNvbg06bWVzc2FnZS10eXBlBwAFZXZlbnR7ImJ5dGVzIjoi + ZXlKMGVYQmxJam9pWTI5dWRHVnVkRjlpYkc5amExOWtaV3gwWVNJc0ltbHVaR1Y0SWpvd0xDSmta + V3gwWVNJNmV5SjBlWEJsSWpvaWRHVjRkRjlrWld4MFlTSXNJblJsZUhRaU9pSXNJbjE5IiwicCI6 + ImFiY2RlZmdoaWprbG1ub3BxcnN0dXZ3In2bsnIkAAABAQAAAEtyEL1kCzpldmVudC10eXBlBwAF + 
Y2h1bmsNOmNvbnRlbnQtdHlwZQcAEGFwcGxpY2F0aW9uL2pzb24NOm1lc3NhZ2UtdHlwZQcABWV2 + ZW50eyJieXRlcyI6ImV5SjBlWEJsSWpvaVkyOXVkR1Z1ZEY5aWJHOWphMTlrWld4MFlTSXNJbWx1 + WkdWNElqb3dMQ0prWld4MFlTSTZleUowZVhCbElqb2lkR1Y0ZEY5a1pXeDBZU0lzSW5SbGVIUWlP + aUlnU1NKOWZRPT0iLCJwIjoiYWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNERUZHSEkifS+F + e3sAAAEFAAAAS4eQG6QLOmV2ZW50LXR5cGUHAAVjaHVuaw06Y29udGVudC10eXBlBwAQYXBwbGlj + YXRpb24vanNvbg06bWVzc2FnZS10eXBlBwAFZXZlbnR7ImJ5dGVzIjoiZXlKMGVYQmxJam9pWTI5 + dWRHVnVkRjlpYkc5amExOWtaV3gwWVNJc0ltbHVaR1Y0SWpvd0xDSmtaV3gwWVNJNmV5SjBlWEJs + SWpvaWRHVjRkRjlrWld4MFlTSXNJblJsZUhRaU9pSWdkMmxzYkNKOWZRPT0iLCJwIjoiYWJjZGVm + Z2hpamtsbW5vcHFyc3R1dnd4eXpBQkNERUZHSEkifXVHT8sAAAD2AAAAS0NY4owLOmV2ZW50LXR5 + cGUHAAVjaHVuaw06Y29udGVudC10eXBlBwAQYXBwbGljYXRpb24vanNvbg06bWVzc2FnZS10eXBl + BwAFZXZlbnR7ImJ5dGVzIjoiZXlKMGVYQmxJam9pWTI5dWRHVnVkRjlpYkc5amExOWtaV3gwWVNJ + c0ltbHVaR1Y0SWpvd0xDSmtaV3gwWVNJNmV5SjBlWEJsSWpvaWRHVjRkRjlrWld4MFlTSXNJblJs + ZUhRaU9pSWdjMkY1SW4xOSIsInAiOiJhYmNkZWZnaGlqa2xtbm9wcXJzdHV2d3gifdFfmr0AAAD2 + AAAAS0NY4owLOmV2ZW50LXR5cGUHAAVjaHVuaw06Y29udGVudC10eXBlBwAQYXBwbGljYXRpb24v + anNvbg06bWVzc2FnZS10eXBlBwAFZXZlbnR7ImJ5dGVzIjoiZXlKMGVYQmxJam9pWTI5dWRHVnVk + RjlpYkc5amExOWtaV3gwWVNJc0ltbHVaR1Y0SWpvd0xDSmtaV3gwWVNJNmV5SjBlWEJsSWpvaWRH + VjRkRjlrWld4MFlTSXNJblJsZUhRaU9pSWdYQ0lpZlgwPSIsInAiOiJhYmNkZWZnaGlqa2xtbm9w + cXJzdHV2d3gifXU4Q4UAAADzAAAAS4u4bfwLOmV2ZW50LXR5cGUHAAVjaHVuaw06Y29udGVudC10 + eXBlBwAQYXBwbGljYXRpb24vanNvbg06bWVzc2FnZS10eXBlBwAFZXZlbnR7ImJ5dGVzIjoiZXlK + MGVYQmxJam9pWTI5dWRHVnVkRjlpYkc5amExOWtaV3gwWVNJc0ltbHVaR1Y0SWpvd0xDSmtaV3gw + WVNJNmV5SjBlWEJsSWpvaWRHVjRkRjlrWld4MFlTSXNJblJsZUhRaU9pSlVhR2x6SW4xOSIsInAi + OiJhYmNkZWZnaGlqa2xtbm9wcXJzdHUifZY82M0AAAEPAAAAS80gAwULOmV2ZW50LXR5cGUHAAVj + aHVuaw06Y29udGVudC10eXBlBwAQYXBwbGljYXRpb24vanNvbg06bWVzc2FnZS10eXBlBwAFZXZl + bnR7ImJ5dGVzIjoiZXlKMGVYQmxJam9pWTI5dWRHVnVkRjlpYkc5amExOWtaV3gwWVNJc0ltbHVa + R1Y0SWpvd0xDSmtaV3gwWVNJNmV5SjBlWEJsSWpvaWRHVjRkRjlrWld4MFlTSXNJblJsZUhRaU9p + SWdhWE1pZlgwPSIsInAiOiJhYmNkZWZnaGlqa2xtbm9wcXJzdHV2d3h5ekFCQ0RFRkdISUpLTE1O + T1BRUlNUVVZXIn2PKfsQAAAA+wAAAEu7yCY9CzpldmVudC10eXBlBwAFY2h1bmsNOmNvbnRlbnQt + dHlwZQcAEGFwcGxpY2F0aW9uL2pzb24NOm1lc3NhZ2UtdHlwZQcABWV2ZW50eyJieXRlcyI6ImV5 + SjBlWEJsSWpvaVkyOXVkR1Z1ZEY5aWJHOWphMTlrWld4MFlTSXNJbWx1WkdWNElqb3dMQ0prWld4 + MFlTSTZleUowZVhCbElqb2lkR1Y0ZEY5a1pXeDBZU0lzSW5SbGVIUWlPaUlnWVNKOWZRPT0iLCJw + IjoiYWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkMifWGL95MAAAEKAAAASwXAjHULOmV2ZW50 + LXR5cGUHAAVjaHVuaw06Y29udGVudC10eXBlBwAQYXBwbGljYXRpb24vanNvbg06bWVzc2FnZS10 + eXBlBwAFZXZlbnR7ImJ5dGVzIjoiZXlKMGVYQmxJam9pWTI5dWRHVnVkRjlpYkc5amExOWtaV3gw + WVNJc0ltbHVaR1Y0SWpvd0xDSmtaV3gwWVNJNmV5SjBlWEJsSWpvaWRHVjRkRjlrWld4MFlTSXNJ + blJsZUhRaU9pSWdkR1Z6ZENKOWZRPT0iLCJwIjoiYWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpB + QkNERUZHSElKS0xNTiJ9xf99iwAAALwAAABLURuiZAs6ZXZlbnQtdHlwZQcABWNodW5rDTpjb250 + ZW50LXR5cGUHABBhcHBsaWNhdGlvbi9qc29uDTptZXNzYWdlLXR5cGUHAAVldmVudHsiYnl0ZXMi + OiJleUowZVhCbElqb2lZMjl1ZEdWdWRGOWliRzlqYTE5emRHOXdJaXdpYVc1a1pYZ2lPakI5Iiwi + cCI6ImFiY2RlZmdoaWprbG1ub3BxcnN0dXZ3eHl6In0ErvuiAAABDgAAAEvwQCq1CzpldmVudC10 + eXBlBwAFY2h1bmsNOmNvbnRlbnQtdHlwZQcAEGFwcGxpY2F0aW9uL2pzb24NOm1lc3NhZ2UtdHlw + ZQcABWV2ZW50eyJieXRlcyI6ImV5SjBlWEJsSWpvaWJXVnpjMkZuWlY5a1pXeDBZU0lzSW1SbGJI + UmhJanA3SW5OMGIzQmZjbVZoYzI5dUlqb2liV0Y0WDNSdmEyVnVjeUlzSW5OMGIzQmZjMlZ4ZFdW + dVkyVWlPbTUxYkd4OUxDSjFjMkZuWlNJNmV5SnZkWFJ3ZFhSZmRHOXJaVzV6SWpveE1IMTkiLCJw + IjoiYWJjZGVmZ2hpamtsIn0VL5oQAAABSQAAAEsak67sCzpldmVudC10eXBlBwAFY2h1bmsNOmNv + bnRlbnQtdHlwZQcAEGFwcGxpY2F0aW9uL2pzb24NOm1lc3NhZ2UtdHlwZQcABWV2ZW50eyJieXRl + 
cyI6ImV5SjBlWEJsSWpvaWJXVnpjMkZuWlY5emRHOXdJaXdpWVcxaGVtOXVMV0psWkhKdlkyc3Rh + VzUyYjJOaGRHbHZiazFsZEhKcFkzTWlPbnNpYVc1d2RYUlViMnRsYmtOdmRXNTBJam94TkN3aWIz + VjBjSFYwVkc5clpXNURiM1Z1ZENJNk1UQXNJbWx1ZG05allYUnBiMjVNWVhSbGJtTjVJam8xT1Rj + c0ltWnBjbk4wUW5sMFpVeGhkR1Z1WTNraU9qSTVNMzE5IiwicCI6ImFiY2RlZmdoaWprbG1ub3Bx + cnMifV7iQSc= + headers: + Connection: + - keep-alive + Content-Type: + - application/vnd.amazon.eventstream + Date: + - Fri, 24 Jan 2025 13:44:39 GMT + Set-Cookie: test_set_cookie + Transfer-Encoding: + - chunked + X-Amzn-Bedrock-Content-Type: + - application/json + x-amzn-RequestId: + - e52df188-e57f-43bb-a1bf-cfb42fd11fcd + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_bedrock.py b/instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_bedrock.py index ce3b4375e9..b9f5589988 100644 --- a/instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_bedrock.py +++ b/instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_bedrock.py @@ -27,7 +27,7 @@ from .bedrock_utils import ( assert_completion_attributes_from_streaming_body, assert_converse_completion_attributes, - assert_converse_stream_completion_attributes, + assert_stream_completion_attributes, ) BOTO3_VERSION = tuple(int(x) for x in boto3.__version__.split(".")) @@ -149,9 +149,12 @@ def test_converse_stream_with_content( output_tokens = usage["outputTokens"] assert text + assert finish_reason + assert input_tokens + assert output_tokens (span,) = span_exporter.get_finished_spans() - assert_converse_stream_completion_attributes( + assert_stream_completion_attributes( span, llm_model_value, input_tokens, @@ -188,7 +191,7 @@ def test_converse_stream_with_invalid_model( ) (span,) = span_exporter.get_finished_spans() - assert_converse_stream_completion_attributes( + assert_stream_completion_attributes( span, llm_model_value, operation_name="chat", @@ -322,3 +325,118 @@ def test_invoke_model_with_invalid_model( logs = log_exporter.get_finished_logs() assert len(logs) == 0 + + +@pytest.mark.parametrize( + "model_family", + ["amazon.nova", "amazon.titan", "anthropic.claude"], +) +@pytest.mark.vcr() +def test_invoke_model_with_response_stream_with_content( + span_exporter, + log_exporter, + bedrock_runtime_client, + instrument_with_content, + model_family, +): + # pylint:disable=too-many-locals + llm_model_value = get_model_name_from_family(model_family) + max_tokens, temperature, top_p, stop_sequences = 10, 0.8, 1, ["|"] + body = get_invoke_model_body( + llm_model_value, max_tokens, temperature, top_p, stop_sequences + ) + response = bedrock_runtime_client.invoke_model_with_response_stream( + body=body, + modelId=llm_model_value, + ) + + # consume the stream in order to have it traced + finish_reason = None + input_tokens, output_tokens = None, None + text = "" + for event in response["body"]: + json_bytes = event["chunk"].get("bytes", b"") + decoded = json_bytes.decode("utf-8") + chunk = json.loads(decoded) + + # amazon.titan + if (stop_reason := chunk.get("completionReason")) is not None: + finish_reason = stop_reason + + if (output_text := chunk.get("outputText")) is not None: + text += output_text + + # amazon.titan, anthropic.claude + if invocation_metrics := chunk.get("amazon-bedrock-invocationMetrics"): + input_tokens = invocation_metrics["inputTokenCount"] + output_tokens = invocation_metrics["outputTokenCount"] + + # anthropic.claude + if (message_type := chunk.get("type")) is not 
None: + if message_type == "content_block_start": + text += chunk["content_block"]["text"] + elif message_type == "content_block_delta": + text += chunk["delta"]["text"] + elif message_type == "message_delta": + finish_reason = chunk["delta"]["stop_reason"] + + # amazon nova + if "contentBlockDelta" in chunk: + text += chunk["contentBlockDelta"]["delta"]["text"] + if "messageStop" in chunk: + finish_reason = chunk["messageStop"]["stopReason"] + if "metadata" in chunk: + usage = chunk["metadata"]["usage"] + input_tokens = usage["inputTokens"] + output_tokens = usage["outputTokens"] + + assert text + assert finish_reason + assert input_tokens + assert output_tokens + + (span,) = span_exporter.get_finished_spans() + assert_stream_completion_attributes( + span, + llm_model_value, + input_tokens, + output_tokens, + (finish_reason,), + "text_completion" if model_family == "amazon.titan" else "chat", + top_p, + temperature, + max_tokens, + stop_sequences, + ) + + logs = log_exporter.get_finished_logs() + assert len(logs) == 0 + + +@pytest.mark.vcr() +def test_invoke_model_with_response_stream_invalid_model( + span_exporter, + log_exporter, + bedrock_runtime_client, + instrument_with_content, +): + llm_model_value = "does-not-exist" + with pytest.raises(bedrock_runtime_client.exceptions.ClientError): + bedrock_runtime_client.invoke_model_with_response_stream( + body=b"", + modelId=llm_model_value, + ) + + (span,) = span_exporter.get_finished_spans() + assert_completion_attributes_from_streaming_body( + span, + llm_model_value, + None, + "chat", + ) + + assert span.status.status_code == StatusCode.ERROR + assert span.attributes[ERROR_TYPE] == "ValidationException" + + logs = log_exporter.get_finished_logs() + assert len(logs) == 0 From eabab7d0ad2b5f9c2df71a1561d82bfb8ac44094 Mon Sep 17 00:00:00 2001 From: Riccardo Magliocchetti Date: Tue, 28 Jan 2025 14:31:11 +0100 Subject: [PATCH 10/16] Fix changelog (#3213) --- CHANGELOG.md | 8 -------- 1 file changed, 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 543d7dab4d..ce7f9acf03 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -67,14 +67,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `opentelemetry-instrumentation-sqlalchemy` including sqlcomment in `db.statement` span attribute value is now opt-in ([#3112](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3112)) - -### Breaking changes - -- `opentelemetry-instrumentation-dbapi` including sqlcomment in `db.statement` span attribute value is now opt-in - ([#3115](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3115)) - -### Breaking changes - - `opentelemetry-instrumentation-dbapi` including sqlcomment in `db.statement` span attribute value is now opt-in ([#3115](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3115)) - `opentelemetry-instrumentation-psycopg2`, `opentelemetry-instrumentation-psycopg`, `opentelemetry-instrumentation-mysqlclient`, `opentelemetry-instrumentation-pymysql`: including sqlcomment in `db.statement` span attribute value is now opt-in From dd682419074bfd7b054a40e346e95056bee7801c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Em=C3=ADdio=20Neto?= <9735060+emdneto@users.noreply.github.com> Date: Tue, 28 Jan 2025 11:05:19 -0300 Subject: [PATCH 11/16] fix pymssql entry_point for auto-instrumentation (#3214) Signed-off-by: emdneto <9735060+emdneto@users.noreply.github.com> Co-authored-by: Riccardo Magliocchetti --- .../pyproject.toml | 2 +- .../tests/test_pymssql_integration.py | 
13 +++++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/instrumentation/opentelemetry-instrumentation-pymssql/pyproject.toml b/instrumentation/opentelemetry-instrumentation-pymssql/pyproject.toml index 5360007a5c..ac0e839e5c 100644 --- a/instrumentation/opentelemetry-instrumentation-pymssql/pyproject.toml +++ b/instrumentation/opentelemetry-instrumentation-pymssql/pyproject.toml @@ -37,7 +37,7 @@ instruments = [ ] [project.entry-points.opentelemetry_instrumentor] -pymssql = "opentelemetry.instrumentation.pymssql:pymssqlInstrumentor" +pymssql = "opentelemetry.instrumentation.pymssql:PyMSSQLInstrumentor" [project.urls] Homepage = "https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-pymssql" diff --git a/instrumentation/opentelemetry-instrumentation-pymssql/tests/test_pymssql_integration.py b/instrumentation/opentelemetry-instrumentation-pymssql/tests/test_pymssql_integration.py index ccd522b165..3dc937318a 100644 --- a/instrumentation/opentelemetry-instrumentation-pymssql/tests/test_pymssql_integration.py +++ b/instrumentation/opentelemetry-instrumentation-pymssql/tests/test_pymssql_integration.py @@ -20,6 +20,7 @@ from opentelemetry.instrumentation.pymssql import PyMSSQLInstrumentor from opentelemetry.sdk import resources from opentelemetry.test.test_base import TestBase +from opentelemetry.util._importlib_metadata import entry_points def mock_connect(*args, **kwargs): @@ -182,3 +183,15 @@ def test_uninstrument_connection(self): spans_list = self.memory_exporter.get_finished_spans() self.assertEqual(len(spans_list), 1) + + def test_load_entry_point(self): + self.assertIs( + next( + iter( + entry_points( + group="opentelemetry_instrumentor", name="pymssql" + ) + ) + ).load(), + PyMSSQLInstrumentor, + ) From 731054f736461519b03c94ec850e8d141dfcb517 Mon Sep 17 00:00:00 2001 From: Aaron Abbott Date: Tue, 28 Jan 2025 11:46:13 -0500 Subject: [PATCH 12/16] Add server attributes to Vertex AI spans (#3208) --- .../CHANGELOG.md | 2 ++ .../instrumentation/vertexai/patch.py | 7 +++- .../instrumentation/vertexai/utils.py | 20 ++++++++++++ .../tests/test_chat_completions.py | 10 ++++++ .../tests/test_utils.py | 32 +++++++++++++++++++ 5 files changed, 70 insertions(+), 1 deletion(-) create mode 100644 instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/test_utils.py diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai/CHANGELOG.md b/instrumentation-genai/opentelemetry-instrumentation-vertexai/CHANGELOG.md index 4e43fbff19..44725df52d 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai/CHANGELOG.md +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai/CHANGELOG.md @@ -11,3 +11,5 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ([#3192](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3192)) - Initial VertexAI instrumentation ([#3123](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3123)) +- Add server attributes to Vertex AI spans + ([#3208](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3208)) diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/patch.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/patch.py index 36a31045b5..ecb87e4360 100644 --- 
a/instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/patch.py +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/patch.py @@ -25,6 +25,7 @@ from opentelemetry.instrumentation.vertexai.utils import ( GenerateContentParams, get_genai_request_attributes, + get_server_attributes, get_span_name, ) from opentelemetry.trace import SpanKind, Tracer @@ -100,7 +101,11 @@ def traced_method( kwargs: Any, ): params = _extract_params(*args, **kwargs) - span_attributes = get_genai_request_attributes(params) + api_endpoint: str = instance.api_endpoint # type: ignore[reportUnknownMemberType] + span_attributes = { + **get_genai_request_attributes(params), + **get_server_attributes(api_endpoint), + } span_name = get_span_name(span_attributes) with tracer.start_as_current_span( diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/utils.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/utils.py index 96d7125028..e4297bc878 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/utils.py +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/utils.py @@ -22,10 +22,12 @@ Mapping, Sequence, ) +from urllib.parse import urlparse from opentelemetry.semconv._incubating.attributes import ( gen_ai_attributes as GenAIAttributes, ) +from opentelemetry.semconv.attributes import server_attributes from opentelemetry.util.types import AttributeValue if TYPE_CHECKING: @@ -58,6 +60,24 @@ class GenerateContentParams: ) = None +def get_server_attributes( + endpoint: str, +) -> dict[str, AttributeValue]: + """Get server.* attributes from the endpoint, which is a hostname with optional port e.g. 
+ - ``us-central1-aiplatform.googleapis.com`` + - ``us-central1-aiplatform.googleapis.com:5431`` + """ + parsed = urlparse(f"scheme://{endpoint}") + + if not parsed.hostname: + return {} + + return { + server_attributes.SERVER_ADDRESS: parsed.hostname, + server_attributes.SERVER_PORT: parsed.port or 443, + } + + def get_genai_request_attributes( params: GenerateContentParams, operation_name: GenAIAttributes.GenAiOperationNameValues = GenAIAttributes.GenAiOperationNameValues.CHAT, diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/test_chat_completions.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/test_chat_completions.py index 63a2e2c2d1..2582a086f6 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/test_chat_completions.py +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/test_chat_completions.py @@ -34,6 +34,8 @@ def test_generate_content( "gen_ai.operation.name": "chat", "gen_ai.request.model": "gemini-1.5-flash-002", "gen_ai.system": "vertex_ai", + "server.address": "us-central1-aiplatform.googleapis.com", + "server.port": 443, } @@ -62,6 +64,8 @@ def test_generate_content_empty_model( "gen_ai.operation.name": "chat", "gen_ai.request.model": "", "gen_ai.system": "vertex_ai", + "server.address": "us-central1-aiplatform.googleapis.com", + "server.port": 443, } assert_span_error(spans[0]) @@ -91,6 +95,8 @@ def test_generate_content_missing_model( "gen_ai.operation.name": "chat", "gen_ai.request.model": "gemini-does-not-exist", "gen_ai.system": "vertex_ai", + "server.address": "us-central1-aiplatform.googleapis.com", + "server.port": 443, } assert_span_error(spans[0]) @@ -122,6 +128,8 @@ def test_generate_content_invalid_temperature( "gen_ai.request.model": "gemini-1.5-flash-002", "gen_ai.request.temperature": 1000.0, "gen_ai.system": "vertex_ai", + "server.address": "us-central1-aiplatform.googleapis.com", + "server.port": 443, } assert_span_error(spans[0]) @@ -158,6 +166,8 @@ def test_generate_content_extra_params(span_exporter, instrument_no_content): "gen_ai.request.temperature": 0.20000000298023224, "gen_ai.request.top_p": 0.949999988079071, "gen_ai.system": "vertex_ai", + "server.address": "us-central1-aiplatform.googleapis.com", + "server.port": 443, } diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/test_utils.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/test_utils.py new file mode 100644 index 0000000000..082ea72ad9 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/test_utils.py @@ -0,0 +1,32 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +from opentelemetry.instrumentation.vertexai.utils import get_server_attributes + + +def test_get_server_attributes() -> None: + # without port + assert get_server_attributes("us-central1-aiplatform.googleapis.com") == { + "server.address": "us-central1-aiplatform.googleapis.com", + "server.port": 443, + } + + # with port + assert get_server_attributes( + "us-central1-aiplatform.googleapis.com:5432" + ) == { + "server.address": "us-central1-aiplatform.googleapis.com", + "server.port": 5432, + } From 44754e2a505e13a3382ae1640cf7541bbcb74e43 Mon Sep 17 00:00:00 2001 From: Riccardo Magliocchetti Date: Tue, 28 Jan 2025 20:55:40 +0100 Subject: [PATCH 13/16] botocore: handle exceptions when consuming EventStream in bedrock extension (#3211) --- .../botocore/extensions/bedrock.py | 19 ++- .../botocore/extensions/bedrock_utils.py | 34 ++++-- ...rse_stream_handles_event_stream_error.yaml | 71 +++++++++++ ..._response_stream_handles_stream_error.yaml | 62 ++++++++++ .../tests/test_botocore_bedrock.py | 111 ++++++++++++++++++ 5 files changed, 286 insertions(+), 11 deletions(-) create mode 100644 instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_stream_handles_event_stream_error.yaml create mode 100644 instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_response_stream_handles_stream_error.yaml diff --git a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py index 186029eadf..6d6bbce6ac 100644 --- a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py +++ b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py @@ -266,6 +266,12 @@ def _invoke_model_on_success( if original_body is not None: original_body.close() + def _on_stream_error_callback(self, span: Span, exception): + span.set_status(Status(StatusCode.ERROR, str(exception))) + if span.is_recording(): + span.set_attribute(ERROR_TYPE, type(exception).__qualname__) + span.end() + def on_success(self, span: Span, result: dict[str, Any]): if self._call_context.operation not in self._HANDLED_OPERATIONS: return @@ -282,8 +288,11 @@ def stream_done_callback(response): self._converse_on_success(span, response) span.end() + def stream_error_callback(exception): + self._on_stream_error_callback(span, exception) + result["stream"] = ConverseStreamWrapper( - result["stream"], stream_done_callback + result["stream"], stream_done_callback, stream_error_callback ) return @@ -307,8 +316,14 @@ def invoke_model_stream_done_callback(response): self._converse_on_success(span, response) span.end() + def invoke_model_stream_error_callback(exception): + self._on_stream_error_callback(span, exception) + result["body"] = InvokeModelWithResponseStreamWrapper( - result["body"], invoke_model_stream_done_callback, model_id + result["body"], + invoke_model_stream_done_callback, + invoke_model_stream_error_callback, + model_id, ) return diff --git a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock_utils.py b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock_utils.py index 55f0fb0757..5911c91445 100644 --- 
a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock_utils.py +++ b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock_utils.py @@ -19,10 +19,14 @@ from __future__ import annotations import json +from typing import Callable, Dict, Union -from botocore.eventstream import EventStream +from botocore.eventstream import EventStream, EventStreamError from wrapt import ObjectProxy +_StreamDoneCallableT = Callable[[Dict[str, Union[int, str]]], None] +_StreamErrorCallableT = Callable[[Exception], None] + # pylint: disable=abstract-method class ConverseStreamWrapper(ObjectProxy): @@ -31,19 +35,25 @@ class ConverseStreamWrapper(ObjectProxy): def __init__( self, stream: EventStream, - stream_done_callback, + stream_done_callback: _StreamDoneCallableT, + stream_error_callback: _StreamErrorCallableT, ): super().__init__(stream) self._stream_done_callback = stream_done_callback + self._stream_error_callback = stream_error_callback # accumulating things in the same shape of non-streaming version # {"usage": {"inputTokens": 0, "outputTokens": 0}, "stopReason": "finish"} self._response = {} def __iter__(self): - for event in self.__wrapped__: - self._process_event(event) - yield event + try: + for event in self.__wrapped__: + self._process_event(event) + yield event + except EventStreamError as exc: + self._stream_error_callback(exc) + raise def _process_event(self, event): if "messageStart" in event: @@ -85,12 +95,14 @@ class InvokeModelWithResponseStreamWrapper(ObjectProxy): def __init__( self, stream: EventStream, - stream_done_callback, + stream_done_callback: _StreamDoneCallableT, + stream_error_callback: _StreamErrorCallableT, model_id: str, ): super().__init__(stream) self._stream_done_callback = stream_done_callback + self._stream_error_callback = stream_error_callback self._model_id = model_id # accumulating things in the same shape of the Converse API @@ -98,9 +110,13 @@ def __init__( self._response = {} def __iter__(self): - for event in self.__wrapped__: - self._process_event(event) - yield event + try: + for event in self.__wrapped__: + self._process_event(event) + yield event + except EventStreamError as exc: + self._stream_error_callback(exc) + raise def _process_event(self, event): if "chunk" not in event: diff --git a/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_stream_handles_event_stream_error.yaml b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_stream_handles_event_stream_error.yaml new file mode 100644 index 0000000000..07e18ab0e7 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_stream_handles_event_stream_error.yaml @@ -0,0 +1,71 @@ +interactions: +- request: + body: '{"messages": [{"role": "user", "content": [{"text": "Say this is a test"}]}], + "inferenceConfig": {"maxTokens": 10, "temperature": 0.8, "topP": 1, "stopSequences": + ["|"]}}' + headers: + Content-Length: + - '170' + Content-Type: + - !!binary | + YXBwbGljYXRpb24vanNvbg== + User-Agent: + - !!binary | + Qm90bzMvMS4zNS41NiBtZC9Cb3RvY29yZSMxLjM1LjU2IHVhLzIuMCBvcy9saW51eCM2LjEuMC0x + MDM0LW9lbSBtZC9hcmNoI3g4Nl82NCBsYW5nL3B5dGhvbiMzLjEwLjEyIG1kL3B5aW1wbCNDUHl0 + aG9uIGNmZy9yZXRyeS1tb2RlI2xlZ2FjeSBCb3RvY29yZS8xLjM1LjU2 + X-Amz-Date: + - !!binary | + MjAyNTAxMjdUMTE0NjAyWg== + X-Amz-Security-Token: + - test_aws_security_token + X-Amzn-Trace-Id: + - !!binary | + 
Um9vdD0xLWI5YzVlMjRlLWRmYzBjYTYyMmFiYjA2ZWEyMjAzZDZkYjtQYXJlbnQ9NDE0MWM4NWIx + ODkzMmI3OTtTYW1wbGVkPTE= + amz-sdk-invocation-id: + - !!binary | + YjA0ZTAzYWEtMDg2MS00NGIzLTk3NmMtMWZjOGE5MzY5YTFl + amz-sdk-request: + - !!binary | + YXR0ZW1wdD0x + authorization: + - Bearer test_aws_authorization + method: POST + uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/amazon.titan-text-lite-v1/converse-stream + response: + body: + string: !!binary | + AAAAswAAAFK3IJ11CzpldmVudC10eXBlBwAMbWVzc2FnZVN0YXJ0DTpjb250ZW50LXR5cGUHABBh + cHBsaWNhdGlvbi9qc29uDTptZXNzYWdlLXR5cGUHAAVldmVudHsicCI6ImFiY2RlZmdoaWprbG1u + b3BxcnN0dXZ3eHl6QUJDREVGR0hJSktMTU5PUFFSU1RVVldYWVowMSIsInJvbGUiOiJhc3Npc3Rh + bnQifRl7p7oAAAC3AAAAVzLKzzoLOmV2ZW50LXR5cGUHABFjb250ZW50QmxvY2tEZWx0YQ06Y29u + dGVudC10eXBlBwAQYXBwbGljYXRpb24vanNvbg06bWVzc2FnZS10eXBlBwAFZXZlbnR7ImNvbnRl + bnRCbG9ja0luZGV4IjowLCJkZWx0YSI6eyJ0ZXh0IjoiSGkhIEknbSBhbiBBSSBsYW5ndWFnZSJ9 + LCJwIjoiYWJjZGVmZ2gifUn9+AsAAACUAAAAVsOsqngLOmV2ZW50LXR5cGUHABBjb250ZW50Qmxv + Y2tTdG9wDTpjb250ZW50LXR5cGUHABBhcHBsaWNhdGlvbi9qc29uDTptZXNzYWdlLXR5cGUHAAVl + dmVudHsiY29udGVudEJsb2NrSW5kZXgiOjAsInAiOiJhYmNkZWZnaGlqa2xtbm9wIn3KsHRKAAAA + pgAAAFGGKdQ9CzpldmVudC10eXBlBwALbWVzc2FnZVN0b3ANOmNvbnRlbnQtdHlwZQcAEGFwcGxp + Y2F0aW9uL2pzb24NOm1lc3NhZ2UtdHlwZQcABWV2ZW50eyJwIjoiYWJjZGVmZ2hpamtsbW5vcHFy + c3R1dnd4eXpBQkNERUZHSEkiLCJzdG9wUmVhc29uIjoibWF4X3Rva2VucyJ9eRUDZQAAAPUAAABO + dJJs0ws6ZXZlbnQtdHlwZQcACG1ldGFkYXRhDTpjb250ZW50LXR5cGUHABBhcHBsaWNhdGlvbi9q + c29uDTptZXNzYWdlLXR5cGUHAAVldmVudHsibWV0cmljcyI6eyJsYXRlbmN5TXMiOjY2NH0sInAi + OiJhYmNkZWZnaGlqa2xtbm9wcXJzdHV2d3h5ekFCQ0RFRkdISUpLTE1OT1BRUlNUVVZXWFlaMDEi + LCJ1c2FnZSI6eyJpbnB1dFRva2VucyI6OCwib3V0cHV0VG9rZW5zIjoxMCwidG90YWxUb2tlbnMi + OjE4fX3B+Dpy + headers: + Connection: + - keep-alive + Content-Type: + - application/vnd.amazon.eventstream + Date: + - Mon, 27 Jan 2025 11:46:02 GMT + Set-Cookie: test_set_cookie + Transfer-Encoding: + - chunked + x-amzn-RequestId: + - 657e0bef-5ebb-4387-be65-d3ceafd53dea + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_response_stream_handles_stream_error.yaml b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_response_stream_handles_stream_error.yaml new file mode 100644 index 0000000000..e29ddf9fc3 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_response_stream_handles_stream_error.yaml @@ -0,0 +1,62 @@ +interactions: +- request: + body: '{"inputText": "Say this is a test", "textGenerationConfig": {"maxTokenCount": + 10, "temperature": 0.8, "topP": 1, "stopSequences": ["|"]}}' + headers: + Content-Length: + - '137' + User-Agent: + - !!binary | + Qm90bzMvMS4zNS41NiBtZC9Cb3RvY29yZSMxLjM1LjU2IHVhLzIuMCBvcy9saW51eCM2LjEuMC0x + MDM0LW9lbSBtZC9hcmNoI3g4Nl82NCBsYW5nL3B5dGhvbiMzLjEwLjEyIG1kL3B5aW1wbCNDUHl0 + aG9uIGNmZy9yZXRyeS1tb2RlI2xlZ2FjeSBCb3RvY29yZS8xLjM1LjU2 + X-Amz-Date: + - !!binary | + MjAyNTAxMjdUMTIwMTU0Wg== + X-Amz-Security-Token: + - test_aws_security_token + X-Amzn-Trace-Id: + - !!binary | + Um9vdD0xLWJhYTFjOTdhLTI3M2UxYTlhYjIyMTM1NGQwN2JjNGNhYztQYXJlbnQ9OTVhNmQzZGEx + YTZkZjM4ZjtTYW1wbGVkPTE= + amz-sdk-invocation-id: + - !!binary | + ZWQxZGViZmQtZTE5NS00N2RiLWIyMzItMTY1MzJhYjQzZTM0 + amz-sdk-request: + - !!binary | + YXR0ZW1wdD0x + authorization: + - Bearer test_aws_authorization + method: POST + uri: 
https://bedrock-runtime.us-east-1.amazonaws.com/model/amazon.titan-text-lite-v1/invoke-with-response-stream + response: + body: + string: !!binary | + AAACBAAAAEs8ZEC6CzpldmVudC10eXBlBwAFY2h1bmsNOmNvbnRlbnQtdHlwZQcAEGFwcGxpY2F0 + aW9uL2pzb24NOm1lc3NhZ2UtdHlwZQcABWV2ZW50eyJieXRlcyI6ImV5SnZkWFJ3ZFhSVVpYaDBJ + am9pSUdOdmJXMWxiblJjYmtobGJHeHZJU0JKSUdGdElHRWdZMjl0Y0hWMFpYSWdjSEp2WjNKaGJT + QmtaWE5wWjI1bFpDSXNJbWx1WkdWNElqb3dMQ0owYjNSaGJFOTFkSEIxZEZSbGVIUlViMnRsYmtO + dmRXNTBJam94TUN3aVkyOXRjR3hsZEdsdmJsSmxZWE52YmlJNklreEZUa2RVU0NJc0ltbHVjSFYw + VkdWNGRGUnZhMlZ1UTI5MWJuUWlPalVzSW1GdFlYcHZiaTFpWldSeWIyTnJMV2x1ZG05allYUnBi + MjVOWlhSeWFXTnpJanA3SW1sdWNIVjBWRzlyWlc1RGIzVnVkQ0k2TlN3aWIzVjBjSFYwVkc5clpX + NURiM1Z1ZENJNk1UQXNJbWx1ZG05allYUnBiMjVNWVhSbGJtTjVJam8yTnpRc0ltWnBjbk4wUW5s + MFpVeGhkR1Z1WTNraU9qWTNNMzE5IiwicCI6ImFiY2RlZmdoaWprbG1ub3BxcnN0dXZ3eHl6In2J + Hw51 + headers: + Connection: + - keep-alive + Content-Type: + - application/vnd.amazon.eventstream + Date: + - Mon, 27 Jan 2025 12:01:55 GMT + Set-Cookie: test_set_cookie + Transfer-Encoding: + - chunked + X-Amzn-Bedrock-Content-Type: + - application/json + x-amzn-RequestId: + - 1eb1af77-fb2f-400f-9bf8-049e38b90f02 + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_bedrock.py b/instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_bedrock.py index b9f5589988..f277ba895e 100644 --- a/instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_bedrock.py +++ b/instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_bedrock.py @@ -15,9 +15,11 @@ from __future__ import annotations import json +from unittest import mock import boto3 import pytest +from botocore.eventstream import EventStream, EventStreamError from opentelemetry.semconv._incubating.attributes.error_attributes import ( ERROR_TYPE, @@ -171,6 +173,65 @@ def test_converse_stream_with_content( assert len(logs) == 0 +@pytest.mark.skipif( + BOTO3_VERSION < (1, 35, 56), reason="ConverseStream API not available" +) +@pytest.mark.vcr() +def test_converse_stream_handles_event_stream_error( + span_exporter, + log_exporter, + bedrock_runtime_client, + instrument_with_content, +): + # pylint:disable=too-many-locals + messages = [{"role": "user", "content": [{"text": "Say this is a test"}]}] + + llm_model_value = "amazon.titan-text-lite-v1" + max_tokens, temperature, top_p, stop_sequences = 10, 0.8, 1, ["|"] + response = bedrock_runtime_client.converse_stream( + messages=messages, + modelId=llm_model_value, + inferenceConfig={ + "maxTokens": max_tokens, + "temperature": temperature, + "topP": top_p, + "stopSequences": stop_sequences, + }, + ) + + with mock.patch.object( + EventStream, + "_parse_event", + side_effect=EventStreamError( + {"modelStreamErrorException": {}}, "ConverseStream" + ), + ): + with pytest.raises(EventStreamError): + for _event in response["stream"]: + pass + + (span,) = span_exporter.get_finished_spans() + input_tokens, output_tokens, finish_reason = None, None, None + assert_stream_completion_attributes( + span, + llm_model_value, + input_tokens, + output_tokens, + finish_reason, + "chat", + top_p, + temperature, + max_tokens, + stop_sequences, + ) + + assert span.status.status_code == StatusCode.ERROR + assert span.attributes[ERROR_TYPE] == "EventStreamError" + + logs = log_exporter.get_finished_logs() + assert len(logs) == 0 + + @pytest.mark.skipif( BOTO3_VERSION < (1, 35, 56), reason="ConverseStream API not available" ) @@ -413,6 
+474,56 @@ def test_invoke_model_with_response_stream_with_content(
     assert len(logs) == 0
 
 
+@pytest.mark.vcr()
+def test_invoke_model_with_response_stream_handles_stream_error(
+    span_exporter,
+    log_exporter,
+    bedrock_runtime_client,
+    instrument_with_content,
+):
+    # pylint:disable=too-many-locals
+    llm_model_value = "amazon.titan-text-lite-v1"
+    max_tokens, temperature, top_p, stop_sequences = 10, 0.8, 1, ["|"]
+    body = get_invoke_model_body(
+        llm_model_value, max_tokens, temperature, top_p, stop_sequences
+    )
+    response = bedrock_runtime_client.invoke_model_with_response_stream(
+        body=body,
+        modelId=llm_model_value,
+    )
+
+    # consume the stream in order to have it traced
+    finish_reason = None
+    input_tokens, output_tokens = None, None
+    with mock.patch.object(
+        EventStream,
+        "_parse_event",
+        side_effect=EventStreamError(
+            {"modelStreamErrorException": {}}, "InvokeModelWithResponseStream"
+        ),
+    ):
+        with pytest.raises(EventStreamError):
+            for _event in response["body"]:
+                pass
+
+    (span,) = span_exporter.get_finished_spans()
+    assert_stream_completion_attributes(
+        span,
+        llm_model_value,
+        input_tokens,
+        output_tokens,
+        finish_reason,
+        "text_completion",
+        top_p,
+        temperature,
+        max_tokens,
+        stop_sequences,
+    )
+
+    logs = log_exporter.get_finished_logs()
+    assert len(logs) == 0
+
+
 @pytest.mark.vcr()
 def test_invoke_model_with_response_stream_invalid_model(
     span_exporter,

From 7af1918b89c57b807bd9828615098b56295cc3f2 Mon Sep 17 00:00:00 2001
From: Riccardo Magliocchetti
Date: Tue, 28 Jan 2025 21:22:56 +0100
Subject: [PATCH 14/16] botocore: remove amazon copyright from bedrock_utils (#3215)

This code has been written from scratch
---
 .../instrumentation/botocore/extensions/bedrock_utils.py | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock_utils.py b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock_utils.py
index 5911c91445..8d0d806f43 100644
--- a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock_utils.py
+++ b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock_utils.py
@@ -12,10 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# Includes work from:
-# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
-# SPDX-License-Identifier: Apache-2.0 - from __future__ import annotations import json From 748c92592d2f476199667629defce4a3bca9ecc9 Mon Sep 17 00:00:00 2001 From: Aaron Abbott Date: Wed, 29 Jan 2025 17:24:39 -0500 Subject: [PATCH 15/16] VertexAI emit user, system, and assistant events (#3203) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * VertexAI emit user events * Emit system and assistant events * Fix for python 3.8 * Record events regardless of span recording * fix tests * Apply suggestions from code review Co-authored-by: Emídio Neto <9735060+emdneto@users.noreply.github.com> --------- Co-authored-by: Emídio Neto <9735060+emdneto@users.noreply.github.com> --- .../CHANGELOG.md | 2 + .../instrumentation/vertexai/events.py | 91 ++++++++++ .../instrumentation/vertexai/patch.py | 18 +- .../instrumentation/vertexai/utils.py | 53 +++++- ...est_generate_content_all_input_events.yaml | 94 ++++++++++ .../test_generate_content_invalid_role.yaml | 56 ++++++ .../test_generate_content_without_events.yaml | 70 ++++++++ .../tests/test_chat_completions.py | 161 ++++++++++++++++++ 8 files changed, 534 insertions(+), 11 deletions(-) create mode 100644 instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/events.py create mode 100644 instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/cassettes/test_generate_content_all_input_events.yaml create mode 100644 instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/cassettes/test_generate_content_invalid_role.yaml create mode 100644 instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/cassettes/test_generate_content_without_events.yaml diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai/CHANGELOG.md b/instrumentation-genai/opentelemetry-instrumentation-vertexai/CHANGELOG.md index 44725df52d..5b66ea7972 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai/CHANGELOG.md +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai/CHANGELOG.md @@ -13,3 +13,5 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ([#3123](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3123)) - Add server attributes to Vertex AI spans ([#3208](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3208)) +- VertexAI emit user, system, and assistant events + ([#3203](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3203)) diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/events.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/events.py new file mode 100644 index 0000000000..5d011006de --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/events.py @@ -0,0 +1,91 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Factories for event types described in +https://github.com/open-telemetry/semantic-conventions/blob/main/docs/gen-ai/gen-ai-events.md#system-event. + +Hopefully this code can be autogenerated by Weaver once Gen AI semantic conventions are +schematized in YAML and the Weaver tool supports it. +""" + +from opentelemetry._events import Event +from opentelemetry.semconv._incubating.attributes import gen_ai_attributes +from opentelemetry.util.types import AnyValue + + +def user_event( + *, + role: str = "user", + content: AnyValue = None, +) -> Event: + """Creates a User event + https://github.com/open-telemetry/semantic-conventions/blob/v1.28.0/docs/gen-ai/gen-ai-events.md#user-event + """ + body: dict[str, AnyValue] = { + "role": role, + } + if content is not None: + body["content"] = content + return Event( + name="gen_ai.user.message", + attributes={ + gen_ai_attributes.GEN_AI_SYSTEM: gen_ai_attributes.GenAiSystemValues.VERTEX_AI.value, + }, + body=body, + ) + + +def assistant_event( + *, + role: str = "assistant", + content: AnyValue = None, +) -> Event: + """Creates an Assistant event + https://github.com/open-telemetry/semantic-conventions/blob/v1.28.0/docs/gen-ai/gen-ai-events.md#assistant-event + """ + body: dict[str, AnyValue] = { + "role": role, + } + if content is not None: + body["content"] = content + return Event( + name="gen_ai.assistant.message", + attributes={ + gen_ai_attributes.GEN_AI_SYSTEM: gen_ai_attributes.GenAiSystemValues.VERTEX_AI.value, + }, + body=body, + ) + + +def system_event( + *, + role: str = "system", + content: AnyValue = None, +) -> Event: + """Creates a System event + https://github.com/open-telemetry/semantic-conventions/blob/v1.28.0/docs/gen-ai/gen-ai-events.md#system-event + """ + body: dict[str, AnyValue] = { + "role": role, + } + if content is not None: + body["content"] = content + return Event( + name="gen_ai.system.message", + attributes={ + gen_ai_attributes.GEN_AI_SYSTEM: gen_ai_attributes.GenAiSystemValues.VERTEX_AI.value, + }, + body=body, + ) diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/patch.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/patch.py index ecb87e4360..fe0a9cdf60 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/patch.py +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/patch.py @@ -27,6 +27,7 @@ get_genai_request_attributes, get_server_attributes, get_span_name, + request_to_events, ) from opentelemetry.trace import SpanKind, Tracer @@ -113,12 +114,10 @@ def traced_method( kind=SpanKind.CLIENT, attributes=span_attributes, ) as _span: - # TODO: emit request events - # if span.is_recording(): - # for message in kwargs.get("messages", []): - # event_logger.emit( - # message_to_event(message, capture_content) - # ) + for event in request_to_events( + params=params, capture_content=capture_content + ): + event_logger.emit(event) # TODO: set error.type attribute # https://github.com/open-telemetry/semantic-conventions/blob/main/docs/gen-ai/gen-ai-spans.md @@ -130,10 +129,9 @@ def traced_method( # ) # TODO: add response attributes and events - # if span.is_recording(): - # _set_response_attributes( - # span, result, event_logger, capture_content - # ) + # 
_set_response_attributes( + # span, result, event_logger, capture_content + # ) return result return traced_method diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/utils.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/utils.py index e4297bc878..3e6de918f9 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/utils.py +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/utils.py @@ -19,16 +19,24 @@ from os import environ from typing import ( TYPE_CHECKING, + Iterable, Mapping, Sequence, + cast, ) from urllib.parse import urlparse +from opentelemetry._events import Event +from opentelemetry.instrumentation.vertexai.events import ( + assistant_event, + system_event, + user_event, +) from opentelemetry.semconv._incubating.attributes import ( gen_ai_attributes as GenAIAttributes, ) from opentelemetry.semconv.attributes import server_attributes -from opentelemetry.util.types import AttributeValue +from opentelemetry.util.types import AnyValue, AttributeValue if TYPE_CHECKING: from google.cloud.aiplatform_v1.types import content, tool @@ -157,3 +165,46 @@ def get_span_name(span_attributes: Mapping[str, AttributeValue]) -> str: if not model: return f"{name}" return f"{name} {model}" + + +def request_to_events( + *, params: GenerateContentParams, capture_content: bool +) -> Iterable[Event]: + # System message + if params.system_instruction: + request_content = _parts_to_any_value( + capture_content=capture_content, + parts=params.system_instruction.parts, + ) + yield system_event( + role=params.system_instruction.role, content=request_content + ) + + for content in params.contents or []: + # Assistant message + if content.role == "model": + request_content = _parts_to_any_value( + capture_content=capture_content, parts=content.parts + ) + + yield assistant_event(role=content.role, content=request_content) + # Assume user event but role should be "user" + else: + request_content = _parts_to_any_value( + capture_content=capture_content, parts=content.parts + ) + yield user_event(role=content.role, content=request_content) + + +def _parts_to_any_value( + *, + capture_content: bool, + parts: Sequence[content.Part] | Sequence[content_v1beta1.Part], +) -> list[dict[str, AnyValue]] | None: + if not capture_content: + return None + + return [ + cast("dict[str, AnyValue]", type(part).to_dict(part)) # type: ignore[reportUnknownMemberType] + for part in parts + ] diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/cassettes/test_generate_content_all_input_events.yaml b/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/cassettes/test_generate_content_all_input_events.yaml new file mode 100644 index 0000000000..47c5ce6645 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/cassettes/test_generate_content_all_input_events.yaml @@ -0,0 +1,94 @@ +interactions: +- request: + body: |- + { + "contents": [ + { + "role": "user", + "parts": [ + { + "text": "My name is OpenTelemetry" + } + ] + }, + { + "role": "model", + "parts": [ + { + "text": "Hello OpenTelemetry!" 
+ } + ] + }, + { + "role": "user", + "parts": [ + { + "text": "Address me by name and say this is a test" + } + ] + } + ], + "systemInstruction": { + "role": "user", + "parts": [ + { + "text": "You are a clever language model" + } + ] + } + } + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '548' + Content-Type: + - application/json + User-Agent: + - python-requests/2.32.3 + method: POST + uri: https://us-central1-aiplatform.googleapis.com/v1/projects/fake-project/locations/us-central1/publishers/google/models/gemini-1.5-flash-002:generateContent?%24alt=json%3Benum-encoding%3Dint + response: + body: + string: |- + { + "candidates": [ + { + "content": { + "role": "model", + "parts": [ + { + "text": "OpenTelemetry, this is a test.\n" + } + ] + }, + "finishReason": 1, + "avgLogprobs": -1.1655389850299496e-06 + } + ], + "usageMetadata": { + "promptTokenCount": 25, + "candidatesTokenCount": 9, + "totalTokenCount": 34 + }, + "modelVersion": "gemini-1.5-flash-002" + } + headers: + Content-Type: + - application/json; charset=UTF-8 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + content-length: + - '422' + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/cassettes/test_generate_content_invalid_role.yaml b/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/cassettes/test_generate_content_invalid_role.yaml new file mode 100644 index 0000000000..dcc40f2fdf --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/cassettes/test_generate_content_invalid_role.yaml @@ -0,0 +1,56 @@ +interactions: +- request: + body: |- + { + "contents": [ + { + "role": "invalid_role", + "parts": [ + { + "text": "Say this is a test" + } + ] + } + ] + } + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '149' + Content-Type: + - application/json + User-Agent: + - python-requests/2.32.3 + method: POST + uri: https://us-central1-aiplatform.googleapis.com/v1/projects/fake-project/locations/us-central1/publishers/google/models/gemini-1.5-flash-002:generateContent?%24alt=json%3Benum-encoding%3Dint + response: + body: + string: |- + { + "error": { + "code": 400, + "message": "Please use a valid role: user, model.", + "status": "INVALID_ARGUMENT", + "details": [] + } + } + headers: + Content-Type: + - application/json; charset=UTF-8 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + content-length: + - '416' + status: + code: 400 + message: Bad Request +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/cassettes/test_generate_content_without_events.yaml b/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/cassettes/test_generate_content_without_events.yaml new file mode 100644 index 0000000000..0a71d24512 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/cassettes/test_generate_content_without_events.yaml @@ -0,0 +1,70 @@ +interactions: +- request: + body: |- + { + "contents": [ + { + "role": "user", + "parts": [ + { + "text": "Say this is a test" + } + ] + } + ] + } + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '141' + Content-Type: + - application/json + User-Agent: + - python-requests/2.32.3 + method: POST + uri: 
https://us-central1-aiplatform.googleapis.com/v1/projects/fake-project/locations/us-central1/publishers/google/models/gemini-1.5-flash-002:generateContent?%24alt=json%3Benum-encoding%3Dint + response: + body: + string: |- + { + "candidates": [ + { + "content": { + "role": "model", + "parts": [ + { + "text": "Okay, I understand. I'm ready for your test. Please proceed.\n" + } + ] + }, + "finishReason": 1, + "avgLogprobs": -0.005519990466142956 + } + ], + "usageMetadata": { + "promptTokenCount": 5, + "candidatesTokenCount": 19, + "totalTokenCount": 24 + }, + "modelVersion": "gemini-1.5-flash-002" + } + headers: + Content-Type: + - application/json; charset=UTF-8 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + content-length: + - '453' + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/test_chat_completions.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/test_chat_completions.py index 2582a086f6..4a1ab1beba 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/test_chat_completions.py +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/test_chat_completions.py @@ -8,6 +8,9 @@ ) from opentelemetry.instrumentation.vertexai import VertexAIInstrumentor +from opentelemetry.sdk._logs._internal.export.in_memory_log_exporter import ( + InMemoryLogExporter, +) from opentelemetry.sdk.trace import ReadableSpan from opentelemetry.sdk.trace.export.in_memory_span_exporter import ( InMemorySpanExporter, @@ -18,6 +21,7 @@ @pytest.mark.vcr def test_generate_content( span_exporter: InMemorySpanExporter, + log_exporter: InMemoryLogExporter, instrument_with_content: VertexAIInstrumentor, ): model = GenerativeModel("gemini-1.5-flash-002") @@ -27,6 +31,50 @@ def test_generate_content( ] ) + # Emits span + spans = span_exporter.get_finished_spans() + assert len(spans) == 1 + assert spans[0].name == "chat gemini-1.5-flash-002" + assert dict(spans[0].attributes) == { + "gen_ai.operation.name": "chat", + "gen_ai.request.model": "gemini-1.5-flash-002", + "gen_ai.system": "vertex_ai", + "server.address": "us-central1-aiplatform.googleapis.com", + "server.port": 443, + } + + # Emits content event + logs = log_exporter.get_finished_logs() + assert len(logs) == 1 + log_record = logs[0].log_record + span_context = spans[0].get_span_context() + assert log_record.trace_id == span_context.trace_id + assert log_record.span_id == span_context.span_id + assert log_record.trace_flags == span_context.trace_flags + assert log_record.attributes == { + "gen_ai.system": "vertex_ai", + "event.name": "gen_ai.user.message", + } + assert log_record.body == { + "content": [{"text": "Say this is a test"}], + "role": "user", + } + + +@pytest.mark.vcr +def test_generate_content_without_events( + span_exporter: InMemorySpanExporter, + log_exporter: InMemoryLogExporter, + instrument_no_content: VertexAIInstrumentor, +): + model = GenerativeModel("gemini-1.5-flash-002") + model.generate_content( + [ + Content(role="user", parts=[Part.from_text("Say this is a test")]), + ] + ) + + # Emits span spans = span_exporter.get_finished_spans() assert len(spans) == 1 assert spans[0].name == "chat gemini-1.5-flash-002" @@ -38,6 +86,16 @@ def test_generate_content( "server.port": 443, } + # Emits event without body.content + logs = log_exporter.get_finished_logs() + assert len(logs) == 1 + log_record = logs[0].log_record + assert log_record.attributes == { + "gen_ai.system": 
"vertex_ai", + "event.name": "gen_ai.user.message", + } + assert log_record.body == {"role": "user"} + @pytest.mark.vcr def test_generate_content_empty_model( @@ -134,6 +192,38 @@ def test_generate_content_invalid_temperature( assert_span_error(spans[0]) +@pytest.mark.vcr +def test_generate_content_invalid_role( + log_exporter: InMemoryLogExporter, + instrument_with_content: VertexAIInstrumentor, +): + model = GenerativeModel("gemini-1.5-flash-002") + try: + # Fails because role must be "user" or "model" + model.generate_content( + [ + Content( + role="invalid_role", + parts=[Part.from_text("Say this is a test")], + ) + ] + ) + except BadRequest: + pass + + # Emits the faulty content which caused the request to fail + logs = log_exporter.get_finished_logs() + assert len(logs) == 1 + assert logs[0].log_record.attributes == { + "gen_ai.system": "vertex_ai", + "event.name": "gen_ai.user.message", + } + assert logs[0].log_record.body == { + "content": [{"text": "Say this is a test"}], + "role": "invalid_role", + } + + @pytest.mark.vcr() def test_generate_content_extra_params(span_exporter, instrument_no_content): generation_config = GenerationConfig( @@ -181,3 +271,74 @@ def assert_span_error(span: ReadableSpan) -> None: # Records exception event error_events = [e for e in span.events if e.name == "exception"] assert error_events != [] + + +@pytest.mark.vcr +def test_generate_content_all_input_events( + log_exporter: InMemoryLogExporter, + instrument_with_content: VertexAIInstrumentor, +): + model = GenerativeModel( + "gemini-1.5-flash-002", + system_instruction=Part.from_text("You are a clever language model"), + ) + model.generate_content( + [ + Content( + role="user", parts=[Part.from_text("My name is OpenTelemetry")] + ), + Content( + role="model", parts=[Part.from_text("Hello OpenTelemetry!")] + ), + Content( + role="user", + parts=[ + Part.from_text("Address me by name and say this is a test") + ], + ), + ], + ) + + # Emits a system event, 2 users events, and a assistant event + logs = log_exporter.get_finished_logs() + assert len(logs) == 4 + system_log, user_log1, assistant_log, user_log2 = [ + log_data.log_record for log_data in logs + ] + + assert system_log.attributes == { + "gen_ai.system": "vertex_ai", + "event.name": "gen_ai.system.message", + } + assert system_log.body == { + "content": [{"text": "You are a clever language model"}], + # The API only allows user and model, so system instruction is considered a user role + "role": "user", + } + + assert user_log1.attributes == { + "gen_ai.system": "vertex_ai", + "event.name": "gen_ai.user.message", + } + assert user_log1.body == { + "content": [{"text": "My name is OpenTelemetry"}], + "role": "user", + } + + assert assistant_log.attributes == { + "gen_ai.system": "vertex_ai", + "event.name": "gen_ai.assistant.message", + } + assert assistant_log.body == { + "content": [{"text": "Hello OpenTelemetry!"}], + "role": "model", + } + + assert user_log2.attributes == { + "gen_ai.system": "vertex_ai", + "event.name": "gen_ai.user.message", + } + assert user_log2.body == { + "content": [{"text": "Address me by name and say this is a test"}], + "role": "user", + } From 65a2713d9f341edc1376cd275549918807f334a1 Mon Sep 17 00:00:00 2001 From: Tammy Baylis <96076570+tammy-baylis-swi@users.noreply.github.com> Date: Thu, 30 Jan 2025 09:43:17 -0800 Subject: [PATCH 16/16] Add mysql-connector instrumentor support for sqlcommenting (#3163) --- CHANGELOG.md | 2 + .../instrumentation/mysql/__init__.py | 116 +++++- .../tests/test_mysql_integration.py | 
355 ++++++++++++++++++ 3 files changed, 472 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ce7f9acf03..2561ebcf2f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -51,6 +51,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ([#3206](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3206)) - `opentelemetry-instrumentation-pymssql` Add pymssql instrumentation ([#394](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/394)) +- `opentelemetry-instrumentation-mysql` Add sqlcommenter support + ([#3163](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3163)) ### Fixed diff --git a/instrumentation/opentelemetry-instrumentation-mysql/src/opentelemetry/instrumentation/mysql/__init__.py b/instrumentation/opentelemetry-instrumentation-mysql/src/opentelemetry/instrumentation/mysql/__init__.py index 0116dab1c3..e967a1a2ea 100644 --- a/instrumentation/opentelemetry-instrumentation-mysql/src/opentelemetry/instrumentation/mysql/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-mysql/src/opentelemetry/instrumentation/mysql/__init__.py @@ -50,6 +50,95 @@ cursor.close() instrumented_cnx.close() +SQLCOMMENTER +***************************************** +You can optionally configure mysql-connector instrumentation to enable sqlcommenter which enriches the query with contextual information. + +Usage +----- + +.. code:: python + + import mysql.connector + from opentelemetry.instrumentation.mysql import MySQLInstrumentor + + MySQLInstrumentor().instrument(enable_commenter=True, commenter_options={}) + + cnx = mysql.connector.connect(database="MySQL_Database") + cursor = cnx.cursor() + cursor.execute("INSERT INTO test (testField) VALUES (123)") + cursor.close() + cnx.close() + + +For example, +:: + + Invoking cursor.execute("INSERT INTO test (testField) VALUES (123)") will lead to sql query "INSERT INTO test (testField) VALUES (123)" but when SQLCommenter is enabled + the query will get appended with some configurable tags like "INSERT INTO test (testField) VALUES (123) /*tag=value*/;" + +**WARNING:** sqlcommenter for mysql-connector instrumentation should NOT be used if your application initializes cursors with ``prepared=True``, which will natively prepare and execute MySQL statements. Adding sqlcommenting will introduce a severe performance penalty by repeating ``Prepare`` of statements by mysql-connector that are made unique by traceparent in sqlcomment. The penalty does not happen if cursor ``prepared=False`` (default) and instrumentor ``enable_commenter=True``. + +SQLCommenter Configurations +*************************** +We can configure the tags to be appended to the sqlquery log by adding configuration inside commenter_options(default:{}) keyword + +db_driver = True(Default) or False + +For example, +:: +Enabling this flag will add mysql.connector and its version, e.g. 
/*mysql.connector%%3A1.2.3*/ + +dbapi_threadsafety = True(Default) or False + +For example, +:: +Enabling this flag will add threadsafety /*dbapi_threadsafety=2*/ + +dbapi_level = True(Default) or False + +For example, +:: +Enabling this flag will add dbapi_level /*dbapi_level='2.0'*/ + +mysql_client_version = True(Default) or False + +For example, +:: +Enabling this flag will add mysql_client_version /*mysql_client_version='123'*/ + +driver_paramstyle = True(Default) or False + +For example, +:: +Enabling this flag will add driver_paramstyle /*driver_paramstyle='pyformat'*/ + +opentelemetry_values = True(Default) or False + +For example, +:: +Enabling this flag will add traceparent values /*traceparent='00-03afa25236b8cd948fa853d67038ac79-405ff022e8247c46-01'*/ + +SQLComment in span attribute +**************************** +If sqlcommenter is enabled, you can optionally configure mysql-connector instrumentation to append sqlcomment to query span attribute for convenience of your platform. + +.. code:: python + + from opentelemetry.instrumentation.mysql import MySQLInstrumentor + + MySQLInstrumentor().instrument( + enable_commenter=True, + enable_attribute_commenter=True, + ) + + +For example, +:: + + Invoking cursor.execute("select * from auth_users") will lead to sql query "select * from auth_users" but when SQLCommenter and attribute_commenter are enabled + the query will get appended with some configurable tags like "select * from auth_users /*tag=value*/;" for both server query and `db.statement` span attribute. + API --- """ @@ -82,6 +171,11 @@ def _instrument(self, **kwargs): https://dev.mysql.com/doc/connector-python/en/ """ tracer_provider = kwargs.get("tracer_provider") + enable_sqlcommenter = kwargs.get("enable_commenter", False) + commenter_options = kwargs.get("commenter_options", {}) + enable_attribute_commenter = kwargs.get( + "enable_attribute_commenter", False + ) dbapi.wrap_connect( __name__, @@ -91,6 +185,9 @@ def _instrument(self, **kwargs): self._CONNECTION_ATTRIBUTES, version=__version__, tracer_provider=tracer_provider, + enable_commenter=enable_sqlcommenter, + commenter_options=commenter_options, + enable_attribute_commenter=enable_attribute_commenter, ) def _uninstrument(self, **kwargs): @@ -98,7 +195,14 @@ def _uninstrument(self, **kwargs): dbapi.unwrap_connect(mysql.connector, "connect") # pylint:disable=no-self-use - def instrument_connection(self, connection, tracer_provider=None): + def instrument_connection( + self, + connection, + tracer_provider=None, + enable_commenter=None, + commenter_options=None, + enable_attribute_commenter=None, + ): """Enable instrumentation in a MySQL connection. Args: @@ -109,6 +213,12 @@ def instrument_connection(self, connection, tracer_provider=None): tracer_provider: An optional `TracerProvider` instance to use for tracing. If not provided, the globally configured tracer provider will be automatically used. + enable_commenter: + Optional flag to enable/disable sqlcommenter (default False). + commenter_options: + Optional configurations for tags to be appended at the sql query. + enable_attribute_commenter: + Optional flag to enable/disable addition of sqlcomment to span attribute (default False). Requires enable_commenter=True. Returns: An instrumented MySQL connection with OpenTelemetry tracing enabled. 
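Taken together, the keyword arguments documented above can be combined when instrumenting a single connection. The following is a minimal usage sketch (illustrative only, not part of this patch); the database name ``MySQL_Database`` is a placeholder borrowed from the examples earlier in the docstring:

.. code:: python

    import mysql.connector

    from opentelemetry.instrumentation.mysql import MySQLInstrumentor

    # Plain connection, instrumented individually rather than via instrument()
    cnx = mysql.connector.connect(database="MySQL_Database")
    instrumented_cnx = MySQLInstrumentor().instrument_connection(
        cnx,
        enable_commenter=True,
        commenter_options={
            "dbapi_level": False,
            "driver_paramstyle": False,
        },
        enable_attribute_commenter=True,
    )

    cursor = instrumented_cnx.cursor()
    cursor.execute("INSERT INTO test (testField) VALUES (123)")
    cursor.close()
    instrumented_cnx.close()

With this configuration each executed statement is expected to carry a trailing comment built from the remaining enabled tags (for example ``db_driver`` and ``traceparent``), and because ``enable_attribute_commenter=True`` the same commented text should also be mirrored into the ``db.statement`` span attribute.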
@@ -120,6 +230,10 @@ def instrument_connection(self, connection, tracer_provider=None): self._CONNECTION_ATTRIBUTES, version=__version__, tracer_provider=tracer_provider, + enable_commenter=enable_commenter, + commenter_options=commenter_options, + connect_module=mysql.connector, + enable_attribute_commenter=enable_attribute_commenter, ) def uninstrument_connection(self, connection): diff --git a/instrumentation/opentelemetry-instrumentation-mysql/tests/test_mysql_integration.py b/instrumentation/opentelemetry-instrumentation-mysql/tests/test_mysql_integration.py index 79399cce7f..0ef2f4e4cb 100644 --- a/instrumentation/opentelemetry-instrumentation-mysql/tests/test_mysql_integration.py +++ b/instrumentation/opentelemetry-instrumentation-mysql/tests/test_mysql_integration.py @@ -20,6 +20,7 @@ from opentelemetry import trace as trace_api from opentelemetry.instrumentation.mysql import MySQLInstrumentor from opentelemetry.sdk import resources +from opentelemetry.semconv.trace import SpanAttributes from opentelemetry.test.test_base import TestBase @@ -102,6 +103,360 @@ def test_instrument_connection_no_op_tracer_provider(self, mock_connect): spans_list = self.memory_exporter.get_finished_spans() self.assertEqual(len(spans_list), 0) + @mock.patch("opentelemetry.instrumentation.dbapi.instrument_connection") + @mock.patch("mysql.connector") + # pylint: disable=unused-argument + def test_instrument_connection_enable_commenter_dbapi_kwargs( + self, + mock_connect, + mock_instrument_connection, + ): + cnx = mysql.connector.connect(database="test") + cnx = MySQLInstrumentor().instrument_connection( + cnx, + enable_commenter=True, + commenter_options={"foo": True}, + enable_attribute_commenter=True, + ) + cursor = cnx.cursor() + cursor.execute("SELECT * FROM test") + kwargs = mock_instrument_connection.call_args[1] + self.assertEqual(kwargs["enable_commenter"], True) + self.assertEqual(kwargs["commenter_options"], {"foo": True}) + self.assertEqual(kwargs["enable_attribute_commenter"], True) + + def test_instrument_connection_with_dbapi_sqlcomment_enabled(self): + mock_connect_module = mock.MagicMock( + __name__="mysql.connector", + __version__="foobar", + threadsafety="123", + apilevel="123", + paramstyle="test", + ) + mock_cursor = mock_connect_module.connect().cursor() + mock_cursor._cnx._cmysql.get_client_info.return_value = "foobaz" + mock_connection = mock.MagicMock() + mock_connection.cursor.return_value = mock_cursor + + with mock.patch( + "opentelemetry.instrumentation.mysql.mysql.connector", + mock_connect_module, + ): + cnx_proxy = MySQLInstrumentor().instrument_connection( + mock_connection, + enable_commenter=True, + ) + cnx_proxy.cursor().execute("Select 1;") + + spans_list = self.memory_exporter.get_finished_spans() + span = spans_list[0] + span_id = format(span.get_span_context().span_id, "016x") + trace_id = format(span.get_span_context().trace_id, "032x") + self.assertEqual( + mock_cursor.execute.call_args[0][0], + f"Select 1 /*db_driver='mysql.connector%%3Afoobar',dbapi_level='123',dbapi_threadsafety='123',driver_paramstyle='test',mysql_client_version='foobaz',traceparent='00-{trace_id}-{span_id}-01'*/;", + ) + self.assertEqual( + span.attributes[SpanAttributes.DB_STATEMENT], + "Select 1;", + ) + + def test_instrument_connection_with_dbapi_sqlcomment_enabled_stmt_enabled( + self, + ): + mock_connect_module = mock.MagicMock( + __name__="mysql.connector", + __version__="foobar", + threadsafety="123", + apilevel="123", + paramstyle="test", + ) + mock_cursor = 
mock_connect_module.connect().cursor() + mock_cursor._cnx._cmysql.get_client_info.return_value = "foobaz" + mock_connection = mock.MagicMock() + mock_connection.cursor.return_value = mock_cursor + + with mock.patch( + "opentelemetry.instrumentation.mysql.mysql.connector", + mock_connect_module, + ): + cnx_proxy = MySQLInstrumentor().instrument_connection( + mock_connection, + enable_commenter=True, + enable_attribute_commenter=True, + ) + cnx_proxy.cursor().execute("Select 1;") + + spans_list = self.memory_exporter.get_finished_spans() + span = spans_list[0] + span_id = format(span.get_span_context().span_id, "016x") + trace_id = format(span.get_span_context().trace_id, "032x") + self.assertEqual( + mock_cursor.execute.call_args[0][0], + f"Select 1 /*db_driver='mysql.connector%%3Afoobar',dbapi_level='123',dbapi_threadsafety='123',driver_paramstyle='test',mysql_client_version='foobaz',traceparent='00-{trace_id}-{span_id}-01'*/;", + ) + self.assertEqual( + span.attributes[SpanAttributes.DB_STATEMENT], + f"Select 1 /*db_driver='mysql.connector%%3Afoobar',dbapi_level='123',dbapi_threadsafety='123',driver_paramstyle='test',mysql_client_version='foobaz',traceparent='00-{trace_id}-{span_id}-01'*/;", + ) + + def test_instrument_connection_with_dbapi_sqlcomment_enabled_with_options( + self, + ): + mock_connect_module = mock.MagicMock( + __name__="mysql.connector", + __version__="foobar", + threadsafety="123", + apilevel="123", + paramstyle="test", + ) + mock_cursor = mock_connect_module.connect().cursor() + mock_cursor._cnx._cmysql.get_client_info.return_value = "foobaz" + mock_connection = mock.MagicMock() + mock_connection.cursor.return_value = mock_cursor + + with mock.patch( + "opentelemetry.instrumentation.mysql.mysql.connector", + mock_connect_module, + ): + cnx_proxy = MySQLInstrumentor().instrument_connection( + mock_connection, + enable_commenter=True, + commenter_options={ + "dbapi_level": False, + "dbapi_threadsafety": True, + "driver_paramstyle": False, + }, + ) + cnx_proxy.cursor().execute("Select 1;") + + spans_list = self.memory_exporter.get_finished_spans() + span = spans_list[0] + span_id = format(span.get_span_context().span_id, "016x") + trace_id = format(span.get_span_context().trace_id, "032x") + self.assertEqual( + mock_cursor.execute.call_args[0][0], + f"Select 1 /*db_driver='mysql.connector%%3Afoobar',dbapi_threadsafety='123',mysql_client_version='foobaz',traceparent='00-{trace_id}-{span_id}-01'*/;", + ) + self.assertEqual( + span.attributes[SpanAttributes.DB_STATEMENT], + "Select 1;", + ) + + def test_instrument_connection_with_dbapi_sqlcomment_not_enabled_default( + self, + ): + mock_connect_module = mock.MagicMock( + __name__="mysql.connector", + __version__="foobar", + threadsafety="123", + apilevel="123", + paramstyle="test", + ) + mock_cursor = mock_connect_module.connect().cursor() + mock_cursor._cnx._cmysql.get_client_info.return_value = "foobaz" + mock_cursor = mock_connect_module.connect().cursor() + mock_connection = mock.MagicMock() + mock_connection.cursor.return_value = mock_cursor + + with mock.patch( + "opentelemetry.instrumentation.mysql.mysql.connector", + mock_connect_module, + ): + cnx_proxy = MySQLInstrumentor().instrument_connection( + mock_connection, + ) + cnx_proxy.cursor().execute("Select 1;") + self.assertEqual( + mock_cursor.execute.call_args[0][0], + "Select 1;", + ) + spans_list = self.memory_exporter.get_finished_spans() + span = spans_list[0] + self.assertEqual( + span.attributes[SpanAttributes.DB_STATEMENT], + "Select 1;", + ) + + 
@mock.patch("opentelemetry.instrumentation.dbapi.wrap_connect") + @mock.patch("mysql.connector") + # pylint: disable=unused-argument + def test_instrument_enable_commenter_dbapi_kwargs( + self, + mock_connect, + mock_wrap_connect, + ): + MySQLInstrumentor()._instrument( + enable_commenter=True, + commenter_options={"foo": True}, + enable_attribute_commenter=True, + ) + kwargs = mock_wrap_connect.call_args[1] + self.assertEqual(kwargs["enable_commenter"], True) + self.assertEqual(kwargs["commenter_options"], {"foo": True}) + self.assertEqual(kwargs["enable_attribute_commenter"], True) + + def test_instrument_with_dbapi_sqlcomment_enabled( + self, + ): + mock_connect_module = mock.MagicMock( + __name__="mysql.connector", + __version__="foobar", + threadsafety="123", + apilevel="123", + paramstyle="test", + ) + mock_cursor = mock_connect_module.connect().cursor() + mock_cursor._cnx._cmysql.get_client_info.return_value = "foobaz" + mock_cursor = mock_connect_module.connect().cursor() + mock_connection = mock.MagicMock() + mock_connection.cursor.return_value = mock_cursor + + with mock.patch( + "opentelemetry.instrumentation.mysql.mysql.connector", + mock_connect_module, + ): + MySQLInstrumentor()._instrument( + enable_commenter=True, + ) + cnx = mock_connect_module.connect(database="test") + cursor = cnx.cursor() + cursor.execute("Select 1;") + + spans_list = self.memory_exporter.get_finished_spans() + span = spans_list[0] + span_id = format(span.get_span_context().span_id, "016x") + trace_id = format(span.get_span_context().trace_id, "032x") + self.assertEqual( + mock_cursor.execute.call_args[0][0], + f"Select 1 /*db_driver='mysql.connector%%3Afoobar',dbapi_level='123',dbapi_threadsafety='123',driver_paramstyle='test',mysql_client_version='foobaz',traceparent='00-{trace_id}-{span_id}-01'*/;", + ) + self.assertEqual( + span.attributes[SpanAttributes.DB_STATEMENT], + "Select 1;", + ) + + def test_instrument_with_dbapi_sqlcomment_enabled_stmt_enabled( + self, + ): + mock_connect_module = mock.MagicMock( + __name__="mysql.connector", + __version__="foobar", + threadsafety="123", + apilevel="123", + paramstyle="test", + ) + mock_cursor = mock_connect_module.connect().cursor() + mock_cursor._cnx._cmysql.get_client_info.return_value = "foobaz" + mock_cursor = mock_connect_module.connect().cursor() + mock_connection = mock.MagicMock() + mock_connection.cursor.return_value = mock_cursor + + with mock.patch( + "opentelemetry.instrumentation.mysql.mysql.connector", + mock_connect_module, + ): + MySQLInstrumentor()._instrument( + enable_commenter=True, + enable_attribute_commenter=True, + ) + cnx = mock_connect_module.connect(database="test") + cursor = cnx.cursor() + cursor.execute("Select 1;") + + spans_list = self.memory_exporter.get_finished_spans() + span = spans_list[0] + span_id = format(span.get_span_context().span_id, "016x") + trace_id = format(span.get_span_context().trace_id, "032x") + self.assertEqual( + mock_cursor.execute.call_args[0][0], + f"Select 1 /*db_driver='mysql.connector%%3Afoobar',dbapi_level='123',dbapi_threadsafety='123',driver_paramstyle='test',mysql_client_version='foobaz',traceparent='00-{trace_id}-{span_id}-01'*/;", + ) + self.assertEqual( + span.attributes[SpanAttributes.DB_STATEMENT], + f"Select 1 /*db_driver='mysql.connector%%3Afoobar',dbapi_level='123',dbapi_threadsafety='123',driver_paramstyle='test',mysql_client_version='foobaz',traceparent='00-{trace_id}-{span_id}-01'*/;", + ) + + def test_instrument_with_dbapi_sqlcomment_enabled_with_options( + self, + ): + 
mock_connect_module = mock.MagicMock( + __name__="mysql.connector", + __version__="foobar", + threadsafety="123", + apilevel="123", + paramstyle="test", + ) + mock_cursor = mock_connect_module.connect().cursor() + mock_cursor._cnx._cmysql.get_client_info.return_value = "foobaz" + mock_cursor = mock_connect_module.connect().cursor() + mock_connection = mock.MagicMock() + mock_connection.cursor.return_value = mock_cursor + + with mock.patch( + "opentelemetry.instrumentation.mysql.mysql.connector", + mock_connect_module, + ): + MySQLInstrumentor()._instrument( + enable_commenter=True, + commenter_options={ + "dbapi_level": False, + "dbapi_threadsafety": True, + "driver_paramstyle": False, + }, + ) + cnx = mock_connect_module.connect(database="test") + cursor = cnx.cursor() + cursor.execute("Select 1;") + + spans_list = self.memory_exporter.get_finished_spans() + span = spans_list[0] + span_id = format(span.get_span_context().span_id, "016x") + trace_id = format(span.get_span_context().trace_id, "032x") + self.assertEqual( + mock_cursor.execute.call_args[0][0], + f"Select 1 /*db_driver='mysql.connector%%3Afoobar',dbapi_threadsafety='123',mysql_client_version='foobaz',traceparent='00-{trace_id}-{span_id}-01'*/;", + ) + self.assertEqual( + span.attributes[SpanAttributes.DB_STATEMENT], + "Select 1;", + ) + + def test_instrument_with_dbapi_sqlcomment_not_enabled_default( + self, + ): + mock_connect_module = mock.MagicMock( + __name__="mysql.connector", + __version__="foobar", + threadsafety="123", + apilevel="123", + paramstyle="test", + ) + mock_cursor = mock_connect_module.connect().cursor() + mock_cursor._cnx._cmysql.get_client_info.return_value = "foobaz" + mock_cursor = mock_connect_module.connect().cursor() + mock_connection = mock.MagicMock() + mock_connection.cursor.return_value = mock_cursor + + with mock.patch( + "opentelemetry.instrumentation.mysql.mysql.connector", + mock_connect_module, + ): + MySQLInstrumentor()._instrument() + cnx = mock_connect_module.connect(database="test") + cursor = cnx.cursor() + cursor.execute("Select 1;") + self.assertEqual( + mock_cursor.execute.call_args[0][0], + "Select 1;", + ) + spans_list = self.memory_exporter.get_finished_spans() + span = spans_list[0] + self.assertEqual( + span.attributes[SpanAttributes.DB_STATEMENT], + "Select 1;", + ) + @mock.patch("mysql.connector.connect") # pylint: disable=unused-argument def test_uninstrument_connection(self, mock_connect):