Skip to content

Commit

Permalink
Merge from main.
Browse files Browse the repository at this point in the history
  • Loading branch information
zzhlogin committed Jun 28, 2024
2 parents a0fc9c9 + 912dd93 commit 09b8a2d
Show file tree
Hide file tree
Showing 21 changed files with 543 additions and 172 deletions.
97 changes: 97 additions & 0 deletions .github/workflows/codeql.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,97 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"

on:
  push:
    branches: [ "main", "release/v*" ]
  pull_request:
    branches: [ "main", "release/v*" ]
  schedule:
    # Weekly scan: Mondays at 12:45 UTC.
    - cron: '45 12 * * 1'

jobs:
  analyze:
    name: Analyze (${{ matrix.language }})
    # Runner size impacts CodeQL analysis time. To learn more, please see:
    #   - https://gh.io/recommended-hardware-resources-for-running-codeql
    #   - https://gh.io/supported-runners-and-hardware-resources
    #   - https://gh.io/using-larger-runners (GitHub.com only)
    # Consider using larger runners or machines with greater resources for possible analysis time improvements.
    runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }}
    timeout-minutes: ${{ (matrix.language == 'swift' && 120) || 360 }}
    permissions:
      # required for all workflows
      security-events: write

      # required to fetch internal or private CodeQL packs
      packages: read

      # only required for workflows in private repositories
      actions: read
      contents: read

    strategy:
      fail-fast: false
      matrix:
        include:
          - language: java-kotlin
            build-mode: none # This mode only analyzes Java. Set this to 'autobuild' or 'manual' to analyze Kotlin too.
          - language: javascript-typescript
            build-mode: none
          - language: python
            build-mode: none
        # CodeQL supports the following values keywords for 'language': 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'swift'
        # Use `c-cpp` to analyze code written in C, C++ or both
        # Use 'java-kotlin' to analyze code written in Java, Kotlin or both
        # Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both
        # To learn more about changing the languages that are analyzed or customizing the build mode for your analysis,
        # see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning.
        # If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how
        # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v3
        with:
          languages: ${{ matrix.language }}
          build-mode: ${{ matrix.build-mode }}
          # If you wish to specify custom queries, you can do so here or in a config file.
          # By default, queries listed here will override any specified in a config file.
          # Prefix the list here with "+" to use these queries and those in the config file.

          # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
          # queries: security-extended,security-and-quality

      # If the analyze step fails for one of the languages you are analyzing with
      # "We were unable to automatically build your code", modify the matrix above
      # to set the build mode to "manual" for that language. Then modify this step
      # to build your code.
      # ℹ️ Command-line programs to run using the OS shell.
      # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
      - if: matrix.build-mode == 'manual'
        shell: bash
        run: |
          echo 'If you are using a "manual" build mode for one or more of the' \
            'languages you are analyzing, replace this with the commands to build' \
            'your code, for example:'
          echo '  make bootstrap'
          echo '  make release'
          exit 1

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v3
        with:
          category: "/language:${{matrix.language}}"
2 changes: 1 addition & 1 deletion .github/workflows/main_build.yml
Original file line number Diff line number Diff line change
Expand Up @@ -102,7 +102,7 @@ jobs:
# Application Signals specific e2e tests for ec2
application-signals-python-e2e-ec2-test:
needs: [ build ]
uses: aws-observability/aws-application-signals-test-framework/.github/workflows/application-signals-python-e2e-ec2-test.yml@main
uses: aws-observability/aws-application-signals-test-framework/.github/workflows/application-signals-python-e2e-ec2-default-test.yml@main
secrets: inherit
with:
aws-region: ${{ needs.build.outputs.aws_default_region }}
Expand Down
3 changes: 2 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -67,4 +67,5 @@ target
examples/**/build/

# Performance test results
**/performance-tests/results/
**/performance-tests/results/

4 changes: 4 additions & 0 deletions CONTRIBUTING.md
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,9 @@ Contributions via pull requests are much appreciated. Before sending us a pull r
1. You are working against the latest source on the *main* branch.
2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already.
3. You open an issue to discuss any significant work - we would hate for your time to be wasted.
4. You are not mixing substantial refactoring changes in with functional changes.
1. If refactoring is desirable, publish a separate refactoring PR first, followed by a functional change PR. This will ensure safe and efficient reviews.
2. PRs that do not meet these expectations will be rejected.

To send us a pull request, please:

Expand All @@ -32,6 +35,7 @@ To send us a pull request, please:
4. Commit to your fork using clear commit messages.
5. Send us a pull request, answering any default questions in the pull request interface.
6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.
7. Please do not squash commits between revisions, this makes review challenging, as the diff between revisions is harder to find and review.

GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
[creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,52 +20,71 @@
_BEDROCK_GUARDRAIL_ID: str = "GuardrailId"
_BEDROCK_KNOWLEDGEBASE_ID: str = "KnowledgeBaseId"

# Patch names
GET_DISTRIBUTION_PATCH: str = (
"amazon.opentelemetry.distro.patches._instrumentation_patch.pkg_resources.get_distribution"
)

class TestInstrumentationPatch(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.mock_get_distribution = patch(
"amazon.opentelemetry.distro.patches._instrumentation_patch.pkg_resources.get_distribution"
).start()

@classmethod
def tearDownClass(cls):
super().tearDownClass()
cls.mock_get_distribution.stop()

def test_botocore_not_installed(self):
# Test scenario 1: Botocore package not installed
self.mock_get_distribution.side_effect = pkg_resources.DistributionNotFound
apply_instrumentation_patches()
with patch(
"amazon.opentelemetry.distro.patches._botocore_patches._apply_botocore_instrumentation_patches"
) as mock_apply_patches:
mock_apply_patches.assert_not_called()

def test_botocore_installed_wrong_version(self):
# Test scenario 2: Botocore package installed with wrong version
self.mock_get_distribution.side_effect = pkg_resources.VersionConflict("botocore==1.0.0", "botocore==0.0.1")
apply_instrumentation_patches()
with patch(
"amazon.opentelemetry.distro.patches._botocore_patches._apply_botocore_instrumentation_patches"
) as mock_apply_patches:
mock_apply_patches.assert_not_called()
class TestInstrumentationPatch(TestCase):
"""
This test class has exactly one test, test_instrumentation_patch. This is an anti-pattern, but the scenario is
fairly unusual and we feel justifies the code smell. Essentially the _instrumentation_patch module monkey-patches
upstream components, so once it's run, it's challenging to "undo" between tests. To work around this, we have a
monolith test framework that tests two major categories of test scenarios:
1. Patch behaviour
2. Patch mechanism
Patch behaviour tests validate upstream behaviour without patches, apply patches, and validate patched behaviour.
Patch mechanism tests validate the logic that is used to actually apply patches, and can be run regardless of the
pre- or post-patch behaviour.
"""

method_patches: Dict[str, patch] = {}
mock_metric_exporter_init: patch

def test_instrumentation_patch(self):
# Set up method patches used by all tests
self.method_patches[GET_DISTRIBUTION_PATCH] = patch(GET_DISTRIBUTION_PATCH).start()

# Run tests that validate patch behaviour before and after patching
self._run_patch_behaviour_tests()
# Run tests not specifically related to patch behaviour
self._run_patch_mechanism_tests()

# Clean up method patches
for method_patch in self.method_patches.values():
method_patch.stop()

def _run_patch_behaviour_tests(self):
# Test setup
self.method_patches[GET_DISTRIBUTION_PATCH].return_value = "CorrectDistributionObject"

def test_botocore_installed_correct_version(self):
# Test scenario 3: Botocore package installed with correct version
# Validate unpatched upstream behaviour - important to detect upstream changes that may break instrumentation
self._validate_unpatched_botocore_instrumentation()

self.mock_get_distribution.return_value = "CorrectDistributionObject"
self._test_unpatched_botocore_instrumentation()

# Apply patches
apply_instrumentation_patches()

# Validate patched upstream behaviour - important to detect downstream changes that may break instrumentation
self._validate_patched_botocore_instrumentation()

def _validate_unpatched_botocore_instrumentation(self):
self._test_patched_botocore_instrumentation()

# Test teardown
self._reset_mocks()

def _run_patch_mechanism_tests(self):
"""
Each test should be invoked, resetting mocks in between each test. E.g.:
self.test_x()
self.reset_mocks()
self.test_y()
self.reset_mocks()
etc.
"""
self._test_botocore_installed_flag()
self._reset_mocks()

def _test_unpatched_botocore_instrumentation(self):
# Kinesis
self.assertFalse("kinesis" in _KNOWN_EXTENSIONS, "Upstream has added a Kinesis extension")

Expand All @@ -90,7 +109,7 @@ def _validate_unpatched_botocore_instrumentation(self):
"bedrock-agent-runtime" in _KNOWN_EXTENSIONS, "Upstream has added a Bedrock Agent Runtime extension"
)

def _validate_patched_botocore_instrumentation(self):
def _test_patched_botocore_instrumentation(self):
# Kinesis
self.assertTrue("kinesis" in _KNOWN_EXTENSIONS)
kinesis_attributes: Dict[str, str] = _do_extract_kinesis_attributes()
Expand Down Expand Up @@ -149,6 +168,28 @@ def _validate_patched_botocore_instrumentation(self):
self.assertTrue("aws.bedrock.knowledgebase_id" in bedrock_agent_runtime_attributes)
self.assertEqual(bedrock_agent_runtime_attributes["aws.bedrock.knowledgebase_id"], _BEDROCK_KNOWLEDGEBASE_ID)

def _test_botocore_installed_flag(self):
with patch(
"amazon.opentelemetry.distro.patches._botocore_patches._apply_botocore_instrumentation_patches"
) as mock_apply_patches:
get_distribution_patch: patch = self.method_patches[GET_DISTRIBUTION_PATCH]
get_distribution_patch.side_effect = pkg_resources.DistributionNotFound
apply_instrumentation_patches()
mock_apply_patches.assert_not_called()

get_distribution_patch.side_effect = pkg_resources.VersionConflict("botocore==1.0.0", "botocore==0.0.1")
apply_instrumentation_patches()
mock_apply_patches.assert_not_called()

get_distribution_patch.side_effect = None
get_distribution_patch.return_value = "CorrectDistributionObject"
apply_instrumentation_patches()
mock_apply_patches.assert_called()

def _reset_mocks(self):
for method_patch in self.method_patches.values():
method_patch.reset_mock()


def _do_extract_kinesis_attributes() -> Dict[str, str]:
service_name: str = "kinesis"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
import requests
from botocore.client import BaseClient
from botocore.config import Config
from typing_extensions import override
from typing_extensions import Tuple, override

_PORT: int = 8080
_ERROR: str = "error"
Expand Down Expand Up @@ -253,7 +253,7 @@ def prepare_aws_server() -> None:

def main() -> None:
prepare_aws_server()
server_address: tuple[str, int] = ("0.0.0.0", _PORT)
server_address: Tuple[str, int] = ("0.0.0.0", _PORT)
request_handler_class: type = RequestHandler
requests_server: ThreadingHTTPServer = ThreadingHTTPServer(server_address, request_handler_class)
atexit.register(requests_server.shutdown)
Expand Down
11 changes: 9 additions & 2 deletions contract-tests/images/applications/psycopg2/psycopg2_server.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,9 +10,10 @@
from typing_extensions import override

_PORT: int = 8080
_SUCCESS: str = "success"
_DROP_TABLE: str = "drop_table"
_ERROR: str = "error"
_FAULT: str = "fault"
_CREATE_DATABASE: str = "create_database"

_DB_HOST = os.getenv("DB_HOST")
_DB_USER = os.getenv("DB_USER")
Expand All @@ -26,11 +27,17 @@ class RequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
status_code: int = 200
conn = psycopg2.connect(dbname=_DB_NAME, user=_DB_USER, password=_DB_PASS, host=_DB_HOST)
if self.in_path(_SUCCESS):
conn.autocommit = True # CREATE DATABASE cannot run in a transaction block
if self.in_path(_DROP_TABLE):
cur = conn.cursor()
cur.execute("DROP TABLE IF EXISTS test_table")
cur.close()
status_code = 200
elif self.in_path(_CREATE_DATABASE):
cur = conn.cursor()
cur.execute("CREATE DATABASE test_database")
cur.close()
status_code = 200
elif self.in_path(_FAULT):
cur = conn.cursor()
try:
Expand Down
15 changes: 15 additions & 0 deletions contract-tests/images/applications/pymysql/Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
# Meant to be run from aws-otel-python-instrumentation/contract-tests.
# Assumes existence of dist/aws_opentelemetry_distro-<pkg_version>-py3-none-any.whl.
# Assumes filename of aws_opentelemetry_distro-<pkg_version>-py3-none-any.whl is passed in as "DISTRO" arg.
FROM python:3.10
WORKDIR /pymysql

# ARG must be declared before its first use in a stage: an ARG referenced
# before its declaration expands to the empty string, so the original
# `COPY ./dist/$DISTRO /pymysql` (declared before `ARG DISTRO`) silently
# copied the entire ./dist/ directory instead of the single wheel file.
ARG DISTRO
COPY ./dist/$DISTRO /pymysql
COPY ./contract-tests/images/applications/pymysql /pymysql

ENV PIP_ROOT_USER_ACTION=ignore
RUN pip install --upgrade pip && pip install -r requirements.txt && pip install ${DISTRO} --force-reinstall
RUN opentelemetry-bootstrap -a install

# Without `-u`, logs will be buffered and `wait_for_logs` will never return.
CMD ["opentelemetry-instrument", "python", "-u", "./pymysql_server.py"]
Loading

0 comments on commit 09b8a2d

Please sign in to comment.