From abdb0f5ee056b9bce8b23f515561e9dab4f39245 Mon Sep 17 00:00:00 2001
From: Andrei Malashkin
Date: Thu, 28 Nov 2024 12:04:34 +0100
Subject: [PATCH] add benchmarks to nightly

---
 .../clang-benchmarks-linux-nix-check.yml | 40 ++++++++++++++++++
 .github/workflows/nightly.yml            | 16 +++++++-
 parse_benchmarks.py                      | 41 +++++++++++++++++++
 parse_tests.py                           |  2 +-
 4 files changed, 97 insertions(+), 2 deletions(-)
 create mode 100644 .github/workflows/clang-benchmarks-linux-nix-check.yml
 create mode 100644 parse_benchmarks.py

diff --git a/.github/workflows/clang-benchmarks-linux-nix-check.yml b/.github/workflows/clang-benchmarks-linux-nix-check.yml
new file mode 100644
index 0000000000..3d934a9b58
--- /dev/null
+++ b/.github/workflows/clang-benchmarks-linux-nix-check.yml
@@ -0,0 +1,40 @@
+name: Build and run benchmark tests on Linux with clang
+
+on:
+  workflow_call:
+
+jobs:
+  build-and-test:
+    name: "Build and run benchmark tests on Linux with clang"
+    runs-on: [self-hosted, Linux, X64, aws_autoscaling]
+    continue-on-error: true
+    steps:
+      # https://github.com/actions/checkout/issues/1552
+      - name: Clean up after previous checkout
+        run: chmod +w -R ${GITHUB_WORKSPACE}; rm -rf ${GITHUB_WORKSPACE}/*;
+
+      - name: Checkout
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+
+      - name: Run checks
+        run: |
+          nix build -L .?#checks.x86_64-linux.all-clang-benchmarks
+          mkdir results
+          cp result/test-logs/*_test.xml results/
+          cp result/test-logs/*_benchmark.xml results/
+          ls -l -a results/
+        continue-on-error: true
+        env:
+          NIX_CONFIG: |
+            cores = 8
+
+      - name: Publish Benchmarks Test Results
+        uses: EnricoMi/publish-unit-test-result-action/linux@v2
+        with:
+          check_name: "Benchmarks Test Results"
+          files: "results/*.xml"
+          comment_mode: ${{ github.event.pull_request.head.repo.fork && 'off' || 'always' }} # don't create PR comments from fork runs
+          action_fail_on_inconclusive: true # fail if no reports are found
+
diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml
index ff0da71f05..8b5397463b 100644
--- a/.github/workflows/nightly.yml
+++ b/.github/workflows/nightly.yml
@@ -18,6 +18,13 @@ jobs:
       always() && !cancelled()
     secrets: inherit
 
+  test-linux-benchmarks:
+    name: Linux benchmark testing with clang
+    uses: ./.github/workflows/clang-benchmarks-linux-nix-check.yml
+    if: |
+      always() && !cancelled()
+    secrets: inherit
+
   post-telemetry:
     name: Post test results in Open Telemetry format
     runs-on: [self-hosted, Linux, X64, aws_autoscaling]
@@ -33,7 +40,6 @@
       run: |
         ls -l -a
         nix build -L .?#checks.x86_64-linux.all-clang-sanitize
-        cat ./result/test-logs/test_errors.txt
         export UndefinedBehaviorSanitizer=$(grep UndefinedBehaviorSanitizer result/test-logs/test_errors.txt | wc -l)
         export AddressSanitizer=$(grep AddressSanitizer result/test-logs/test_errors.txt | wc -l)
         export LeakSanitizer=$(grep LeakSanitizer result/test-logs/test_errors.txt | wc -l)
@@ -49,3 +55,11 @@
           --service_name nightly-build \
           python3 ./parse_tests.py
 
+        nix build -L .?#checks.x86_64-linux.all-clang-benchmarks
+        /home/ec2-user/.local/bin/opentelemetry-instrument \
+          --traces_exporter console,otlp \
+          --metrics_exporter console,otlp \
+          --logs_exporter console,otlp \
+          --service_name nightly-build \
+          python3 ./parse_benchmarks.py
+
diff --git a/parse_benchmarks.py b/parse_benchmarks.py
new file mode 100644
index 0000000000..6e587fab7c
--- /dev/null
+++ b/parse_benchmarks.py
@@ -0,0 +1,41 @@
+import logging, json
+from junitparser import JUnitXml
+import glob, os
+from opentelemetry import trace
+
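+# Merge every benchmark JUnit report produced by the nix build into a single
+# JUnitXml tree so pass/fail totals can be computed across all suites.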
+aggregated_test_results = JUnitXml()
+for file in glob.glob("result/test-logs/*_benchmark.xml"):
+    try:
+        test_result = JUnitXml.fromfile(file)
+        aggregated_test_results.append(test_result)
+    except Exception as ex:
+        print("Error processing {}".format(file))
+        print(ex)
+
+# Recompute aggregate counters (tests, failures, errors, skipped, time) from the appended suites.
+aggregated_test_results.update_statistics()
+
+succeeded = aggregated_test_results.tests - \
+            aggregated_test_results.failures - \
+            aggregated_test_results.errors - \
+            aggregated_test_results.skipped
+
+result = {
+    "benchmark_tests": aggregated_test_results.tests,
+    "benchmark_failures": aggregated_test_results.failures,
+    "benchmark_errors": aggregated_test_results.errors,
+    "benchmark_skipped": aggregated_test_results.skipped,
+    "benchmark_succeeded": succeeded,
+    "benchmark_execution_time": aggregated_test_results.time,
+}
+
+print("Resulting JSON: {}".format(json.dumps(result)))
+
+tracer = trace.get_tracer_provider().get_tracer(__name__)
+with tracer.start_as_current_span("nightly_span"):
+    current_span = trace.get_current_span()
+    current_span.add_event("Nightly benchmarks build finished")
+    logging.getLogger().error(json.dumps(result))
+
diff --git a/parse_tests.py b/parse_tests.py
index fc6431f7fc..008f3bd368 100644
--- a/parse_tests.py
+++ b/parse_tests.py
@@ -46,6 +46,6 @@
 
 tracer = trace.get_tracer_provider().get_tracer(__name__)
 with tracer.start_as_current_span("nightly_span"):
     current_span = trace.get_current_span()
-    current_span.add_event("Nightly build finished")
+    current_span.add_event("Nightly sanitizers build finished")
     logging.getLogger().error(json.dumps(result))
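
Note (not part of the patch): parse_benchmarks.py can be smoke-tested locally
without running the nix build by dropping a synthetic report where the script
globs. A minimal sketch, assuming each report carries a single <testsuite>
root (which is what the append-based aggregation above expects); the directory
layout matches the script, but the file, suite, and case names here are made
up for illustration:

    import os

    # parse_benchmarks.py reads result/test-logs/*_benchmark.xml
    os.makedirs("result/test-logs", exist_ok=True)

    # Illustrative fixture: a single-suite JUnit report with one passing
    # case taking 0.42 s. All names below are made up.
    fixture = """<?xml version="1.0" encoding="UTF-8"?>
    <testsuite name="sample_suite" tests="1" failures="0" errors="0" skipped="0" time="0.42">
      <testcase name="bench_case" time="0.42"/>
    </testsuite>
    """

    with open("result/test-logs/sample_benchmark.xml", "w") as f:
        f.write(fixture)

Running python3 ./parse_benchmarks.py afterwards should print a result JSON
with benchmark_tests and benchmark_succeeded both equal to 1.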