Skip to content

Commit

Permalink
ci: fix e2e_benchmark comparison
Browse files Browse the repository at this point in the history
  • Loading branch information
m1ghtym0 authored Jan 25, 2024
1 parent 77276cb commit 26f6fd0
Show file tree
Hide file tree
Showing 3 changed files with 39 additions and 38 deletions.
2 changes: 2 additions & 0 deletions .github/actions/e2e_benchmark/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
benchmarks/
out/
59 changes: 26 additions & 33 deletions .github/actions/e2e_benchmark/action.yml
Original file line number Diff line number Diff line change
Expand Up @@ -124,6 +124,27 @@ runs:
name: "knb-constellation-${{ inputs.cloudProvider }}.json"
encryptionSecret: ${{ inputs.encryptionSecret }}

- name: Parse results, create diagrams and post the progression summary
shell: bash
env:
# Original result directory
BENCH_RESULTS: out/
# Working directory containing the previous results as JSON and to contain the graphs
BDIR: benchmarks
CSP: ${{ inputs.cloudProvider }}
run: |
mkdir -p benchmarks
python .github/actions/e2e_benchmark/evaluate/parse.py
- name: Upload benchmark results to action run
if: (!env.ACT)
uses: ./.github/actions/artifact_upload
with:
path: >
benchmarks/constellation-${{ inputs.cloudProvider }}.json
name: "benchmarks"
encryptionSecret: ${{ inputs.encryptionSecret }}

- name: Assume AWS role to retrieve and update benchmarks in S3
uses: aws-actions/configure-aws-credentials@010d0da01d0b5a38af31e9c3470dbfdabdecca3a # v4.0.1
with:
Expand All @@ -141,42 +162,24 @@ runs:
env:
CSP: ${{ inputs.cloudProvider }}
run: |
mkdir -p benchmarks
aws s3 cp --recursive ${S3_PATH} benchmarks --no-progress
if [[ -f benchmarks/constellation-${CSP}.json ]]; then
mv benchmarks/constellation-${CSP}.json benchmarks/constellation-${CSP}-previous.json
aws s3 cp --recursive ${S3_PATH} ./ --no-progress
if [[ -f constellation-${CSP}.json ]]; then
mv constellation-${CSP}.json benchmarks/constellation-${CSP}-previous.json
else
echo "::warning::Couldn't retrieve previous benchmark records from s3"
fi
- name: Parse results, create diagrams and post the progression summary
- name: Compare results
shell: bash
env:
# Original result directory
BENCH_RESULTS: out/
# Working directory containing the previous results as JSON and to contain the graphs
BDIR: benchmarks
# Paths to benchmark results as JSON of the previous run and the current run
PREV_BENCH: benchmarks/constellation-${{ inputs.cloudProvider }}-previous.json
CURR_BENCH: benchmarks/constellation-${{ inputs.cloudProvider }}.json
CSP: ${{ inputs.cloudProvider }}
run: |
python .github/actions/e2e_benchmark/evaluate/parse.py
export BENCHMARK_SUCCESS=true
if [[ -f "$PREV_BENCH" ]]; then
# Sets $BENCHMARK_SUCCESS=false if delta is bigger than defined in compare.py
# Fails if the results are outside the threshold range
python .github/actions/e2e_benchmark/evaluate/compare.py >> $GITHUB_STEP_SUMMARY
fi
echo BENCHMARK_SUCCESS=$BENCHMARK_SUCCESS >> $GITHUB_ENV
- name: Upload benchmark results to action run
if: (!env.ACT)
uses: ./.github/actions/artifact_upload
with:
path: >
benchmarks/constellation-${{ inputs.cloudProvider }}.json
name: "benchmarks"
encryptionSecret: ${{ inputs.encryptionSecret }}
- name: Upload benchmark results to opensearch
if: (!env.ACT)
Expand All @@ -199,13 +202,3 @@ runs:
CSP: ${{ inputs.cloudProvider }}
run: |
aws s3 cp benchmarks/constellation-${CSP}.json ${S3_PATH}/constellation-${CSP}.json
- name: Check performance comparison result
shell: bash
run: |
if [[ $BENCHMARK_SUCCESS == true ]] ; then
echo "Benchmark successful, all metrics in the expected range."
else
echo "::error::Benchmark failed, some metrics are outside of the expected range."
exit 1
fi
16 changes: 11 additions & 5 deletions .github/actions/e2e_benchmark/evaluate/compare.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,12 +40,15 @@

# List of allowed deviation
ALLOWED_RATIO_DELTA = {
'iops': 0.7,
'bw_kbytes': 0.7,
'tcp_bw_mbit': 0.7,
'udp_bw_mbit': 0.7,
'iops': 0.8,
'bw_kbytes': 0.8,
'tcp_bw_mbit': 0.8,
'udp_bw_mbit': 0.8,
}

# Track failed comparison status
failed = False


def is_bigger_better(bench_suite: str) -> bool:
    """Return True when higher values count as better for this benchmark suite.

    Membership test against the module-level BIGGER_BETTER collection
    (defined elsewhere in this file — presumably suites like throughput/IOPS
    where larger numbers are an improvement; confirm against its definition).
    """
    return bench_suite in BIGGER_BETTER
Expand Down Expand Up @@ -171,14 +174,17 @@ def compare_test(self, test, subtest, metric, bench_prev, bench_curr) -> str:


def set_failed() -> None:
    """Record that at least one benchmark comparison exceeded its threshold.

    Sets the module-level `failed` flag rather than writing an environment
    variable: `os.environ` mutations are local to this Python process and
    never propagate back to the calling shell step, so CI could not observe
    the old `COMPARISON_SUCCESS` value. The flag is checked in main() to
    decide the process exit status.
    """
    global failed
    failed = True


def main():
    """Run the benchmark comparison and print the report.

    Exits with status 1 when the module-level `failed` flag was set during
    the comparison, so the CI step fails on out-of-range metrics.
    """
    prev_path, curr_path = get_paths()
    comparer = BenchmarkComparer(prev_path, curr_path)
    report = comparer.compare()
    print(report)
    if failed:
        exit(1)


if __name__ == '__main__':
Expand Down

0 comments on commit 26f6fd0

Please sign in to comment.