diff --git a/.github/workflows/benchmarks_upload.yaml b/.github/workflows/benchmarks_upload.yaml
new file mode 100644
index 0000000..6d2fe74
--- /dev/null
+++ b/.github/workflows/benchmarks_upload.yaml
@@ -0,0 +1,93 @@
+name: 'Benchmarks summary uploader'
+on:
+  workflow_call:
+    inputs:
+      artifact:
+        required: true
+        type: string
+        default: ""
+
+concurrency:
+  group: "${{ github.ref }}-${{ github.head_ref }}-${{ github.base_ref }}"
+  cancel-in-progress: true
+
+jobs:
+  upload:
+    # store only reports from tags; only those are used for comparison
+    if: github.ref_type == 'tag'
+    runs-on: ubuntu-latest
+    steps:
+
+      - name: Check out repository code
+        uses: actions/checkout@v3
+
+      - name: Download artifact
+        uses: actions/download-artifact@v3
+        with:
+          name: ${{ inputs.artifact }}
+          path: .
+
+      - name: Prepare upload prerequisites
+        run: |
+          mkdir -p artifacts
+          cp summary.json artifacts/summary.json
+          echo "NOW=$(date +'%Y-%m-%dT%H:%M:%S')" >> $GITHUB_ENV
+
+      - name: Upload the report to artifactory
+        uses: PiwikPRO/actions/s3/upload@master
+        with:
+          aws-access-key-id: ${{ secrets.ARTIFACTORY_S3_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.ARTIFACTORY_S3_SECRET_ACCESS_KEY }}
+          aws-http-proxy: ${{ secrets.FORWARD_PROXY_HTTP }}
+          aws-https-proxy: ${{ secrets.FORWARD_PROXY_HTTPS }}
+          aws-bucket: piwikpro-artifactory
+          aws-region: eu-central-1
+          src-path: artifacts
+          dst-path: "long/benchmarks/${{ github.repository }}/${{ env.NOW }}"
+
+  compile:
+    permissions:
+      contents: read
+      pull-requests: write
+    runs-on: ubuntu-latest
+    steps:
+
+      - name: Copy all the reports from s3
+        shell: bash
+        env:
+          AWS_ACCESS_KEY_ID: ${{ secrets.ARTIFACTORY_S3_ACCESS_KEY_ID }}
+          AWS_SECRET_ACCESS_KEY: ${{ secrets.ARTIFACTORY_S3_SECRET_ACCESS_KEY }}
+          AWS_DEFAULT_REGION: eu-central-1
+          HTTP_PROXY: ${{ secrets.FORWARD_PROXY_HTTP }}
+          HTTPS_PROXY: ${{ secrets.FORWARD_PROXY_HTTPS }}
+        run: aws s3 cp --recursive s3://piwikpro-artifactory/long/benchmarks/${{ github.repository }} reports
+
+      - name: Prepare the report
+        id: compile
+        uses: PiwikPRO/actions/benchmark/report@pet-23-k6-action
+        with:
+          path: reports
+
+      - name: Print the report
+        shell: bash
+        run: echo "${{ steps.compile.outputs.report }}"
+
+      - name: Add PR comment
+        uses: actions/github-script@v7
+        if: ${{ github.event.issue.pull_request }}
+        env:
+          REPORT: ${{ steps.compile.outputs.report }}
+        with:
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          script: |
+            const issue_number = context.issue.number;
+            const owner = context.repo.owner;
+            const repo = context.repo.repo;
+
+            await github.rest.issues.createComment({
+              owner,
+              repo,
+              issue_number,
+              body: process.env.REPORT
+            });
+            console.log('Created a new comment.');
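For context, a hedged sketch of how a consuming repository might wire this together: run the k6 action added below, publish summary.json as an artifact, and call this reusable workflow with that artifact name. All job names, refs and file paths in this sketch are assumptions for illustration, not part of this change.

```yaml
# Hypothetical caller workflow (illustrative only)
name: Benchmarks
on:
  push:
    tags: ['*']

jobs:
  benchmark:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      # Composite action added in benchmark/k6/action.yaml below; the @ref is assumed.
      - name: Run k6
        uses: PiwikPRO/actions/benchmark/k6@master
        with:
          script: benchmarks/test.js   # hypothetical k6 script in the caller repo
          vus: 10
          duration: 1m

      # handleSummary() writes summary.json into the workspace root,
      # which is where the uploader expects it at the artifact root.
      - name: Store the summary
        uses: actions/upload-artifact@v3
        with:
          name: k6-summary
          path: summary.json

  upload:
    needs: benchmark
    uses: PiwikPRO/actions/.github/workflows/benchmarks_upload.yaml@master
    with:
      artifact: k6-summary
    secrets: inherit
```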
diff --git a/benchmark/k6/action.yaml b/benchmark/k6/action.yaml
new file mode 100644
index 0000000..48bc060
--- /dev/null
+++ b/benchmark/k6/action.yaml
@@ -0,0 +1,35 @@
+name: "Piwik PRO k6 benchmark runner"
+description: "Run a k6 benchmark script and collect its summary"
+inputs:
+  script:
+    required: true
+    description: "Path to the script that should be launched"
+  vus:
+    required: false
+    description: "Number of virtual users"
+  duration:
+    required: false
+    description: "Duration of the test, in a format like 10s, 5m, etc."
+runs:
+  using: "composite"
+  steps:
+    - name: Append the summary handler to the script
+      shell: bash
+      run: cat ${{ github.action_path }}/summary.js >> ${{ inputs.script }}
+
+    - name: Run K6
+      shell: bash
+      run: |
+        command="docker run --network host -u \"$(id -u):$(id -g)\" -v \"${PWD}:/home/k6\" --rm grafana/k6 run"
+
+        command+=" ${{ inputs.script }}"
+
+        if [ -n "${{ inputs.vus }}" ]; then
+          command+=" --vus ${{ inputs.vus }}"
+        fi
+
+        if [ -n "${{ inputs.duration }}" ]; then
+          command+=" --duration ${{ inputs.duration }}"
+        fi
+
+        eval $command
diff --git a/benchmark/k6/summary.js b/benchmark/k6/summary.js
new file mode 100644
index 0000000..f456375
--- /dev/null
+++ b/benchmark/k6/summary.js
@@ -0,0 +1,47 @@
+
+export function handleSummary(data) {
+  const nowMs = Date.now();
+  var summaryObj = {
+    "version": "v1",
+    "run": {
+      "start_s_since_epoch": Math.floor((nowMs - data.state.testRunDurationMs) / 1000),
+      "end_s_since_epoch": Math.floor(nowMs / 1000),
+    },
+    "data": {
+      "latency_ms_p50": {
+        "value": data.metrics.http_req_duration.values.med,
+        "description": "50th percentile latency in milliseconds",
+        "comparison": "lower_is_better"
+      },
+      "latency_ms_p90": {
+        "value": data.metrics.http_req_duration.values["p(90)"],
+        "description": "90th percentile latency in milliseconds",
+        "comparison": "lower_is_better"
+      },
+      "latency_ms_p95": {
+        "value": data.metrics.http_req_duration.values["p(95)"],
+        "description": "95th percentile latency in milliseconds",
+        "comparison": "lower_is_better"
+      },
+      "reqps_avg": {
+        "value": data.metrics.http_reqs.values.rate,
+        "description": "Average number of requests per second",
+        "comparison": "higher_is_better"
+      },
+      "req_failure_rate": {
+        "value": data.metrics.http_req_failed.values.rate,
+        "description": "The ratio of requests that failed (0-1)",
+        "comparison": "lower_is_better"
+      },
+    }
+  };
+  try {
+    const extraMetrics = getExtraMetrics(data)
+    summaryObj.data = Object.assign({}, summaryObj.data, extraMetrics.data)
+  } catch (e) {
+    console.log("Did not collect extra metrics: " + e)
+  }
+  return {
+    'summary.json': JSON.stringify(summaryObj, null, 2),
+  };
+}
\ No newline at end of file
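handleSummary merges the result of getExtraMetrics(data) when the benchmark script defines it; otherwise the try/catch keeps only the default metrics. A minimal sketch of such a hook, defined in the benchmark script itself (the metric name and description are made up for illustration):

```javascript
// Hypothetical extra-metrics hook, not part of this change.
// It must return an object with a "data" key whose entries follow the same
// value/description/comparison shape as the defaults in summary.js.
function getExtraMetrics(data) {
  return {
    "data": {
      "iterations_total": {
        "value": data.metrics.iterations.values.count,
        "description": "Total number of completed iterations",
        "comparison": "higher_is_better"
      }
    }
  };
}
```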
diff --git a/benchmark/report/action.yaml b/benchmark/report/action.yaml
new file mode 100644
index 0000000..894ae7f
--- /dev/null
+++ b/benchmark/report/action.yaml
@@ -0,0 +1,20 @@
+name: 'Benchmark summary compiler'
+inputs:
+  path:
+    default: "."
+outputs:
+  report:
+    description: "Compiled report"
+    value: ${{ steps.compile.outputs.report }}
+runs:
+  using: 'composite'
+  steps:
+
+    - name: Prepare the report
+      id: compile
+      shell: bash
+      run: |
+        REPORT=$(python ${{ github.action_path }}/script/cli.py --path ${{ inputs.path }})
+        echo "report<<EOF" >> $GITHUB_OUTPUT
+        echo "${REPORT}" >> $GITHUB_OUTPUT
+        echo "EOF" >> $GITHUB_OUTPUT
diff --git a/benchmark/report/script/.gitignore b/benchmark/report/script/.gitignore
new file mode 100644
index 0000000..a8aa2ba
--- /dev/null
+++ b/benchmark/report/script/.gitignore
@@ -0,0 +1,5 @@
+.pytest_cache/
+__pycache__/
+*.py[cod]
+*$py.class
+reports
diff --git a/benchmark/report/script/__init__.py b/benchmark/report/script/__init__.py
new file mode 100644
index 0000000..561cc34
--- /dev/null
+++ b/benchmark/report/script/__init__.py
@@ -0,0 +1,4 @@
+import os
+import sys
+
+sys.path.append(os.path.join(os.path.dirname(__file__), "."))
diff --git a/benchmark/report/script/cli.py b/benchmark/report/script/cli.py
new file mode 100644
index 0000000..c5bef41
--- /dev/null
+++ b/benchmark/report/script/cli.py
@@ -0,0 +1,14 @@
+import argparse
+
+import report_processor
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(
+        epilog="""K6 benchmark and reporting automation"""
+    )
+
+    parser.add_argument("--path", default="./reports")
+    args = parser.parse_args()
+
+    report_processor.process(args.path)
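For local debugging, the compiler can be run directly against a directory of downloaded summaries, mirroring what the compile job does (the bucket path placeholders below are illustrative):

```bash
# one timestamped sub-directory per run, each containing a summary.json
aws s3 cp --recursive s3://piwikpro-artifactory/long/benchmarks/<org>/<repo> reports
python benchmark/report/script/cli.py --path reports
```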
diff --git a/benchmark/report/script/report_processor.py b/benchmark/report/script/report_processor.py
new file mode 100644
index 0000000..996f973
--- /dev/null
+++ b/benchmark/report/script/report_processor.py
@@ -0,0 +1,95 @@
+import json
+import os
+import statistics
+
+REPORT_FILE_NAME = "summary.json"
+
+ROUND_PRECISION = 3
+
+GREEN_DOT = "\U0001F7E2"
+RED_DOT = "\U0001F534"
+ARROW_UP = "\U00002197"
+ARROW_DOWN = "\U00002198"
+ARROW_EQ = "\U00002194"
+
+
+def process(dir):
+    print("**Automated k6 Benchmark Report**")
+    print("--------------------------------")
+    report_list = sorted([
+        report for report in os.listdir(dir) if os.path.isfile(get_report_uri(dir, report))
+    ], reverse=True)
+
+    last_report = json.load(open(get_report_uri(dir, report_list.pop(0))))["data"]
+
+    compare_with_reports = []
+    for i in report_list[0:5]:
+        old_report = json.load(open(get_report_uri(dir, i)))
+        compare_with_reports.append(old_report["data"])
+
+    calculate_avg_metrics(last_report, compare_with_reports)
+
+    print_results(last_report, len(compare_with_reports))
+
+
+def print_results(last_report, previous_runs):
+    print("Compared to average of " + str(previous_runs) + " latest runs")
+
+    for i in last_report:
+        avg = round(last_report[i]["avg"], ROUND_PRECISION)
+        val = round(last_report[i]["value"], ROUND_PRECISION)
+        cmp_logic = last_report[i]["comparison"]
+        description = last_report[i]["description"]
+        result = compare(val, avg, cmp_logic)
+
+        print(f"{result['dot']} {description}: {avg} {result['arrow']} {val}")
+
+
+def get_report_uri(base_dir, report_dir):
+    return base_dir + "/" + report_dir + "/" + REPORT_FILE_NAME
+
+
+def calculate_avg_metrics(last_report, compare_with_reports):
+    for i in last_report:
+        try:
+            last_report[i]["avg"] = statistics.mean(
+                [metric[i]["value"] for metric in compare_with_reports]
+            )
+        except statistics.StatisticsError:
+            last_report[i]["avg"] = last_report[i]["value"]
+
+
+def compare(x, y, logic):
+
+    match cmp(x, y):
+        case 1:
+            arrow = ARROW_UP
+        case -1:
+            arrow = ARROW_DOWN
+        case _:
+            arrow = ARROW_EQ
+
+    match cmp(x, y) * compare_logic_from_str(logic):
+        case 1:
+            dot = GREEN_DOT
+        case -1:
+            dot = RED_DOT
+        case _:
+            dot = GREEN_DOT
+
+    return {"dot": dot, "arrow": arrow}
+
+
+def compare_logic_from_str(logic_name):
+    if logic_name == "higher_is_better":
+        logic = 1
+    elif logic_name == "lower_is_better":
+        logic = -1
+    else:
+        raise Exception("Unhandled comparison logic: " + logic_name)
+
+
+    return logic
+
+
+def cmp(x, y):
+    return (x > y) - (x < y)
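For reference, with purely illustrative (made-up) numbers, the compiled report that ends up in the PR comment looks roughly like this, abridged; the left value is the average of previous runs, the right value is the newest run:

```
**Automated k6 Benchmark Report**
--------------------------------
Compared to average of 5 latest runs
🟢 50th percentile latency in milliseconds: 112.4 ↘ 98.7
🟢 90th percentile latency in milliseconds: 240.1 ↘ 231.5
🔴 Average number of requests per second: 1530.2 ↘ 1310.8
```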