Pet 23 k6 action #88

Merged · 7 commits · Apr 25, 2024
93 changes: 93 additions & 0 deletions .github/workflows/benchmarks_upload.yaml
@@ -0,0 +1,93 @@
name: 'Benchmarks summary uploader'
on:
workflow_call:
inputs:
artifact:
required: true
type: string
default: ""

concurrency:
group: "${{ github.ref }}-${{ github.head_ref }}-${{ github.base_ref }}"
cancel-in-progress: true

jobs:
upload:
    # store reports only from tags; only those are used for comparison
if: github.ref_type == 'tag'
runs-on: ubuntu-latest
steps:

- name: Check out repository code
uses: actions/checkout@v3

- name: Download artifact
uses: actions/download-artifact@v3
with:
name: ${{ inputs.artifact }}
path: .

- name: Prepare upload prerequisites
run: |
mkdir -p artifacts
cp summary.json artifacts/summary.json
echo "NOW=$(date +'%Y-%m-%dT%H:%M:%S')" >> $GITHUB_ENV

- name: Upload the site to artifactory
uses: PiwikPRO/actions/s3/upload@master
with:
aws-access-key-id: ${{ secrets.ARTIFACTORY_S3_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.ARTIFACTORY_S3_SECRET_ACCESS_KEY }}
aws-http-proxy: ${{ secrets.FORWARD_PROXY_HTTP }}
aws-https-proxy: ${{ secrets.FORWARD_PROXY_HTTPS }}
aws-bucket: piwikpro-artifactory
aws-region: eu-central-1
src-path: artifacts
          dst-path: "long/benchmarks/${{ github.repository }}/${{ env.NOW }}"

compile:
permissions:
contents: read
pull-requests: write
runs-on: ubuntu-latest
steps:

- name: Copy all the reports from s3
shell: bash
env:
AWS_ACCESS_KEY_ID: ${{ secrets.ARTIFACTORY_S3_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.ARTIFACTORY_S3_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: eu-central-1
HTTP_PROXY: ${{ secrets.FORWARD_PROXY_HTTP }}
HTTPS_PROXY: ${{ secrets.FORWARD_PROXY_HTTPS }}
run: aws s3 cp --recursive s3://piwikpro-artifactory/long/benchmarks/${{ github.repository }} reports

- name: Prepare the report
id: compile
uses: PiwikPRO/actions/benchmark/report@pet-23-k6-action
with:
path: reports

- name: Print the report
shell: bash
run: echo "${{ steps.compile.outputs.report }}"

- name: Add PR comment
uses: actions/github-script@v7
if: ${{ github.event.issue.pull_request }}
env:
REPORT: ${{ steps.compile.outputs.report }}
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
const issue_number = context.issue.number;
const owner = context.repo.owner;
const repo = context.repo.repo;

await github.rest.issues.createComment({
owner,
repo,
issue_number,
body: process.env.REPORT
});
console.log('Created a new comment.');
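
For orientation, a consuming repository could invoke this reusable workflow roughly as sketched below. The trigger, artifact name, workflow ref and "secrets: inherit" are assumptions for illustration, not part of this PR.

# Hypothetical caller (not part of this PR): a benchmark job produces a
# summary.json artifact, then hands it to the reusable workflow above.
name: benchmarks
on:
  push:
    tags: ["*"]

jobs:
  benchmark:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      # ... run the k6 composite action added later in this PR,
      # which leaves summary.json in the workspace ...
      - uses: actions/upload-artifact@v3
        with:
          name: k6-summary
          path: summary.json

  report:
    needs: benchmark
    # Reference to the reusable workflow above; the ref is illustrative.
    uses: PiwikPRO/actions/.github/workflows/benchmarks_upload.yaml@master
    with:
      artifact: k6-summary
    secrets: inherit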
35 changes: 35 additions & 0 deletions benchmark/k6/action.yaml
@@ -0,0 +1,35 @@
name: "Piwik PRO cosign download"
description: "Download cosign binary"
inputs:
script:
required: true
description: "Path to the script, that should be launched"
vus:
required: false
description: "Number of virtual users"
duration:
required: false
description: "Duration of the test, in format like: 10s, 5m, etc"
runs:
using: "composite"
steps:
    - name: Append the summary handler to the script
shell: bash
run: cat ${{ github.action_path }}/summary.js >> ${{ inputs.script }}

- name: Run K6
shell: bash
run: |
command="docker run --network host -u \"$(id -u):$(id -g)\" -v \"${PWD}:/home/k6\" --rm grafana/k6 run"

command+=" ${{ inputs.script }}"

if [ -n "${{ inputs.vus }}" ]; then
command+=" --vus ${{ inputs.vus }}"
fi

if [ -n "${{ inputs.duration }}" ]; then
command+=" --duration ${{ inputs.duration }}"
fi

        eval "$command"
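
A minimal sketch of a workflow step that could call this composite action, assuming it is published under PiwikPRO/actions; the ref, script path, vus and duration values are illustrative.

# Hypothetical usage of the k6 composite action defined above.
- name: Run k6 benchmark
  uses: PiwikPRO/actions/benchmark/k6@master
  with:
    script: ./benchmarks/smoke.js
    vus: 10
    duration: 1m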
47 changes: 47 additions & 0 deletions benchmark/k6/summary.js
@@ -0,0 +1,47 @@

export function handleSummary(data) {
const nowMs = Date.now();
  const summaryObj = {
"version": "v1",
"run": {
"start_s_since_epoch": Math.floor((nowMs - data.state.testRunDurationMs) / 1000),
"end_s_since_epoch": Math.floor(nowMs / 1000),
},
"data": {
"latency_ms_p50": {
"value": data.metrics.http_req_duration.values.med,
"description": "50th percentile latency in milliseconds",
"comparison": "lower_is_better"
},
"latency_ms_p90":{
"value": data.metrics.http_req_duration.values["p(90)"],
"description": "90th percentile latency in milliseconds",
"comparison": "lower_is_better"
},
"latency_ms_p95": {
"value": data.metrics.http_req_duration.values["p(95)"],
"description": "95th percentile latency in milliseconds",
"comparison": "lower_is_better"
},
"reqps_avg": {
"value": data.metrics.http_reqs.values.rate,
"description": "Average number of requests per second",
"comparison": "higher_is_better"
},
"req_failure_rate": {
"value": data.metrics.http_req_failed.values.rate,
"description": "The ratio of requests that failed (0-1)",
"comparison": "lower_is_better"
},
}
};
  try {
    const extraMetrics = getExtraMetrics(data);
    summaryObj.data = Object.assign({}, summaryObj.data, extraMetrics.data);
  } catch (e) {
    console.log("Did not collect extra metrics: " + e);
  }
return {
'summary.json': JSON.stringify(summaryObj, null, 2),
};
}
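
handleSummary above merges extra metrics from a getExtraMetrics function that the benchmark script itself may define (the try/catch tolerates its absence). A purely hypothetical hook, assuming the script tracks a custom k6 Trend metric, might look like this:

// Hypothetical hook defined in the benchmark script itself; summary.js is
// appended to that script, so handleSummary above can see this function.
import { Trend } from "k6/metrics";

// Assumes the test code calls checkoutDuration.add(<milliseconds>) during the run.
const checkoutDuration = new Trend("checkout_duration");

export function getExtraMetrics(data) {
  return {
    data: {
      checkout_duration_ms_p95: {
        value: data.metrics.checkout_duration.values["p(95)"],
        description: "95th percentile checkout duration in milliseconds",
        comparison: "lower_is_better",
      },
    },
  };
}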
20 changes: 20 additions & 0 deletions benchmark/report/action.yaml
@@ -0,0 +1,20 @@
name: 'Benchmark summary compiler'
inputs:
  path:
    description: "Directory containing the downloaded benchmark reports"
    default: "."
outputs:
report:
description: "Compiled report"
value: ${{ steps.compile.outputs.report }}
runs:
using: 'composite'
steps:

- name: Prepare the report
id: compile
shell: bash
run: |
REPORT=$(python ${{ github.action_path }}/script/cli.py --path ${{ inputs.path }})
echo "report<<EOF" >> $GITHUB_OUTPUT
echo "${REPORT}" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
5 changes: 5 additions & 0 deletions benchmark/report/script/.gitignore
@@ -0,0 +1,5 @@
.pytest_cache/
__pycache__/
*.py[cod]
*$py.class
reports
4 changes: 4 additions & 0 deletions benchmark/report/script/__init__.py
@@ -0,0 +1,4 @@
import os
import sys

sys.path.append(os.path.join(os.path.dirname(__file__), "."))
14 changes: 14 additions & 0 deletions benchmark/report/script/cli.py
@@ -0,0 +1,14 @@
import argparse

import report_processor


if __name__ == "__main__":
parser = argparse.ArgumentParser(
epilog="""K6 benchmark and reporting automation"""
)

parser.add_argument("--path", default="./reports")
args = parser.parse_args()

report_processor.process(args.path)
95 changes: 95 additions & 0 deletions benchmark/report/script/report_processor.py
@@ -0,0 +1,95 @@
import json
import os
import statistics

REPORT_FILE_NAME = "summary.json"

ROUND_PRECISION = 3

GREEN_DOT = "\U0001F7E2"
RED_DOT = "\U0001F534"
ARROW_UP = "\U00002197"
ARROW_DOWN = "\U00002198"
ARROW_EQ = "\U00002194"


def process(reports_dir):
    # Each subdirectory of reports_dir is named with its upload timestamp and
    # contains a summary.json; sorting the names in reverse order therefore
    # puts the newest report first.
    print("**Automated k6 Benchmark Report**")
    print("--------------------------------")
    report_list = sorted([
        report for report in os.listdir(reports_dir)
        if os.path.isfile(get_report_uri(reports_dir, report))
    ], reverse=True)

    last_report = json.load(open(get_report_uri(reports_dir, report_list.pop(0))))["data"]

    # Compare the newest report against up to five previous runs.
    compare_with_reports = []
    for i in report_list[0:5]:
        old_report = json.load(open(get_report_uri(reports_dir, i)))
        compare_with_reports.append(old_report["data"])

    calculate_avg_metrics(last_report, compare_with_reports)

    print_results(last_report, len(compare_with_reports))


def print_results(last_report, previous_runs):
print("Compared to average of " + str(previous_runs) + " latest runs")

for i in last_report:
avg = round(last_report[i]["avg"], ROUND_PRECISION)
val = round(last_report[i]["value"], ROUND_PRECISION)
cmp_logic = last_report[i]["comparison"]
description = last_report[i]["description"]
result = compare(val, avg, cmp_logic)

print(f"{result['dot']} {description}: {avg} {result['arrow']} {val}")


def get_report_uri(base_dir, report_dir):
return base_dir + "/" + report_dir + "/" + REPORT_FILE_NAME


def calculate_avg_metrics(last_report, compare_with_reports):
for i in last_report:
try:
last_report[i]["avg"] = statistics.mean(
[metric[i]["value"] for metric in compare_with_reports]
)
except statistics.StatisticsError:
last_report[i]["avg"] = last_report[i]["value"]


def compare(x, y, logic):
    # Returns the trend arrow (new value vs. average) and the verdict dot
    # (green when the metric is equal or moved in the desired direction,
    # red otherwise).
    match cmp(x, y):
case 1:
arrow = ARROW_UP
case -1:
arrow = ARROW_DOWN
case _:
arrow = ARROW_EQ

match cmp(x, y) * compare_logic_from_str(logic):
case 1:
dot = GREEN_DOT
case -1:
dot = RED_DOT
case _:
dot = GREEN_DOT

return {"dot": dot, "arrow": arrow}


def compare_logic_from_str(logic_name):
if logic_name == "higher_is_better":
logic = 1
elif logic_name == "lower_is_better":
logic = -1
else:
raise Exception("Unhandled comparison logic: " + logic_name)

return logic


def cmp(x, y):
    # Three-way comparison: 1 if x > y, -1 if x < y, 0 if x == y.
    return (x > y) - (x < y)
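
For reference, the processor expects the layout that the upload job writes to S3: one timestamp-named directory per tagged run, each holding a summary.json. A hypothetical layout and the kind of lines print_results emits (all values illustrative):

reports/
  2024-04-20T10:00:00/summary.json
  2024-04-22T10:00:00/summary.json
  2024-04-25T10:00:00/summary.json   # newest run, compared against the average of the older ones

🟢 50th percentile latency in milliseconds: 12.5 ↘ 11.8
🔴 Average number of requests per second: 250.0 ↘ 231.4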