# ci: test bencher (#314)
# Workflow file for this run
name: Benchmark

# Run on pushes to main and on PRs; skip doc-only changes.
on:
  push:
    paths-ignore: ["docs/**", "**.md"]
    branches:
      - main
  pull_request:
    paths-ignore: ["docs/**", "**.md"]
    # 'labeled' is included so adding the 'ci: benchmark' label (see job
    # conditions below) re-triggers the workflow on an open PR.
    types: [opened, reopened, synchronize, labeled]

# Cancel any in-flight run for the same workflow + ref so only the latest
# commit is benchmarked.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
jobs:
  # Macro (end-to-end) benchmark: build tailcall in release mode, serve the
  # benchmark GraphQL schema, load-test it with wrk, and upload the results
  # (converted to markdown) as a workflow artifact.
  macro_benchmark:
    name: Macro Benchmarks
    # Runs on every push; on PRs only when the 'ci: benchmark' label is set.
    if: "contains(github.event.pull_request.labels.*.name, 'ci: benchmark') || github.event_name == 'push'"
    runs-on: ubuntu-latest
    permissions:
      pull-requests: write
      contents: write
    steps:
      - uses: actions/checkout@v4
      - name: Install Stable Toolchain
        uses: actions-rust-lang/setup-rust-toolchain@v1
      - name: Run Cargo Build
        run: cargo build --release
      - name: Run Tailcall
        # Start the server in the background (&) so subsequent steps can
        # query it on localhost:8000.
        run: |
          TAILCALL_LOG_LEVEL=error ./target/release/tailcall start ci-benchmark/benchmark.graphql &
      - name: Install Wrk
        run: |
          sudo apt-get install -y wrk
      - name: Run Test Query
        # Smoke-test the endpoint before benchmarking it.
        run: |
          curl -i -X POST -d '{"query": "{posts{title}}"}' http://localhost:8000/graphql -H "Content-Type: application/json"
      - name: Warmup Wrk
        working-directory: ci-benchmark
        run: |
          wrk -d 10 -t 4 -c 100 -s wrk.lua http://localhost:8000/graphql
      - id: run_wrk
        name: Run Wrk
        working-directory: ci-benchmark
        run: |
          wrk -d 30 -t 4 -c 100 -s wrk.lua http://localhost:8000/graphql > wrk-output.txt
      - id: convert_wrk_output_markdown
        name: Convert Output to Markdown
        working-directory: ci-benchmark
        run: |
          node wrk-output-to-md.js wrk-output.txt > body.md
      - id: cat_md
        name: Cat Markdown
        # Echo the report into the job log for quick inspection.
        working-directory: ci-benchmark
        run: |
          cat body.md
      - name: "Upload Artifact"
        uses: actions/upload-artifact@v4
        with:
          name: body
          path: ci-benchmark/body.md
Track_and_Cache_Benchmarks:
name: Track and Cache Micro Benchmarks
if: (github.event_name == 'push' && github.ref == 'refs/heads/main')
permissions:
pull-requests: write
contents: write
runs-on: ubuntu-latest
env:
BENCHER_PROJECT: tailcall-hehe
BENCHER_ADAPTER: rust_criterion
BENCHER_TESTBED: benchmarking-runner-hehe
BASE_BENCHMARK_RESULTS: benches/base.txt
UPPER_BOUNDARY: 0.10
steps:
- name: Check out code
uses: actions/checkout@v4
- name: Install Rust
uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Install Bencher CLI
uses: bencherdev/bencher@main
- name: Run and track Benchmarks
run: |
cargo bench > benches/base.txt
bencher run \
--project "$BENCHER_PROJECT" \
--branch-reset main \
--testbed "$BENCHER_TESTBED" \
--token "${{ secrets.BENCHER_API_TOKEN }}" \
--adapter "$BENCHER_ADAPTER" \
--file "$BASE_BENCHMARK_RESULTS"
- name: Cache Criterion Benchmarks
uses: actions/cache@v4
with:
path: benches/base.txt
key: criterion_benchmarks_${{ github.sha }}
Criterion_Compare:
name: Comparing Micro Benchmarks
if: "contains(github.event.pull_request.labels.*.name, 'ci: benchmark')"
runs-on: ubuntu-latest
permissions:
pull-requests: write
contents: write
env:
BENCHER_PROJECT: tailcall-hehe
BENCHER_ADAPTER: rust_criterion
BENCHER_TESTBED: benchmarking-runner-hehe
PR_BENCHMARK_RESULTS: benches/change.txt
BASE_BENCHMARK_RESULTS: benches/base.txt
BRANCH_NAME: ${{ github.head_ref }}
UPPER_BOUNDARY: 0.10
steps:
- name: Check out code
uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Restore file
uses: actions/cache@v4
with:
path: benches/base.txt
key: criterion_benchmarks_${{ github.event.pull_request.base.sha }}
fail-on-cache-miss: true
- name: Install Bencher CLI
uses: bencherdev/bencher@main
- name: Track base benchmarks
run: |
bencher run \
--branch "$BRANCH_NAME" \
--else-branch \
--token "${{ secrets.BENCHER_API_TOKEN }}" \
--file "$BASE_BENCHMARK_RESULTS"
- name: Create PR threshold
run: |
bencher threshold create \
--branch-reset "$BRANCH_NAME" \
--testbed "$BENCHER_TESTBED" \
--measure latency \
--test percentage \
--upper-boundary ${{ env.UPPER_BOUNDARY }} \
--token "${{ secrets.BENCHER_API_TOKEN }}"
- name: Run Benchmarks
run: |
cargo bench > benches/change.txt
- name: Track PR Benchmarks
run: |
bencher run \
--branch-reset "$BRANCH_NAME" \
--token "${{ secrets.BENCHER_API_TOKEN }}" \
--github-actions "${{ secrets.GITHUB_TOKEN }}" \
--err \
--file "$PR_BENCHMARK_RESULTS"