# Benchmark workflow — introduced in PR #41 ("feat: bencher added").
---
# Workflow name shown in the Actions UI.
name: Benchmark

# Run on pushes to main and on pull-request activity; documentation-only
# changes are skipped via paths-ignore.
on:
  push:
    paths-ignore: ["docs/**", "**.md"]
    branches:
      - main
  pull_request:
    paths-ignore: ["docs/**", "**.md"]
    # "labeled" is included so adding the 'ci: benchmark' label (checked in the
    # job-level `if:` conditions below) re-triggers the workflow on a PR.
    types: [opened, reopened, synchronize, labeled]

# Only one run per ref at a time; a newer run cancels an in-flight one.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
jobs:
  # Macro benchmarks: build tailcall, serve a fixture schema, and load-test it
  # with wrk; the result is published as a markdown artifact.
  macro_benchmark:
    name: Macro Benchmarks
    # Always run on push; on PRs only when explicitly labeled 'ci: benchmark'.
    if: "contains(github.event.pull_request.labels.*.name, 'ci: benchmark') || github.event_name == 'push'"
    runs-on: benchmarking-runner
    permissions:
      pull-requests: write
      contents: write
    steps:
      - uses: actions/checkout@v4
      - name: Install Stable Toolchain
        uses: actions-rust-lang/setup-rust-toolchain@v1
      - name: Run Cargo Build
        run: cargo build --release
      - name: Run Tailcall
        # The server is backgrounded (&); the step returns immediately while
        # tailcall keeps serving for the later wrk steps.
        run: |
          TAILCALL_LOG_LEVEL=error ./target/release/tailcall start ci-benchmark/benchmark.graphql &
      - name: Install Wrk
        run: |
          sudo apt-get install -y wrk
      - name: Run Test Query
        # Doubles as a readiness probe: retry on connection-refused so we do
        # not race the backgrounded server coming up.
        run: |
          curl -i --retry 5 --retry-connrefused --retry-delay 2 -X POST -d '{"query": "{posts{title}}"}' http://localhost:8000/graphql -H "Content-Type: application/json"
      - name: Warmup Wrk
        working-directory: ci-benchmark
        run: |
          wrk -d 10 -t 4 -c 100 -s wrk.lua http://localhost:8000/graphql
      - id: run_wrk
        name: Run Wrk
        working-directory: ci-benchmark
        run: |
          wrk -d 30 -t 4 -c 100 -s wrk.lua http://localhost:8000/graphql > wrk-output.txt
      - id: convert_wrk_output_markdown
        name: Convert Output to Markdown
        working-directory: ci-benchmark
        run: |
          node wrk-output-to-md.js wrk-output.txt > body.md
      - id: cat_md
        name: Cat Markdown
        working-directory: ci-benchmark
        run: |
          cat body.md
      - name: "Upload Artifact"
        uses: actions/upload-artifact@v4
        with:
          name: body
          path: ci-benchmark/body.md
Cache_Benchmarks:
name: Cache Micro Benchmarks result
permissions:
pull-requests: write
contents: write
runs-on: benchmarking-runner
steps:
- name: Check out code
uses: actions/checkout@v4
- name: Install Rust
uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Run Benchmarks
run: |
cargo install cargo-criterion rust-script
cargo criterion --message-format=json > benches/main_benchmarks.json
./scripts/json_to_md.rs benches/main_benchmarks.json > benches/main_benchmarks.md
cat benches/main_benchmarks.md
- name: Cache Criterion Benchmarks Json
uses: actions/cache@v4
with:
path: benches/main_benchmarks.json
key: criterion_benchmarks_${{ github.sha }}
- name: Upload Artifact
uses: actions/upload-artifact@v4
with:
name: json-artifact
path: benches/main_benchmarks.json
Criterion_Compare:
name: Comparing Micro Benchmarks
if: "contains(github.event.pull_request.labels.*.name, 'ci: benchmark')"
runs-on: benchmarking-runner
permissions:
pull-requests: write
contents: write
steps:
- name: Check out code
uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Install Rust
uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Run Criterion Benchmarks
run: |
cargo install cargo-criterion rust-script
cargo criterion --message-format=json > benches/benchmarks.json
./scripts/json_to_md.rs benches/benchmarks.json > benches/change_benchmarks.md
- name: Print Criterion Benchmarks
run: cat benches/change_benchmarks.md
- name: Restore file
uses: actions/cache@v4
with:
path: benches/main_benchmarks.json
key: criterion_benchmarks_${{ github.event.pull_request.base.sha }}
fail-on-cache-miss: true
- name: Print Benchmark Comparision
run: ./scripts/criterion_compare.rs benches/main_benchmarks.json benches/benchmarks.json table
- name: Check Degradation
run: ./scripts/criterion_compare.rs benches/main_benchmarks.json benches/benchmarks.json check
benchmark_with_bencher:
name: Continuous Benchmarking with Bencher
permissions:
pull-requests: write
contents: write
runs-on: ubuntu-latest
env:
BENCHER_PROJECT: tailcall
BENCHER_TESTBED: ubuntu-latest
BENCHER_ADAPTER: json
BASE_BENCHMARK_RESULTS: benches/main_benchmarks.json
steps:
- name: Download Text File
uses: actions/download-artifact@v4
with:
name: json-artifact
- name: Convert Artifact
run: |
cat benches/main_benchmarks.json | jq -s 'map(select(.id and .mean and .slope and .median) | {id: .id, mean: .mean, slope: .slope, median: .median})' > benches/track_benchmark.json
- uses: bencherdev/bencher@main
- name: Track base Benchmarks
run: |
export BENCHER_API_TOKEN=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJhdWQiOiJhcGlfa2V5IiwiZXhwIjoxNzIwMDYyMTI4LCJpYXQiOjE3MTAwNjIxMjgsImlzcyI6ImJlbmNoZXIuZGV2Iiwic3ViIjoiZGFicmFsYWxhbmtyaXRAZ21haWwuY29tIiwib3JnIjpudWxsfQ.0QUtYqWgPtyYV8d6SsK2m4hNoa88r6aSI6Ojupatd3s
bencher run \
--branch 'main' \
--ci-id '${{ github.event.pull_request.number }}' \
--github-actions "${{ secrets.GITHUB_TOKEN }}" \
--err \
--file "$BASE_BENCHMARK_RESULTS"