
Commit

Merge pull request #16 from flferretti/feature/benchmarks
flferretti authored Sep 20, 2024
2 parents df28248 + 6ea39be commit cfb3fa6
Showing 3 changed files with 140 additions and 1 deletion.
80 changes: 80 additions & 0 deletions .github/workflows/run_benchmarks.yml
@@ -0,0 +1,80 @@
name: Run performance benchmarks
on:
  pull_request:
    types:
      - labeled
      - synchronize
  workflow_dispatch:

permissions:
  contents: write
  deployments: write
  pull-requests: write

jobs:
  benchmark:
    if: contains(github.event.label.name, 'benchmark') || github.event_name == 'workflow_dispatch'
    name: Run pytest-benchmark
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Setup pixi environment
        uses: prefix-dev/[email protected]
        with:
          pixi-version: v0.29.0
          cache: true
          auth-host: prefix.dev
          auth-token: ${{ secrets.PREFIX_DEV_TOKEN }}

      - name: Run benchmarks using pixi
        run: pixi run benchmarks --env test-cpu

      - name: Store benchmark result
        uses: benchmark-action/github-action-benchmark@v1
        with:
          name: Python Benchmark with pytest-benchmark
          tool: 'pytest'
          output-file-path: tests/output.json
          github-token: ${{ secrets.GITHUB_TOKEN }}
          auto-push: true
          alert-threshold: '200%'
          comment-on-alert: true
          fail-on-alert: true

      - name: Convert JSON to Markdown Table
        id: convert_json
        run: |
          python3 - <<EOF
          import json
          # Load the JSON data
          with open('tests/output.json', 'r') as file:
              data = json.load(file)
          # Extract keys and rows
          keys = data[0].keys() if len(data) > 0 else []
          rows = [[str(item[key]) for key in keys] for item in data]
          # Create the Markdown table
          table_header = '| ' + ' | '.join(keys) + ' |'
          table_divider = '| ' + ' | '.join(['---'] * len(keys)) + ' |'
          table_rows = ['| ' + ' | '.join(row) + ' |' for row in rows]
          markdown_table = '\n'.join([table_header, table_divider] + table_rows)
          # Save Markdown table as an output variable
          print(f"::set-output name=markdown_table::{markdown_table}")
          EOF
      - name: Update PR Description with Benchmark Results
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          # Get the current PR description
          PR_BODY=$(gh api repos/${{ github.repository }}/pulls/${{ github.event.number }} --jq .body)
          # Append the benchmark results to the description
          UPDATED_BODY="$PR_BODY\n\n## Benchmark Results\n${{ steps.convert_json.outputs.markdown_table }}"
          # Update the PR description
          gh api -X PATCH repos/${{ github.repository }}/pulls/${{ github.event.number }} -f body="$UPDATED_BODY"
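A note on the two scripted steps above, with a hedged sketch of an alternative for the JSON-to-Markdown conversion: `::set-output` is deprecated on current GitHub Actions runners, and a multiline value such as a Markdown table needs a delimiter when written as a step output. The sketch also assumes that the report produced by `--benchmark-json` nests the per-test results under a top-level `benchmarks` key; none of this is part of the PR itself.

```python
import json
import os

# Load the pytest-benchmark report; the per-test results are assumed to sit
# under a top-level "benchmarks" key (an assumption, not taken from this PR).
with open("tests/output.json") as file:
    data = json.load(file).get("benchmarks", [])

# Build the Markdown table the same way as the workflow step above.
keys = list(data[0].keys()) if data else []
rows = [[str(item[key]) for key in keys] for item in data]
table_header = "| " + " | ".join(keys) + " |"
table_divider = "| " + " | ".join(["---"] * len(keys)) + " |"
table_rows = ["| " + " | ".join(row) + " |" for row in rows]
markdown_table = "\n".join([table_header, table_divider] + table_rows)

# Publish a multiline step output through the GITHUB_OUTPUT file with a
# heredoc-style delimiter, instead of the deprecated ::set-output command.
with open(os.environ["GITHUB_OUTPUT"], "a") as out:
    out.write("markdown_table<<MARKDOWN_EOF\n")
    out.write(markdown_table + "\n")
    out.write("MARKDOWN_EOF\n")
```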
5 changes: 4 additions & 1 deletion pyproject.toml
@@ -64,6 +64,7 @@ style = [
testing = [
    "idyntree >= 12.2.1",
    "pytest >=6.0",
    "pytest-benchmark",
    "pytest-icdiff",
    "robot-descriptions",
]
@@ -118,7 +119,7 @@ multi_line_output = 3
profile = "black"

[tool.pytest.ini_options]
addopts = "-rsxX -v --strict-markers"
addopts = "-rsxX -v --strict-markers --benchmark-skip"
minversion = "6.0"
testpaths = [
    "tests",
@@ -199,6 +200,7 @@ typing_extensions = "*"
examples = { cmd = "jupyter notebook ./examples" }
pipcheck = "pip check"
test = { cmd = "pytest", depends_on = ["pipcheck"] }
benchmark = { cmd = "pytest --benchmark-only --benchmark-json=output.json"}

[tool.pixi.feature.test.dependencies]
black = "24.*"
@@ -207,6 +209,7 @@ isort = "*"
pip = "*"
pre-commit = "*"
pytest = "*"
pytest-benchmark = "*"
pytest-icdiff = "*"
robot_descriptions = "*"
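For context on how the new options fit together: the `--benchmark-skip` flag added to the default `addopts` keeps benchmarks out of regular `pytest` runs, while the new `benchmark` pixi task re-enables them with `--benchmark-only` and writes the timings to `output.json`. A minimal illustration of how the pytest-benchmark fixture behaves under these two flags (the test below is purely illustrative and not part of this PR):

```python
def test_sum_benchmark(benchmark):
    # Tests requesting the `benchmark` fixture are skipped when pytest runs
    # with --benchmark-skip, and are the only tests executed with --benchmark-only.
    result = benchmark(sum, range(1_000))
    assert result == 499_500
```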

56 changes: 56 additions & 0 deletions tests/benchmarks.py
@@ -0,0 +1,56 @@
import pytest

import jaxsim.api as js


# Define the test functions for pytest-benchmark
@pytest.mark.benchmark
def test_free_floating_jacobian(
    jaxsim_model_ergocub_reduced: js.model.JaxSimModel, benchmark
):
    model = jaxsim_model_ergocub_reduced
    data = js.data.random_model_data(model=model)

    # Warm-up call to avoid including compilation time
    js.model.generalized_free_floating_jacobian(model=model, data=data)

    benchmark(js.model.generalized_free_floating_jacobian, model, data)


@pytest.mark.benchmark
def test_free_floating_bias_forces(
    jaxsim_model_ergocub_reduced: js.model.JaxSimModel, benchmark
):
    model = jaxsim_model_ergocub_reduced
    data = js.data.random_model_data(model=model)

    # Warm-up call to avoid including compilation time
    js.model.free_floating_bias_forces(model=model, data=data)

    benchmark(js.model.free_floating_bias_forces, model, data)


@pytest.mark.benchmark
def test_free_floating_mass_matrix(
    jaxsim_model_ergocub_reduced: js.model.JaxSimModel, benchmark
):
    model = jaxsim_model_ergocub_reduced
    data = js.data.random_model_data(model=model)

    # Warm-up call to avoid including compilation time
    js.model.free_floating_mass_matrix(model=model, data=data)

    benchmark(js.model.free_floating_mass_matrix, model, data)


@pytest.mark.benchmark
def test_free_floating_jacobian_derivative(
    jaxsim_model_ergocub_reduced: js.model.JaxSimModel, benchmark
):
    model = jaxsim_model_ergocub_reduced
    data = js.data.random_model_data(model=model)

    # Warm-up call to avoid including compilation time
    js.model.generalized_free_floating_jacobian_derivative(model=model, data=data)

    benchmark(js.model.generalized_free_floating_jacobian_derivative, model, data)
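One caveat worth flagging for these benchmarks: JAX dispatches work asynchronously, so a timed call can return before the device computation has finished. If that turns out to matter here, the measured callable can block on its result, as in the sketch below (an illustrative variant reusing the same fixture and model function, not part of this PR):

```python
import jax
import pytest

import jaxsim.api as js


@pytest.mark.benchmark
def test_free_floating_mass_matrix_blocking(
    jaxsim_model_ergocub_reduced: js.model.JaxSimModel, benchmark
):
    model = jaxsim_model_ergocub_reduced
    data = js.data.random_model_data(model=model)

    def run():
        # Block until the device computation has finished, so the timing
        # covers execution rather than just dispatch.
        return jax.block_until_ready(
            js.model.free_floating_mass_matrix(model=model, data=data)
        )

    # Warm-up call to avoid including compilation time.
    run()

    benchmark(run)
```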
