diff --git a/.github/workflows/run_benchmarks.yml b/.github/workflows/run_benchmarks.yml
new file mode 100644
index 000000000..01dcdcc9b
--- /dev/null
+++ b/.github/workflows/run_benchmarks.yml
@@ -0,0 +1,86 @@
+name: Run performance benchmarks
+on:
+  pull_request:
+    types:
+      - labeled
+      - synchronize
+  workflow_dispatch:
+
+permissions:
+  contents: write
+  deployments: write
+  pull-requests: write
+
+jobs:
+  benchmark:
+    if: contains(github.event.label.name, 'benchmark') || github.event_name == 'workflow_dispatch'
+    name: Run pytest-benchmark
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Setup pixi environment
+        uses: prefix-dev/setup-pixi@v0.8.1
+        with:
+          pixi-version: v0.29.0
+          cache: true
+          auth-host: prefix.dev
+          auth-token: ${{ secrets.PREFIX_DEV_TOKEN }}
+
+      - name: Run benchmarks using pixi
+        # NOTE: the pixi task is named "benchmark" (see pyproject.toml) and the
+        # environment is selected with -e BEFORE the task name; arguments after
+        # the task name would be forwarded to the task command itself.
+        run: pixi run -e test-cpu benchmark
+
+      - name: Store benchmark result
+        uses: benchmark-action/github-action-benchmark@v1
+        with:
+          name: Python Benchmark with pytest-benchmark
+          tool: 'pytest'
+          output-file-path: tests/output.json
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          auto-push: true
+          alert-threshold: '200%'
+          comment-on-alert: true
+          fail-on-alert: true
+
+      - name: Convert JSON to Markdown Table
+        id: convert_json
+        # ::set-output is deprecated and disabled; append to $GITHUB_OUTPUT instead.
+        run: |
+          python3 - <<'PY_EOF' >> "$GITHUB_OUTPUT"
+          import json
+
+          # Load the benchmark results produced by pytest-benchmark.
+          with open("tests/output.json") as f:
+              data = json.load(f)["benchmarks"]
+
+          keys = list(data[0].keys()) if len(data) > 0 else []
+          rows = [[str(item[key]) for key in keys] for item in data]
+
+          # Create the Markdown table
+          table_header = "| " + " | ".join(keys) + " |"
+          table_divider = "| " + " | ".join(["---"] * len(keys)) + " |"
+          table_rows = ["| " + " | ".join(row) + " |" for row in rows]
+          markdown_table = "\n".join([table_header, table_divider] + table_rows)
+
+          # Multiline step outputs require the heredoc-delimiter form of GITHUB_OUTPUT.
+          print("markdown_table<<MD_EOF")
+          print(markdown_table)
+          print("MD_EOF")
+          PY_EOF
+
+      - name: Update PR Description with Benchmark Results
+        # workflow_dispatch runs have no PR number, so only run on PR events.
+        if: github.event_name == 'pull_request'
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          MARKDOWN_TABLE: ${{ steps.convert_json.outputs.markdown_table }}
+        run: |
+          # Get the current PR description
+          PR_BODY=$(gh api repos/${{ github.repository }}/pulls/${{ github.event.number }} --jq .body)
+
+          # Append the benchmark results to the description (printf expands the \n escapes).
+          UPDATED_BODY=$(printf '%s\n\n## Benchmark Results\n%s' "$PR_BODY" "$MARKDOWN_TABLE")
+
+          # Update the PR description
+          gh api -X PATCH repos/${{ github.repository }}/pulls/${{ github.event.number }} -f body="$UPDATED_BODY"
diff --git a/pyproject.toml b/pyproject.toml
index 032f970a8..92f606df8 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -64,6 +64,7 @@ style = [
 testing = [
     "idyntree >= 12.2.1",
     "pytest >=6.0",
+    "pytest-benchmark",
     "pytest-icdiff",
     "robot-descriptions",
 ]
@@ -118,7 +119,7 @@ multi_line_output = 3
 profile = "black"
 
 [tool.pytest.ini_options]
-addopts = "-rsxX -v --strict-markers"
+addopts = "-rsxX -v --strict-markers --benchmark-skip"
 minversion = "6.0"
 testpaths = [
     "tests",
@@ -199,6 +200,7 @@ typing_extensions = "*"
 examples = { cmd = "jupyter notebook ./examples" }
 pipcheck = "pip check"
 test = { cmd = "pytest", depends_on = ["pipcheck"] }
+benchmark = { cmd = "pytest --benchmark-only --benchmark-json=tests/output.json" }
 
 [tool.pixi.feature.test.dependencies]
 black = "24.*"
@@ -207,6 +209,7 @@ isort = "*"
 pip = "*"
 pre-commit = "*"
 pytest = "*"
+pytest-benchmark = "*"
 pytest-icdiff = "*"
 robot_descriptions = "*"
 
diff --git a/tests/test_benchmarks.py b/tests/test_benchmarks.py
new file mode 100644
index 000000000..1b4176b6a
--- /dev/null
+++ b/tests/test_benchmarks.py
@@ -0,0 +1,56 @@
+import pytest
+
+import jaxsim.api as js
+
+
+# Define the test functions for pytest-benchmark
+@pytest.mark.benchmark
+def test_free_floating_jacobian(
+    jaxsim_model_ergocub_reduced: js.model.JaxSimModel, benchmark
+):
+    model = jaxsim_model_ergocub_reduced
+    data = js.data.random_model_data(model=model)
+
+    # Warm-up call to avoid including compilation time
+    js.model.generalized_free_floating_jacobian(model=model, data=data)
+
+    benchmark(js.model.generalized_free_floating_jacobian, model, data)
+
+
+@pytest.mark.benchmark
+def test_free_floating_bias_forces(
+    jaxsim_model_ergocub_reduced: js.model.JaxSimModel, benchmark
+):
+    model = jaxsim_model_ergocub_reduced
+    data = js.data.random_model_data(model=model)
+
+    # Warm-up call to avoid including compilation time
+    js.model.free_floating_bias_forces(model=model, data=data)
+
+    benchmark(js.model.free_floating_bias_forces, model, data)
+
+
+@pytest.mark.benchmark
+def test_free_floating_mass_matrix(
+    jaxsim_model_ergocub_reduced: js.model.JaxSimModel, benchmark
+):
+    model = jaxsim_model_ergocub_reduced
+    data = js.data.random_model_data(model=model)
+
+    # Warm-up call to avoid including compilation time
+    js.model.free_floating_mass_matrix(model=model, data=data)
+
+    benchmark(js.model.free_floating_mass_matrix, model, data)
+
+
+@pytest.mark.benchmark
+def test_free_floating_jacobian_derivative(
+    jaxsim_model_ergocub_reduced: js.model.JaxSimModel, benchmark
+):
+    model = jaxsim_model_ergocub_reduced
+    data = js.data.random_model_data(model=model)
+
+    # Warm-up call to avoid including compilation time
+    js.model.generalized_free_floating_jacobian_derivative(model=model, data=data)
+
+    benchmark(js.model.generalized_free_floating_jacobian_derivative, model, data)