Merge pull request #308 from ami-iit/feature/benchmark
Add benchmark tests for model dynamics and kinematics functions
flferretti authored Dec 6, 2024
2 parents 8c9ed49 + bb45c2c commit 3734905
Showing 3 changed files with 159 additions and 2 deletions.
7 changes: 5 additions & 2 deletions pyproject.toml
@@ -67,6 +67,7 @@ style = [
testing = [
    "idyntree >= 12.2.1",
    "pytest >=6.0",
    "pytest-benchmark",
    "pytest-icdiff",
    "robot-descriptions"
]
@@ -121,7 +122,7 @@ multi_line_output = 3
profile = "black"

[tool.pytest.ini_options]
addopts = "-rsxX -v --strict-markers"
addopts = "-rsxX -v --strict-markers --benchmark-skip"
minversion = "6.0"
testpaths = [
"tests",
@@ -233,14 +234,16 @@ jaxsim = { path = "./", editable = true }

[tool.pixi.feature.test.tasks]
pipcheck = "pip check"
tests = { cmd = "pytest", depends_on = ["pipcheck"] }
test = { cmd = "pytest", depends_on = ["pipcheck"] }
benchmark = { cmd = "pytest --benchmark-only", depends_on = ["pipcheck"] }

[tool.pixi.feature.test.dependencies]
black = "24.*"
idyntree = "*"
isort = "*"
pre-commit = "*"
pytest = "*"
pytest-benchmark = "*"
pytest-icdiff = "*"
robot_descriptions = "*"

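With this configuration, benchmark tests are skipped during regular runs (the --benchmark-skip default in addopts) and executed on demand via the new benchmark pixi task. As a rough sketch of the resulting workflow, not part of this commit, pytest.main is used here only to mirror what the test and benchmark tasks invoke on the command line, and the batch size of 128 is an arbitrary illustrative value:

import pytest

# Regular test run: the --benchmark-skip default in addopts excludes benchmark tests.
pytest.main(["tests"])

# Benchmark-only run; --batch-size is the custom option added in tests/conftest.py.
pytest.main(["--benchmark-only", "--batch-size", "128", "tests/test_benchmark.py"])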
27 changes: 27 additions & 0 deletions tests/conftest.py
@@ -20,6 +20,21 @@ def pytest_addoption(parser):
help="Run tests only if GPU is available and utilized",
)

parser.addoption(
"--batch-size",
action="store",
default="None",
help="Batch size for vectorized benchmarks (only applies to benchmark tests)",
)


def pytest_generate_tests(metafunc):
if (
"batch_size" in metafunc.fixturenames
and (batch_size := metafunc.config.getoption("--batch-size")) != "None"
):
metafunc.parametrize("batch_size", [1, int(batch_size)])


def check_gpu_usage():
# Set environment variable to prioritize GPU.
@@ -109,6 +124,18 @@ def velocity_representation(request) -> jaxsim.VelRepr:
    return request.param


@pytest.fixture(scope="session")
def batch_size(request) -> int:
    """
    Fixture providing the batch size for vectorized benchmarks.

    Returns:
        The batch size for vectorized benchmarks.
    """

    return 1


# ================================
# Fixtures providing JaxSim models
# ================================
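Taken together, the new option and fixture behave as follows: by default, any test requesting batch_size receives the session-scoped value 1, while passing --batch-size N makes pytest_generate_tests parametrize such tests over [1, N]. A minimal illustrative sketch, with a hypothetical test name that is not part of this commit:

def test_batch_size_parametrization(batch_size):
    # Without --batch-size: collected once, batch_size == 1 (session fixture).
    # With `pytest --batch-size 64`: collected twice, batch_size in (1, 64).
    assert batch_size >= 1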
127 changes: 127 additions & 0 deletions tests/test_benchmark.py
@@ -0,0 +1,127 @@
from collections.abc import Callable

import jax
import pytest

import jaxsim
import jaxsim.api as js


def vectorize_data(model: js.model.JaxSimModel, batch_size: int):
    key = jax.random.PRNGKey(seed=0)

    # Replicate the PRNG key batch_size times and build the batched data with vmap.
    return jax.vmap(
        lambda key: js.data.random_model_data(
            model=model,
            key=key,
        )
    )(jax.numpy.repeat(key[None, :], repeats=batch_size, axis=0))


def benchmark_test_function(
    func: Callable, model: js.model.JaxSimModel, benchmark, batch_size
):
    """Reusable wrapper to benchmark a model function over a batch of data."""
    data = vectorize_data(model=model, batch_size=batch_size)

    # Warm-up call to avoid including compilation time in the measurement.
    jax.vmap(func, in_axes=(None, 0))(model, data)
    benchmark(jax.vmap(func, in_axes=(None, 0)), model, data)


@pytest.mark.benchmark
def test_forward_dynamics_aba(
    jaxsim_model_ergocub_reduced: js.model.JaxSimModel, benchmark, batch_size
):
    model = jaxsim_model_ergocub_reduced

    benchmark_test_function(js.model.forward_dynamics_aba, model, benchmark, batch_size)


@pytest.mark.benchmark
def test_free_floating_bias_forces(
    jaxsim_model_ergocub_reduced: js.model.JaxSimModel, benchmark, batch_size
):
    model = jaxsim_model_ergocub_reduced

    benchmark_test_function(
        js.model.free_floating_bias_forces, model, benchmark, batch_size
    )


@pytest.mark.benchmark
def test_forward_kinematics(
    jaxsim_model_ergocub_reduced: js.model.JaxSimModel, benchmark, batch_size
):
    model = jaxsim_model_ergocub_reduced

    benchmark_test_function(js.model.forward_kinematics, model, benchmark, batch_size)


@pytest.mark.benchmark
def test_free_floating_mass_matrix(
    jaxsim_model_ergocub_reduced: js.model.JaxSimModel, benchmark, batch_size
):
    model = jaxsim_model_ergocub_reduced

    benchmark_test_function(
        js.model.free_floating_mass_matrix, model, benchmark, batch_size
    )


@pytest.mark.benchmark
def test_free_floating_jacobian(
    jaxsim_model_ergocub_reduced: js.model.JaxSimModel, benchmark, batch_size
):
    model = jaxsim_model_ergocub_reduced

    benchmark_test_function(
        js.model.generalized_free_floating_jacobian, model, benchmark, batch_size
    )


@pytest.mark.benchmark
def test_free_floating_jacobian_derivative(
    jaxsim_model_ergocub_reduced: js.model.JaxSimModel, benchmark, batch_size
):
    model = jaxsim_model_ergocub_reduced

    benchmark_test_function(
        js.model.generalized_free_floating_jacobian_derivative,
        model,
        benchmark,
        batch_size,
    )


@pytest.mark.benchmark
def test_soft_contact_model(
    jaxsim_model_ergocub_reduced: js.model.JaxSimModel, benchmark, batch_size
):
    model = jaxsim_model_ergocub_reduced

    benchmark_test_function(js.ode.system_dynamics, model, benchmark, batch_size)


@pytest.mark.benchmark
def test_rigid_contact_model(
    jaxsim_model_ergocub_reduced: js.model.JaxSimModel, benchmark, batch_size
):
    model = jaxsim_model_ergocub_reduced

    with model.editable(validate=False) as model:
        model.contact_model = jaxsim.rbda.contacts.RigidContacts()

    benchmark_test_function(js.ode.system_dynamics, model, benchmark, batch_size)


@pytest.mark.benchmark
def test_relaxed_rigid_contact_model(
    jaxsim_model_ergocub_reduced: js.model.JaxSimModel, benchmark, batch_size
):
    model = jaxsim_model_ergocub_reduced

    with model.editable(validate=False) as model:
        model.contact_model = jaxsim.rbda.contacts.RelaxedRigidContacts()

    benchmark_test_function(js.ode.system_dynamics, model, benchmark, batch_size)
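
Each benchmark above follows the same pattern: vectorize the input data, run one warm-up call so that JIT compilation stays out of the measurement, then hand the vmapped callable and its arguments to the benchmark fixture, which times repeated invocations. A self-contained toy sketch of that pattern, with a plain JAX function standing in for the JaxSim model functions (illustrative only, not part of this commit):

import jax
import jax.numpy as jnp
import pytest


@pytest.mark.benchmark
def test_toy_vmapped_benchmark(benchmark):
    func = jax.jit(lambda x: jnp.sin(x) @ jnp.cos(x).T)
    data = jnp.ones((64, 128, 128))

    vmapped = jax.vmap(func)
    vmapped(data)  # Warm-up call: compilation happens here, not in the measurement.

    benchmark(vmapped, data)  # pytest-benchmark times vmapped(data) over many rounds.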
