diff --git a/recipes/007_benchmarks_advanced.py b/recipes/007_benchmarks_advanced.py
index 464cb9bd..2e098b9c 100644
--- a/recipes/007_benchmarks_advanced.py
+++ b/recipes/007_benchmarks_advanced.py
@@ -21,6 +21,7 @@
 import optuna
 
 from optunahub.benchmarks import BaseProblem
+from optunahub.benchmarks import ConstrainedMixin
 
 
 ###################################################################################################
@@ -63,6 +64,37 @@ def evaluate(self, params: dict[str, float]) -> float:
 study = optuna.create_study(directions=dynamic_problem.directions)
 study.optimize(dynamic_problem, n_trials=20)
 
+
+###################################################################################################
+# Implementing a problem with constraints
+# -------------------------------------------------
+# Here, let's implement a problem with constraints.
+# To do so, you need to inherit the ``ConstrainedMixin`` class in addition to ``BaseProblem`` and implement the ``evaluate_constraints`` method.
+# The ``evaluate_constraints`` method evaluates the constraint functions given a dictionary of input parameters and returns a sequence of constraint values.
+# Then, ``ConstrainedMixin`` internally defines the ``constraints_func`` method for Optuna samplers.
+class ConstrainedProblem(ConstrainedMixin, DynamicProblem):
+    def evaluate_constraints(self, params: dict[str, float]) -> tuple[float, float]:
+        x = params["x"]
+        c0 = x - 2
+        if "y" not in params:
+            c1 = 0.0  # c1 <= 0, so the second constraint is satisfied in this case.
+            return c0, c1
+        else:
+            y = params["y"]
+            c1 = x + y - 3
+            return c0, c1
+
+
+###################################################################################################
+# Then, you can optimize the problem with Optuna as usual.
+# Don't forget to pass the ``constraints_func`` argument to the sampler you use.
+problem = ConstrainedProblem()
+sampler = optuna.samplers.TPESampler(
+    constraints_func=problem.constraints_func
+)  # Pass the constraints_func to the sampler.
+study = optuna.create_study(sampler=sampler, directions=problem.directions)
+study.optimize(problem, n_trials=20)
+
 ###################################################################################################
 # After implementing your own benchmark problem, you can register it with OptunaHub.
 # See :doc:`002_registration` for how to register your benchmark problem with OptunaHub.
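For reference, below is a minimal, self-contained sketch of the same pattern so the new ``ConstrainedProblem`` can be run outside the recipe. The ``DynamicProblem`` shown here is a hypothetical stand-in, since the recipe's actual class is defined outside the hunks above; its objective, the rule for when ``y`` appears, and the ``__main__`` guard are assumptions. Only the ``ConstrainedMixin`` inheritance, ``evaluate_constraints``, and passing ``constraints_func`` to the sampler follow the diff.

from __future__ import annotations

import optuna
from optunahub.benchmarks import BaseProblem
from optunahub.benchmarks import ConstrainedMixin


class DynamicProblem(BaseProblem):  # hypothetical stand-in for the recipe's class
    def evaluate(self, params: dict[str, float]) -> float:
        # "y" is only present for some trials (assumed rule: x >= 0).
        if "y" in params:
            return params["x"] ** 2 + params["y"] ** 2
        return params["x"] ** 2

    def __call__(self, trial: optuna.Trial) -> float:
        # The search space changes dynamically depending on the sampled "x".
        x = trial.suggest_float("x", -5, 5)
        params = {"x": x}
        if x >= 0:
            params["y"] = trial.suggest_float("y", -5, 5)
        return self.evaluate(params)

    @property
    def directions(self) -> list[optuna.study.StudyDirection]:
        return [optuna.study.StudyDirection.MINIMIZE]


class ConstrainedProblem(ConstrainedMixin, DynamicProblem):
    def evaluate_constraints(self, params: dict[str, float]) -> tuple[float, float]:
        x = params["x"]
        c0 = x - 2  # feasible when x <= 2 (Optuna treats c <= 0 as satisfied)
        if "y" not in params:
            return c0, 0.0  # the second constraint is trivially satisfied
        return c0, x + params["y"] - 3  # feasible when x + y <= 3


if __name__ == "__main__":
    problem = ConstrainedProblem()
    sampler = optuna.samplers.TPESampler(constraints_func=problem.constraints_func)
    study = optuna.create_study(sampler=sampler, directions=problem.directions)
    study.optimize(problem, n_trials=20)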