
Allow attaching past evaluations that are outside of the current parameter range #185

Merged · 13 commits · Jun 11, 2024
32 changes: 32 additions & 0 deletions optimas/core/trial.py
@@ -66,6 +66,8 @@ def __init__(
        self._custom_parameters = (
            [] if custom_parameters is None else custom_parameters
        )
        self._ignored = False
        self._ignored_reason = None

        # Add custom parameters as trial attributes.
        for param in self._custom_parameters:
@@ -127,6 +129,16 @@ def index(self) -> int:
    def index(self, value):
        self._index = value

    @property
    def ignored(self) -> bool:
        """Get whether the trial is ignored by the generator."""
        return self._ignored

    @property
    def ignored_reason(self) -> str:
        """Get the reason why the trial is ignored by the generator."""
        return self._ignored_reason

    @property
    def custom_parameters(self) -> List[TrialParameter]:
        """Get the list of custom trial parameters."""
@@ -152,6 +164,26 @@ def evaluated(self) -> bool:
        """Determine whether the trial has been evaluated."""
        return self.completed or self.failed

    def ignore(self, reason: str):
        """Set trial as ignored.

        Parameters
        ----------
        reason : str
            The reason why the trial is ignored.
        """
        # An alternative implementation of this would have been to add a new
        # `IGNORED` trial status. However, this would have an issue:
        # when adding old trials to an exploration, the original trial status
        # could be overwritten by `IGNORED`, and this information would be lost
        # for future explorations where this data is reused (for example,
        # when using the `resume` option).
        # With the current implementation, the value of `ignored` is controlled
        # by (and only relevant for) the current exploration. It won't have
        # any impact if the data is attached to a future exploration.
        self._ignored = True
        self._ignored_reason = reason

    def mark_as(self, status) -> None:
        """Set trial status.

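A minimal usage sketch of the new API (not part of the diff; it assumes a `Trial` instance `trial` obtained from an existing exploration):

    # Sketch only: `trial` is assumed to be an optimas Trial created elsewhere.
    trial.ignore(reason="parameters outside of the varying parameter range")
    assert trial.ignored
    print(trial.ignored_reason)
    # The trial status (e.g. completed) is untouched, so the evaluation can
    # still be reused if the data is attached to a future exploration.

Keeping `ignored` separate from the trial status is what allows the same data to be re-attached later without losing information.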
3 changes: 3 additions & 0 deletions optimas/explorations/base.py
@@ -130,6 +130,9 @@ def history(self) -> pd.DataFrame:
        """Get the exploration history."""
        history = convert_to_dataframe(self._libe_history.H)
        ordered_columns = ["trial_index", "trial_status"]
        # For backward compatibility, add trial_ignored only if available.
        if "trial_ignored" in history.columns.values.tolist():
            ordered_columns += ["trial_ignored"]
        ordered_columns += [p.name for p in self.generator.varying_parameters]
        ordered_columns += [p.name for p in self.generator.objectives]
        ordered_columns += [p.name for p in self.generator.analyzed_parameters]
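The conditional keeps histories saved before this change loading cleanly. A self-contained illustration of the same pattern (the toy DataFrame is hypothetical, not real optimas data):

    import pandas as pd

    # A history written before this change has no "trial_ignored" column.
    old_history = pd.DataFrame(
        {"trial_index": [0, 1], "trial_status": ["completed", "failed"]}
    )
    ordered_columns = ["trial_index", "trial_status"]
    if "trial_ignored" in old_history.columns.values.tolist():
        ordered_columns += ["trial_ignored"]  # Skipped for old histories.
    print(old_history[ordered_columns])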
35 changes: 28 additions & 7 deletions optimas/generators/ax/service/base.py
@@ -15,7 +15,7 @@
    GenerationStep,
    GenerationStrategy,
)
-from ax.exceptions.core import DataRequiredError
+from ax import Arm

from optimas.core import (
    Objective,
@@ -165,23 +165,44 @@ def _tell(self, trials: List[Trial]) -> None:
                    trial.varying_parameters, trial.parameter_values
                ):
                    params[var.name] = value
-                _, trial_id = self._ax_client.attach_trial(params)
+                try:
+                    _, trial_id = self._ax_client.attach_trial(params)
+                except ValueError as error:
+                    # Bypass checks from AxClient and manually add a trial
+                    # outside of the search space.
+                    # https://github.com/facebook/Ax/issues/768#issuecomment-1036515242
+                    if "not a valid value" in str(error):
+                        if self._fit_out_of_design:
+                            ax_trial = self._ax_client.experiment.new_trial()
+                            ax_trial.add_arm(Arm(parameters=params))
+                            ax_trial.mark_running(no_runner_required=True)
+                            trial_id = ax_trial.index
+                        else:
+                            ignore_reason = (
+                                f"The parameters {params} are outside of the "
+                                "range of the varying parameters. "
+                                "Set `fit_out_of_design=True` if you want "
+                                "the model to use these data."
+                            )
+                            trial.ignore(reason=ignore_reason)
+                            continue
+                    else:
+                        raise error
                ax_trial = self._ax_client.get_trial(trial_id)

                # Since data was given externally, reduce number of
                # initialization trials, but only if they have not failed.
-                if (
-                    trial.status != TrialStatus.FAILED
-                    and not self._enforce_n_init
-                ):
+                if trial.completed and not self._enforce_n_init:
                    generation_strategy = self._ax_client.generation_strategy
                    current_step = generation_strategy.current_step
                    # Reduce only if there are still Sobol trials left.
                    if current_step.model == Models.SOBOL:
                        current_step.transition_criteria[0].threshold -= 1
                        generation_strategy._maybe_move_to_next_step()
            finally:
-                if trial.completed:
+                if trial.ignored:
+                    continue
+                elif trial.completed:
                    outcome_evals = {}
                    # Add objective evaluations.
                    for ev in trial.objective_evaluations:
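For reference, the bypass above can be reproduced standalone against a bare AxClient. A sketch under assumed names (the experiment, parameter bounds and objective are illustrative; the recipe follows the linked Ax issue):

    from ax import Arm
    from ax.service.ax_client import AxClient, ObjectiveProperties

    ax_client = AxClient()
    ax_client.create_experiment(
        name="example",
        parameters=[{"name": "x0", "type": "range", "bounds": [0.0, 1.0]}],
        objectives={"f": ObjectiveProperties(minimize=False)},
    )

    params = {"x0": 2.0}  # Outside of the search space.
    try:
        _, trial_id = ax_client.attach_trial(params)
    except ValueError:
        # Bypass the search-space check: create the trial and arm manually.
        ax_trial = ax_client.experiment.new_trial()
        ax_trial.add_arm(Arm(parameters=params))
        ax_trial.mark_running(no_runner_required=True)
        trial_id = ax_trial.index

    ax_client.complete_trial(trial_index=trial_id, raw_data={"f": 0.3})

The manually created trial ends up in the experiment like any attached trial, which is why the model can then fit it as out-of-design data.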
24 changes: 18 additions & 6 deletions optimas/generators/base.py
@@ -250,20 +250,27 @@ def tell(
            incorporating the evaluated trials. By default ``True``.

        """
-        self._tell(trials)
        for trial in trials:
            if trial not in self._given_trials:
                self._add_external_evaluated_trial(trial)
-            if not trial.failed:
-
+        self._tell(trials)
+        for trial in trials:
+            if trial.ignored:
+                # Check first if ignored, because it can be ignored
+                # and completed at the same time.
+                log_msg = (
+                    f"Trial {trial.index} ignored. "
+                    f"Reason: {trial.ignored_reason}"
+                )
+            elif trial.completed:
                log_msg = "Completed trial {} with objective(s) {}".format(
                    trial.index, trial.objectives_as_dict()
                )
                if trial.analyzed_parameters:
                    log_msg += " and analyzed parameter(s) {}".format(
                        trial.analyzed_parameters_as_dict()
                    )
-            else:
+            elif trial.failed:
                log_msg = f"Failed to evaluate trial {trial.index}."
            logger.info(log_msg)
            if allow_saving_model and self._save_model:
@@ -279,11 +286,15 @@ def incorporate_history(self, history: np.ndarray) -> None:

        """
        # Keep only evaluations where the simulation finished successfully.
-        history = history[history["sim_ended"]]
+        history_ended = history[history["sim_ended"]]
        trials = self._create_trials_from_external_data(
-            history, ignore_unrecognized_parameters=True
+            history_ended, ignore_unrecognized_parameters=True
        )
        self.tell(trials, allow_saving_model=False)
+        # Communicate to history array whether the trial has been ignored.
+        for trial in trials:
+            i = np.where(history["trial_index"] == trial.index)[0][0]
+            history["trial_ignored"][i] = trial.ignored

    def attach_trials(
        self,
@@ -539,6 +550,7 @@ def get_gen_specs(
            [(var.name, var.dtype) for var in self._varying_parameters]
            + [("num_procs", int), ("num_gpus", int)]
            + [("trial_index", int)]
+            + [("trial_ignored", bool)]
            + [
                (par.save_name, par.dtype)
                for par in self._custom_trial_parameters
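A short sketch of how these two pieces fit together: the new ("trial_ignored", bool) field declared in get_gen_specs and the write-back in incorporate_history (the array below is a stand-in for the libEnsemble history, not the real structure):

    import numpy as np

    # Stand-in history array with the new field from get_gen_specs().
    history = np.zeros(
        3,
        dtype=[("trial_index", int), ("trial_ignored", bool), ("sim_ended", bool)],
    )
    history["trial_index"] = [0, 1, 2]
    history["sim_ended"] = [True, True, False]

    # After tell(), flag a trial the generator decided to ignore.
    ignored_index = 1  # Hypothetical: e.g. outside the new parameter range.
    i = np.where(history["trial_index"] == ignored_index)[0][0]
    history["trial_ignored"][i] = True

Matching on "trial_index" rather than array position keeps the write-back correct even though ignored/failed rows were filtered out before the trials were created.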
68 changes: 68 additions & 0 deletions tests/test_ax_generators.py
@@ -428,6 +428,73 @@ def test_ax_single_fidelity_updated_params():
    make_plots(gen)


def test_ax_single_fidelity_resume():
    """
    Test that an exploration with an AxService generator can resume
    with an updated range of the varying parameters, even if some
    old trials are out of the updated range.
    """
    global trial_count
    global trials_to_fail
    trial_count = 0
    trials_to_fail = []

    fit_out_of_design_vals = [False, True]

    for fit_out_of_design in fit_out_of_design_vals:
        var1 = VaryingParameter("x0", 5.1, 6.0)
        var2 = VaryingParameter("x1", -5.0, 15.0)
        obj = Objective("f", minimize=False)
        p1 = Parameter("p1")

        gen = AxSingleFidelityGenerator(
            varying_parameters=[var1, var2],
            objectives=[obj],
            analyzed_parameters=[p1],
            parameter_constraints=["x0 + x1 <= 10"],
            outcome_constraints=["p1 <= 30"],
            fit_out_of_design=fit_out_of_design,
        )
        ev = FunctionEvaluator(function=eval_func_sf)
        exploration = Exploration(
            generator=gen,
            evaluator=ev,
            max_evals=20,
            sim_workers=2,
            exploration_dir_path="./tests_output/test_ax_single_fidelity",
            libe_comms="local_threading",
            resume=True,
        )

        # Get reference to original AxClient.
        ax_client = gen._ax_client

        # Run exploration.
        exploration.run(n_evals=1)

        if not fit_out_of_design:
            # Check that no old evaluations were added.
            assert len(exploration.history) == 11
            assert all(exploration.history.trial_ignored.to_numpy()[:-1])
            # Check that the Sobol step has not been skipped.
            df = ax_client.get_trials_data_frame()
            assert len(df) == 1
            assert df["generation_method"].to_numpy()[0] == "Sobol"

        else:
            # Check that the old evaluations were added.
            assert len(exploration.history) == 12
            assert not all(exploration.history.trial_ignored.to_numpy())
            # Check that the Sobol step has been skipped.
            df = ax_client.get_trials_data_frame()
            assert len(df) == 12
            assert df["generation_method"].to_numpy()[-1] == "GPEI"

        check_run_ax_service(
            ax_client, gen, exploration, n_failed_expected=2
        )


def test_ax_multi_fidelity():
    """Test that an exploration with a multifidelity generator runs"""

@@ -808,6 +875,7 @@ def test_ax_service_init():

if __name__ == "__main__":
    test_ax_single_fidelity()
    test_ax_single_fidelity_resume()
    test_ax_single_fidelity_int()
    test_ax_single_fidelity_moo()
    test_ax_single_fidelity_fb()