Use a scaler object to reimplement objective scaling
verveerpj committed Feb 6, 2025
1 parent b29a18e commit 5a23e90
Showing 7 changed files with 150 additions and 33 deletions.
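
In brief: objective scaling moves out of the static ropt configuration (the scales and auto_scale fields are removed from _parse_objectives) and into a dedicated ObjectiveScaler transform built by get_opt_model_transforms. The transform also absorbs the maximize-to-minimize sign flip, since Everest maximizes objectives while ropt minimizes, which lets the run model stop negating simulation results and lets the storage layer drop convert_to_maximize. When any objective requests auto-normalization, the run model evaluates one extra batch at the initial guess, across all realizations, to calibrate the scales before optimization starts.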
52 changes: 41 additions & 11 deletions src/ert/run_models/everest_run_model.py
@@ -21,6 +21,7 @@
from ropt.evaluator import EvaluatorContext, EvaluatorResult
from ropt.plan import BasicOptimizer
from ropt.plan import Event as OptimizerEvent
+from ropt.transforms import OptModelTransforms
from typing_extensions import TypedDict

from _ert.events import EESnapshot, EESnapshotUpdate, Event
@@ -29,9 +30,13 @@
from ert.runpaths import Runpaths
from ert.storage import open_storage
from everest.config import ControlConfig, ControlVariableGuessListConfig, EverestConfig
+from everest.config.utils import FlattenedControls
from everest.everest_storage import EverestStorage, OptimalResult
from everest.optimizer.everest2ropt import everest2ropt
-from everest.optimizer.opt_model_transforms import get_opt_model_transforms
+from everest.optimizer.opt_model_transforms import (
+ObjectiveScaler,
+get_opt_model_transforms,
+)
from everest.simulator.everest_to_ert import everest_to_ert_config
from everest.strings import EVEREST

@@ -97,11 +102,6 @@ def __init__(
)

self._everest_config = everest_config
-self._opt_model_transforms = get_opt_model_transforms(everest_config.controls)
-self._ropt_config = everest2ropt(
-everest_config, transforms=self._opt_model_transforms
-)
-
self._sim_callback = simulation_callback
self._opt_callback = optimization_callback
self._fm_errors: dict[int, dict[str, Any]] = {}
@@ -214,9 +214,38 @@ def run_experiment(
case _:
self._exit_code = EverestExitCode.COMPLETED

+def _init_transforms(self, variables: NDArray[np.float64]) -> OptModelTransforms:
+realizations = self._everest_config.model.realizations
+nreal = len(realizations)
+realization_weights = self._everest_config.model.realizations_weights
+if realization_weights is None:
+realization_weights = [1.0 / nreal] * nreal
+transforms = get_opt_model_transforms(
+self._everest_config.controls,
+self._everest_config.objective_functions,
+realization_weights,
+)
+# If required, initialize auto-scaling:
+assert isinstance(transforms.objectives, ObjectiveScaler)
+if transforms.objectives.has_auto_scale:
+objectives, _, _ = self._run_forward_model(
+np.repeat(np.expand_dims(variables, axis=0), nreal, axis=0),
+realizations,
+)
+transforms.objectives.calculate_auto_scales(objectives)
+return transforms
+
def _create_optimizer(self) -> BasicOptimizer:
+# Initialize the optimization model transforms:
+transforms = self._init_transforms(
+np.asarray(
+FlattenedControls(self._everest_config.controls).initial_guesses,
+dtype=np.float64,
+)
+)
optimizer = BasicOptimizer(
-enopt_config=self._ropt_config, evaluator=self._forward_model_evaluator
+enopt_config=everest2ropt(self._everest_config, transforms=transforms),
+evaluator=self._forward_model_evaluator,
)

# Before each batch evaluation we check if we should abort:
@@ -254,7 +283,7 @@ def _run_forward_model(
self,
control_values: NDArray[np.float64],
realizations: list[int],
-active_control_vectors: list[bool],
+active_control_vectors: list[bool] | None = None,
) -> tuple[NDArray[np.float64], NDArray[np.float64] | None, NDArray[np.intc]]:
# Reset the current run status:
self._status = None
@@ -348,7 +377,7 @@ def _get_cached_results(
def _init_batch_data(
self,
control_values: NDArray[np.float64],
-active_control_vectors: list[bool],
+active_control_vectors: list[bool] | None,
cached_results: dict[int, Any],
) -> dict[int, dict[str, Any]]:
def _add_controls(
@@ -374,7 +403,8 @@ def _add_controls(
return {
idx: _add_controls(self._everest_config.controls, control_values[idx, :])
for idx in range(control_values.shape[0])
-if idx not in cached_results and active_control_vectors[idx]
+if idx not in cached_results
+and (active_control_vectors is None or active_control_vectors[idx])
}

def _setup_sim(
@@ -505,7 +535,7 @@ def _get_objectives_and_constraints(
cached_results: dict[int, Any],
) -> tuple[NDArray[np.float64], NDArray[np.float64] | None]:
# We minimize the negative of the objectives:
-objectives = -self._get_simulation_results(
+objectives = self._get_simulation_results(
results, self._everest_config.objective_names, control_values, batch_data
)

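For orientation, here is a minimal, self-contained sketch of the calibration step that _init_transforms adds: the initial guess is replicated across all realizations, evaluated once, and the resulting objectives yield the scales. Here evaluate_batch is a hypothetical stand-in for _run_forward_model, not part of the Everest API, and the toy objectives are made up.

import numpy as np
from numpy.typing import NDArray


def evaluate_batch(control_values: NDArray[np.float64]) -> NDArray[np.float64]:
    # Hypothetical stand-in for EverestRunModel._run_forward_model:
    # one row of objective values per realization (two toy objectives).
    return np.stack(
        [control_values.sum(axis=1), (control_values**2).sum(axis=1)], axis=1
    )


def calibrate_auto_scales(
    initial_guess: NDArray[np.float64],
    realization_weights: list[float] | None,
    n_realizations: int,
) -> NDArray[np.float64]:
    # Default to equal weights when none are configured, as in _init_transforms:
    if realization_weights is None:
        realization_weights = [1.0 / n_realizations] * n_realizations
    # Replicate the initial guess so every realization is evaluated once:
    batch = np.repeat(np.expand_dims(initial_guess, axis=0), n_realizations, axis=0)
    objectives = evaluate_batch(batch)
    # Same formula as ObjectiveScaler.calculate_auto_scales: the absolute
    # weighted sum of each objective over the realizations.
    weights = np.asarray(realization_weights, dtype=np.float64)
    return np.abs(np.nansum(objectives * weights[:, np.newaxis], axis=0))


print(calibrate_auto_scales(np.array([0.5, 1.5]), None, n_realizations=3))
# -> [2.  2.5]: sum = 2.0; sum of squares = 0.25 + 2.25 = 2.5; equal weights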
8 changes: 2 additions & 6 deletions src/everest/everest_storage.py
@@ -13,8 +13,7 @@
import polars as pl
from ropt.enums import EventType
from ropt.plan import BasicOptimizer, Event
-from ropt.results import FunctionResults, GradientResults, convert_to_maximize
-from ropt.transforms import OptModelTransforms
+from ropt.results import FunctionResults, GradientResults

from everest.config import EverestConfig
from everest.strings import EVEREST
@@ -662,14 +661,11 @@ def _store_gradient_results(self, results: GradientResults) -> _GradientResults:
def _on_batch_evaluation_finished(self, event: Event) -> None:
logger.debug("Storing batch results dataframes")

-converted_results = tuple(
-convert_to_maximize(result) for result in event.data.get("results", [])
-)
results: list[FunctionResults | GradientResults] = []

best_value = -np.inf
best_results = None
-for item in converted_results:
+for item in event.data.get("results", []):
if isinstance(item, GradientResults):
results.append(item)
if (
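Note: convert_to_maximize is no longer needed here because the ObjectiveScaler transform (see opt_model_transforms.py below) already hands results back in the maximization frame; its forward and backward methods negate the objectives, so the storage layer can consume event.data["results"] directly.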
6 changes: 0 additions & 6 deletions src/everest/optimizer/everest2ropt.py
@@ -59,17 +59,13 @@ def _parse_controls(controls: FlattenedControls, ropt_config):


def _parse_objectives(objective_functions: list[ObjectiveFunctionConfig], ropt_config):
-scales: list[float] = []
-auto_scale: list[bool] = []
weights: list[float] = []
function_estimator_indices: list[int] = []
function_estimators: list = []

for objective in objective_functions:
assert isinstance(objective.name, str)
weights.append(objective.weight or 1.0)
-scales.append(1.0 / (objective.normalization or 1.0))
-auto_scale.append(objective.auto_normalize or False)

# If any objective specifies an objective type, we have to specify
# function estimators in ropt to implement these types. This is done by
Expand All @@ -95,8 +91,6 @@ def _parse_objectives(objective_functions: list[ObjectiveFunctionConfig], ropt_c

ropt_config["objectives"] = {
"weights": weights,
"scales": scales,
"auto_scale": auto_scale,
}
if function_estimators:
# Only needed if we specified at least one objective type:
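After this change _parse_objectives only populates weights (plus optional function estimators); both manual scaling (normalization) and automatic scaling (auto_normalize) are handled by the ObjectiveScaler constructed in get_opt_model_transforms below.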
56 changes: 52 additions & 4 deletions src/everest/optimizer/opt_model_transforms.py
@@ -3,9 +3,9 @@
import numpy as np
from numpy.typing import NDArray
from ropt.transforms import OptModelTransforms
-from ropt.transforms.base import VariableTransform
+from ropt.transforms.base import ObjectiveTransform, VariableTransform

-from everest.config import ControlConfig
+from everest.config import ControlConfig, ObjectiveFunctionConfig
from everest.config.utils import FlattenedControls


@@ -48,7 +48,42 @@ def transform_linear_constraints(
)


-def get_opt_model_transforms(controls: list[ControlConfig]) -> OptModelTransforms:
+class ObjectiveScaler(ObjectiveTransform):
+def __init__(
+self, scales: list[float], auto_scales: list[bool], weights: list[float]
+) -> None:
+self._scales = np.asarray(scales, dtype=np.float64)
+self._auto_scales = np.asarray(auto_scales, dtype=np.bool_)
+self._weights = np.asarray(weights, dtype=np.float64)
+
+# The transform methods below all return the negative of the objectives.
+# This is because Everest maximizes the objectives, while ropt is a minimizer.
+
+def forward(self, objectives: NDArray[np.float64]) -> NDArray[np.float64]:
+return -objectives / self._scales
+
+def backward(self, objectives: NDArray[np.float64]) -> NDArray[np.float64]:
+return -objectives * self._scales
+
+def transform_weighted_objective(self, weighted_objective):
+return -weighted_objective
+
+def calculate_auto_scales(self, objectives: NDArray[np.float64]) -> None:
+auto_scales = np.abs(
+np.nansum(objectives * self._weights[:, np.newaxis], axis=0)
+)
+self._scales[self._auto_scales] *= auto_scales[self._auto_scales]
+
+@property
+def has_auto_scale(self) -> bool:
+return bool(np.any(self._auto_scales))
+
+
+def get_opt_model_transforms(
+controls: list[ControlConfig],
+objectives: list[ObjectiveFunctionConfig],
+weights: list[float],
+) -> OptModelTransforms:
flattened_controls = FlattenedControls(controls)
return OptModelTransforms(
variables=(
@@ -60,5 +95,18 @@ def get_opt_model_transforms(controls: list[ControlConfig]) -> OptModelTransform
)
if any(flattened_controls.auto_scales)
else None
-)
+),
+objectives=ObjectiveScaler(
+[
+1.0
+if objective.normalization is None
+else 1.0 / objective.normalization
+for objective in objectives
+],
+[
+False if objective.auto_normalize is None else objective.auto_normalize
+for objective in objectives
+],
+weights,
+),
)
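
A quick worked example of the scaler's behavior, as a sketch: the import path is the one this commit itself uses, while the scales, weights, and objective values are illustrative.

import numpy as np

from everest.optimizer.opt_model_transforms import ObjectiveScaler

# Two objectives; the first is auto-scaled, the second keeps its scale of 1.0.
# The weights are realization weights (two realizations here).
scaler = ObjectiveScaler(
    scales=[1.0, 1.0],
    auto_scales=[True, False],
    weights=[0.5, 0.5],
)

# Objective values: one row per realization, one column per objective.
objectives = np.array([[2.0, 3.0], [4.0, 5.0]])

# The auto-scale is |0.5 * 2.0 + 0.5 * 4.0| = 3.0, so scales become [3.0, 1.0]:
scaler.calculate_auto_scales(objectives)

# forward() divides by the scales and flips the sign (Everest maximizes,
# ropt minimizes); backward() undoes it exactly:
print(scaler.forward(objectives))  # [[-0.667 -3.] [-1.333 -5.]]
print(scaler.backward(scaler.forward(objectives)))  # recovers the input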
(File header not captured for this hunk; the JSON below is a stored ropt objectives snapshot being updated.)
@@ -42,7 +42,7 @@
],
"realization_filters": null,
"scales": [
-0.6666666666666666,
+1.0,
1.0
],
"weights": [
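The snapshot value changes because the ropt-level scales field no longer encodes 1 / normalization (0.6666... corresponds to a normalization of 1.5 under the old rule); with scaling moved into the ObjectiveScaler transform, the config-level scale falls back to the default of 1.0.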
29 changes: 28 additions & 1 deletion tests/everest/test_math_func.py
@@ -158,4 +158,31 @@ def test_math_func_auto_scaled_controls(
optim = -run_model.result.total_objective # distance is provided as -distance
expected_dist = 0.25**2 + 0.25**2
-assert expected_dist == pytest.approx(optim, abs=0.05)
+assert expected_dist == pytest.approx(optim, abs=0.05)
+
+
+@pytest.mark.integration_test
+def test_math_func_auto_scaled_objectives(
+copy_math_func_test_data_to_tmp, evaluator_server_config_generator
+):
+config = EverestConfig.load_file("config_multiobj.yml")
+config_dict = config.model_dump(exclude_none=True)
+
+# Normalize only distance_p:
+config_dict["objective_functions"][0]["auto_normalize"] = True
+config_dict["objective_functions"][0]["normalization"] = 1.0
+
+# We need two batches: the first does the auto-scaling,
+# the second is the initial function evaluation:
config_dict["optimization"]["max_batch_num"] = 2

config = EverestConfig.model_validate(config_dict)
run_model = EverestRunModel.create(config)
evaluator_server_config = evaluator_server_config_generator(run_model)
run_model.run_experiment(evaluator_server_config)
optim = run_model.result.total_objective

expected_p = 1.0 # normalized
expected_q = 4.75 # not normalized
total = -(expected_p * 0.5 + expected_q * 0.25) / (0.5 + 0.25)

assert total == optim
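
For reference, the expected value works out to total = -(1.0 * 0.5 + 4.75 * 0.25) / (0.5 + 0.25) = -1.6875 / 0.75 = -2.25, where 0.5 and 0.25 are presumably the objective weights defined in config_multiobj.yml.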
30 changes: 26 additions & 4 deletions tests/everest/test_ropt_initialization.py
@@ -44,7 +44,12 @@ def test_everest2ropt_controls_auto_scale():
controls[0].auto_scale = True
controls[0].scaled_range = [0.3, 0.7]
ropt_config = everest2ropt(
-config, transforms=get_opt_model_transforms(config.controls)
+config,
+transforms=get_opt_model_transforms(
+config.controls,
+config.objective_functions,
+config.model.realizations_weights,
+),
)
assert np.allclose(ropt_config.variables.lower_bounds, 0.3)
assert np.allclose(ropt_config.variables.upper_bounds, 0.7)
@@ -56,7 +61,12 @@ def test_everest2ropt_variables_auto_scale():
controls[0].variables[1].auto_scale = True
controls[0].variables[1].scaled_range = [0.3, 0.7]
ropt_config = everest2ropt(
-config, transforms=get_opt_model_transforms(config.controls)
+config,
+transforms=get_opt_model_transforms(
+config.controls,
+config.objective_functions,
+config.model.realizations_weights,
+),
)
assert ropt_config.variables.lower_bounds[0] == 0.0
assert ropt_config.variables.upper_bounds[0] == 0.1
@@ -122,7 +132,12 @@ def test_everest2ropt_controls_input_constraint_auto_scale():
scaled_coefficients[:2, 1] = coefficients[:2, 1] * 2.0 / 0.4

ropt_config = everest2ropt(
-config, transforms=get_opt_model_transforms(config.controls)
+config,
+transforms=get_opt_model_transforms(
+config.controls,
+config.objective_functions,
+config.model.realizations_weights,
+),
)
assert np.allclose(
ropt_config.linear_constraints.coefficients,
@@ -260,7 +275,14 @@ def test_everest2ropt_snapshot(case, snapshot):
config = EverestConfig.load_file(
relpath(f"../../test-data/everest/math_func/{case}")
)
-ropt_config = everest2ropt(config).model_dump()
+ropt_config = everest2ropt(
+config,
+transforms=get_opt_model_transforms(
+config.controls,
+config.objective_functions,
+config.model.realizations_weights,
+),
+).model_dump()

def safe_default(obj):
if isinstance(obj, np.ndarray):
