Reimplement output constraint scaling #10003

Merged (3 commits) on Feb 7, 2025
28 changes: 24 additions & 4 deletions src/ert/run_models/everest_run_model.py
@@ -34,6 +34,7 @@
from everest.everest_storage import EverestStorage, OptimalResult
from everest.optimizer.everest2ropt import everest2ropt
from everest.optimizer.opt_model_transforms import (
    ConstraintScaler,
    ObjectiveScaler,
    get_optimization_domain_transforms,
)
@@ -223,20 +224,39 @@ def _init_transforms(self, variables: NDArray[np.float64]) -> OptModelTransforms
        transforms = get_optimization_domain_transforms(
            self._everest_config.controls,
            self._everest_config.objective_functions,
            self._everest_config.output_constraints,
            realization_weights,
        )
        # If required, initialize auto-scaling:
        assert isinstance(transforms.objectives, ObjectiveScaler)
        if transforms.objectives.has_auto_scale:
            objectives, _, _ = self._run_forward_model(
        assert transforms.nonlinear_constraints is None or isinstance(
            transforms.nonlinear_constraints, ConstraintScaler
        )
        if transforms.objectives.has_auto_scale or (
            transforms.nonlinear_constraints
            and transforms.nonlinear_constraints.has_auto_scale
        ):
            # Run the forward model once to find the objective/constraint values
            # to compute the scales. This will add an ensemble/batch in the
            # storage that is not part of the optimization run. However, the
            # results may be used in the optimization via the caching mechanism.
            objectives, constraints, _ = self._run_forward_model(
                np.repeat(np.expand_dims(variables, axis=0), nreal, axis=0),
                realizations,
            )
            transforms.objectives.calculate_auto_scales(objectives)
            if transforms.objectives.has_auto_scale:
                transforms.objectives.calculate_auto_scales(objectives)
            if (
                transforms.nonlinear_constraints
                and transforms.nonlinear_constraints.has_auto_scale
            ):
                assert constraints is not None
                transforms.nonlinear_constraints.calculate_auto_scales(constraints)
        return transforms

    def _create_optimizer(self) -> BasicOptimizer:
        # Initialize the optimization model transforms:
        # Initialize the optimization model transforms. This may run one initial
        # ensemble for auto-scaling purposes:
        transforms = self._init_transforms(
            np.asarray(
                FlattenedControls(self._everest_config.controls).initial_guesses,
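
As an aside on the bootstrap evaluation above: a single control vector is tiled across all realizations before being passed to the forward model. A minimal NumPy sketch of that shape manipulation, with illustrative sizes (not taken from the PR):

import numpy as np

variables = np.array([0.1, 0.2, 0.3])  # one control vector, n_controls = 3
nreal = 4                              # number of model realizations
batch = np.repeat(np.expand_dims(variables, axis=0), nreal, axis=0)
assert batch.shape == (4, 3)           # one identical row per realization
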
25 changes: 4 additions & 21 deletions src/everest/optimizer/everest2ropt.py
@@ -99,7 +99,6 @@ def _parse_objectives(objective_functions: list[ObjectiveFunctionConfig], ropt_c


def _parse_input_constraints(
    controls: FlattenedControls,
    input_constraints: list[InputConstraintConfig] | None,
    formatted_control_names: list[str],
    formatted_control_names_dotdash: list[str],
@@ -160,17 +159,13 @@ def _parse_output_constraints(
        return

    rhs_values: list[float] = []
    scales: list[float] = []
    auto_scale: list[bool] = []
    types: list[ConstraintType] = []

    def _add_output_constraint(
        rhs_value: float | None, constraint_type: ConstraintType, suffix=None
    ):
        if rhs_value is not None:
            rhs_values.append(rhs_value)
            scales.append(constr.scale if constr.scale is not None else 1.0)
            auto_scale.append(constr.auto_scale or False)
            types.append(constraint_type)

    for constr in output_constraints:
@@ -181,25 +176,16 @@ def _add_output_constraint
            raise RuntimeError(
                "output constraint error: target cannot be combined with bounds"
            )
        _add_output_constraint(
            target,
            ConstraintType.EQ,
        )
        _add_output_constraint(
            upper_bound,
            ConstraintType.LE,
            None if lower_bound is None else "upper",
        )
        _add_output_constraint(
            lower_bound,
            ConstraintType.GE,
            None if upper_bound is None else "lower",
        )
        _add_output_constraint(target, ConstraintType.EQ)
        _add_output_constraint(
            upper_bound, ConstraintType.LE, None if lower_bound is None else "upper"
        )
        _add_output_constraint(
            lower_bound, ConstraintType.GE, None if upper_bound is None else "lower"
        )

ropt_config["nonlinear_constraints"] = {
"rhs_values": rhs_values,
"scales": scales,
"auto_scale": auto_scale,
"types": types,
}
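
Note that the nonlinear-constraints entry no longer carries "scales" or "auto_scale"; scaling now lives in the ConstraintScaler transform. A sketch of the slimmed-down entry, with purely illustrative values:

from ropt.enums import ConstraintType

ropt_config["nonlinear_constraints"] = {
    "rhs_values": [0.0, 1.5],  # illustrative right-hand sides
    "types": [ConstraintType.LE, ConstraintType.GE],  # illustrative types
}
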

@@ -350,12 +336,9 @@ def everest2ropt(
    """
    ropt_config: dict[str, Any] = {}

    flattened_controls = FlattenedControls(ever_config.controls)

    _parse_controls(flattened_controls, ropt_config)
    _parse_controls(FlattenedControls(ever_config.controls), ropt_config)
    _parse_objectives(ever_config.objective_functions, ropt_config)
    _parse_input_constraints(
        flattened_controls,
        ever_config.input_constraints,
        ever_config.formatted_control_names,
        ever_config.formatted_control_names_dotdash,
79 changes: 76 additions & 3 deletions src/everest/optimizer/opt_model_transforms.py
@@ -2,10 +2,19 @@

import numpy as np
from numpy.typing import NDArray
from ropt.enums import ConstraintType
from ropt.transforms import OptModelTransforms
from ropt.transforms.base import ObjectiveTransform, VariableTransform

from everest.config import ControlConfig, ObjectiveFunctionConfig
from ropt.transforms.base import (
    NonLinearConstraintTransform,
    ObjectiveTransform,
    VariableTransform,
)

from everest.config import (
    ControlConfig,
    ObjectiveFunctionConfig,
    OutputConstraintConfig,
)
from everest.config.utils import FlattenedControls


@@ -79,9 +88,58 @@ def has_auto_scale(self) -> bool:
        return bool(np.any(self._auto_scales))


class ConstraintScaler(NonLinearConstraintTransform):
    def __init__(
        self, scales: list[float], auto_scales: list[bool], weights: list[float]
    ) -> None:
        self._scales = np.asarray(scales, dtype=np.float64)
        self._auto_scales = np.asarray(auto_scales, dtype=np.bool_)
        self._weights = np.asarray(weights, dtype=np.float64)

    def transform_rhs_values(
        self, rhs_values: NDArray[np.float64], types: NDArray[np.ubyte]
    ) -> tuple[NDArray[np.float64], NDArray[np.ubyte]]:
        def flip_type(constraint_type: ConstraintType) -> ConstraintType:
            match constraint_type:
                case ConstraintType.GE:
                    return ConstraintType.LE
                case ConstraintType.LE:
                    return ConstraintType.GE
                case _:
                    return constraint_type

        rhs_values = rhs_values / self._scales  # noqa: PLR6104
        # Flip inequality types if self._scales < 0 in the division above:
        types = np.fromiter(
            (
                flip_type(type_) if scale < 0 else type_
                for type_, scale in zip(types, self._scales, strict=False)
            ),
            np.ubyte,
        )
        return rhs_values, types

    def forward(self, constraints: NDArray[np.float64]) -> NDArray[np.float64]:
        return constraints / self._scales

    def backward(self, constraints: NDArray[np.float64]) -> NDArray[np.float64]:
        return constraints * self._scales

    def calculate_auto_scales(self, constraints: NDArray[np.float64]) -> None:
        auto_scales = np.abs(
            np.nansum(constraints * self._weights[:, np.newaxis], axis=0)
        )
        self._scales[self._auto_scales] *= auto_scales[self._auto_scales]

    @property
    def has_auto_scale(self) -> bool:
        return bool(np.any(self._auto_scales))
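
To make the two scaling mechanisms above concrete, here is a minimal standalone NumPy sketch of the arithmetic (it does not use the ropt classes; all values are illustrative):

import numpy as np

# transform_rhs_values: dividing an inequality by a negative scale flips
# its direction, e.g. "g(x) <= 2.0" with scale -4.0 becomes
# "g(x) / -4.0 >= -0.5", which is why flip_type() is needed.
scales = np.array([-4.0])
rhs_values = np.array([2.0]) / scales  # -> [-0.5]

# calculate_auto_scales: each auto-scaled constraint's scale is multiplied
# by the magnitude of its realization-weighted sum over one batch.
constraints = np.array([[4.0, -2.0], [6.0, -4.0]])  # 2 realizations x 2 constraints
weights = np.array([0.5, 0.5])
auto = np.abs(np.nansum(constraints * weights[:, np.newaxis], axis=0))
assert np.allclose(auto, [5.0, 3.0])
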


def get_optimization_domain_transforms(
    controls: list[ControlConfig],
    objectives: list[ObjectiveFunctionConfig],
    constraints: list[OutputConstraintConfig] | None,
    weights: list[float],
) -> OptModelTransforms:
    flattened_controls = FlattenedControls(controls)
@@ -107,4 +165,19 @@
            ],
            weights,
        ),
        nonlinear_constraints=(
            ConstraintScaler(
                [
                    1.0 if constraint.scale is None else constraint.scale
                    for constraint in constraints
                ],
                [
                    False if constraint.auto_scale is None else constraint.auto_scale
                    for constraint in constraints
                ],
                weights,
            )
            if constraints
            else None
        ),
    )
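
For reference, a sketch of how the new constraints argument is wired through, mirroring the test updates below (the config variable is hypothetical):

transforms = get_optimization_domain_transforms(
    config.controls,
    config.objective_functions,
    config.output_constraints,  # may be None; then no ConstraintScaler is built
    config.model.realizations_weights,
)
assert transforms.nonlinear_constraints is None or isinstance(
    transforms.nonlinear_constraints, ConstraintScaler
)
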
@@ -51,10 +51,10 @@
"function_estimators": null,
"realization_filters": null,
"rhs_values": [
0.1
1.0
yngve-sk marked this conversation as resolved.
Show resolved Hide resolved
],
"scales": [
0.1
1.0
],
"types": [
2
Expand Down
39 changes: 39 additions & 0 deletions tests/everest/test_math_func.py
@@ -1,6 +1,7 @@
import os
from pathlib import Path

import numpy as np
import pytest
import yaml

@@ -181,3 +182,41 @@ def test_math_func_auto_scaled_objectives(copy_math_func_test_data_to_tmp):
    total = -(expected_p * 0.5 + expected_q * 0.25) / (0.5 + 0.25)

    assert total == optim


@pytest.mark.integration_test
def test_math_func_auto_scaled_constraints(copy_math_func_test_data_to_tmp):
    config = EverestConfig.load_file("config_advanced.yml")
    config_dict = config.model_dump(exclude_none=True)

    # control number of batches, no need for full convergence:
    config_dict["optimization"]["convergence_tolerance"] = 1e-10
    config_dict["optimization"]["max_batch_num"] = 3

    # Run with auto_scaling:
    config_dict["environment"]["output_folder"] = "output_auto_scale"
    config_dict["output_constraints"][0]["auto_scale"] = True
    config_dict["output_constraints"][0]["scale"] = 1.0
    config = EverestConfig.model_validate(config_dict)
    run_model = EverestRunModel.create(config)
    evaluator_server_config = EvaluatorServerConfig()
    run_model.run_experiment(evaluator_server_config)
    result1 = run_model.result

    # Run the equivalent without auto-scaling:
    config_dict["environment"]["output_folder"] = "output_manual_scale"
    config_dict["output_constraints"][0]["auto_scale"] = False
    config_dict["output_constraints"][0]["scale"] = 0.25  # x(0)
    # We need one batch less, no auto-scaling:
    config_dict["optimization"]["max_batch_num"] -= 1
    config = EverestConfig.model_validate(config_dict)
    run_model = EverestRunModel.create(config)
    evaluator_server_config = EvaluatorServerConfig()
    run_model.run_experiment(evaluator_server_config)
    result2 = run_model.result

    assert result1.total_objective == pytest.approx(result2.total_objective)
    assert np.allclose(
        np.fromiter(result1.controls.values(), dtype=np.float64),
        np.fromiter(result2.controls.values(), dtype=np.float64),
    )
4 changes: 4 additions & 0 deletions tests/everest/test_ropt_initialization.py
@@ -48,6 +48,7 @@ def test_everest2ropt_controls_auto_scale():
        transforms=get_optimization_domain_transforms(
            config.controls,
            config.objective_functions,
            config.output_constraints,
            config.model.realizations_weights,
        ),
    )
@@ -65,6 +66,7 @@ def test_everest2ropt_variables_auto_scale():
        transforms=get_optimization_domain_transforms(
            config.controls,
            config.objective_functions,
            config.output_constraints,
            config.model.realizations_weights,
        ),
    )
@@ -136,6 +138,7 @@ def test_everest2ropt_controls_input_constraint_auto_scale():
        transforms=get_optimization_domain_transforms(
            config.controls,
            config.objective_functions,
            config.output_constraints,
            config.model.realizations_weights,
        ),
    )
@@ -280,6 +283,7 @@ def test_everest2ropt_snapshot(case, snapshot):
        transforms=get_optimization_domain_transforms(
            config.controls,
            config.objective_functions,
            config.output_constraints,
            config.model.realizations_weights,
        ),
    ).model_dump()