
Commit fac55ca (parent 66b724e)
Update to ropt 0.11

7 files changed: +83 -85 lines

pyproject.toml (+2 -2)

@@ -138,8 +138,8 @@ everest = [
     "decorator",
     "resdata",
     "colorama",
-    "ropt[pandas]>=0.1,<0.11",
-    "ropt-dakota>=0.1,<0.11",
+    "ropt[pandas]>=0.11,<0.12",
+    "ropt-dakota>=0.11,<0.12",
 ]

 [tool.setuptools]
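
The only functional change here is the dependency pin: both ropt packages move to the 0.11 series. As a minimal sketch (not part of the commit; it assumes the `packaging` distribution is installed), the new range can be checked against an installed ropt like this:

from importlib.metadata import PackageNotFoundError, version

from packaging.specifiers import SpecifierSet


def ropt_pin_satisfied() -> bool:
    # Returns True when the installed ropt falls inside the new >=0.11,<0.12 pin.
    try:
        installed = version("ropt")
    except PackageNotFoundError:
        return False
    return installed in SpecifierSet(">=0.11,<0.12")


print(ropt_pin_satisfied())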

src/ert/run_models/everest_run_model.py (+1)

@@ -190,6 +190,7 @@ def run_experiment(
         self.ever_storage = EverestStorage(
             output_dir=Path(self._everest_config.optimization_output_dir),
         )
+        self.ever_storage.init(self._everest_config)
         self.ever_storage.observe_optimizer(optimizer)

         # Run the optimization:
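
The order of these calls matters after this change: the storage is first populated with names from the Everest config via `init()`, and only then hooked up to the optimizer, since the `START_OPTIMIZER_STEP` observer that used to do this is removed (see the storage diff below). A minimal sketch of the resulting call sequence; the helper function is ours, and `optimizer` is assumed to be a ropt `BasicOptimizer` built elsewhere:

from pathlib import Path

from everest.config import EverestConfig
from everest.everest_storage import EverestStorage


def attach_storage(everest_config: EverestConfig, optimizer) -> EverestStorage:
    # `optimizer` is expected to be a ropt BasicOptimizer, as in the run model above.
    storage = EverestStorage(
        output_dir=Path(everest_config.optimization_output_dir),
    )
    storage.init(everest_config)          # names now come from the config, not from a ropt event
    storage.observe_optimizer(optimizer)  # then hook up the remaining ropt observers
    return storage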

src/everest/everest_storage.py (+56 -39)

@@ -350,80 +350,82 @@ def read_from_output_dir(self) -> None:
         exp = _OptimizerOnlyExperiment(self._output_dir)
         self.data.read_from_experiment(exp)

-    def observe_optimizer(
-        self,
-        optimizer: BasicOptimizer,
-    ) -> None:
-        optimizer.add_observer(
-            EventType.START_OPTIMIZER_STEP, self._on_start_optimization
-        )
+    def observe_optimizer(self, optimizer: BasicOptimizer) -> None:
         optimizer.add_observer(
             EventType.FINISHED_EVALUATION, self._on_batch_evaluation_finished
         )
         optimizer.add_observer(
-            EventType.FINISHED_OPTIMIZER_STEP,
-            self._on_optimization_finished,
+            EventType.FINISHED_OPTIMIZER_STEP, self._on_optimization_finished
         )

-    def _on_start_optimization(self, event: Event) -> None:
-        def _format_control_names(control_names):
-            converted_names = []
-            for name in control_names:
-                converted = f"{name[0]}_{name[1]}"
-                if len(name) > 2:
-                    converted += f"-{name[2]}"
-                converted_names.append(converted)
-
-            return converted_names
-
-        config = event.config
-
-        # Note: We probably do not have to store
-        # all of this information, consider removing.
+    def init(self, everest_config: EverestConfig) -> None:
         self.data.controls = polars.DataFrame(
             {
                 "control_name": polars.Series(
-                    _format_control_names(config.variables.names), dtype=polars.String
+                    everest_config.formatted_control_names, dtype=polars.String
                 ),
             }
         )
+
         # TODO: The weight and normalization keys are only used by the everest api,
         # with everviz. They should be removed in the long run.
+        weights = np.fromiter(
+            (
+                1.0 if obj.weight is None else obj.weight
+                for obj in everest_config.objective_functions
+            ),
+            dtype=np.float64,
+        )
         self.data.objective_functions = polars.DataFrame(
             {
-                "objective_name": config.objectives.names,
-                "weight": polars.Series(
-                    config.objectives.weights, dtype=polars.Float64
-                ),
-                "normalization": polars.Series(  # Q: Is this correct?
-                    [1.0 / s for s in config.objectives.scales],
+                "objective_name": everest_config.objective_names,
+                "weight": polars.Series(weights / sum(weights), dtype=polars.Float64),
+                "normalization": polars.Series(
+                    [
+                        1.0 if obj.normalization is None else obj.normalization
+                        for obj in everest_config.objective_functions
+                    ],
                     dtype=polars.Float64,
                 ),
             }
         )

-        if config.nonlinear_constraints is not None:
+        if everest_config.output_constraints is not None:
             self.data.nonlinear_constraints = polars.DataFrame(
                 {
-                    "constraint_name": config.nonlinear_constraints.names,
+                    "constraint_name": everest_config.constraint_names,
                 }
             )
+        else:
+            self.data.nonlinear_constraints = None

         self.data.realization_weights = polars.DataFrame(
             {
                 "realization": polars.Series(
-                    config.realizations.names, dtype=polars.UInt32
+                    everest_config.model.realizations, dtype=polars.UInt32
                 ),
             }
         )

     def _store_function_results(self, results: FunctionResults) -> _EvaluationResults:
+        names = {
+            "variable": self.data.controls["control_name"],
+            "objective": self.data.objective_functions["objective_name"],
+            "nonlinear_constraint": (
+                self.data.nonlinear_constraints["constraint_name"]
+                if self.data.nonlinear_constraints is not None
+                else None
+            ),
+            "realization": self.data.realization_weights["realization"],
+        }
+
         # We could select only objective values,
         # but we select all to also get the constraint values (if they exist)
         realization_objectives = polars.from_pandas(
             results.to_dataframe(
                 "evaluations",
                 select=["objectives", "evaluation_ids"],
+                names=names,
             ).reset_index(),
         ).select(
             "batch_id",

@@ -438,6 +440,7 @@ def _store_function_results(self, results: FunctionResults) -> _EvaluationResults:
             results.to_dataframe(
                 "evaluations",
                 select=["constraints", "evaluation_ids"],
+                names=names,
             ).reset_index(),
         ).select(
             "batch_id",

@@ -452,7 +455,9 @@ def _store_function_results(self, results: FunctionResults) -> _EvaluationResults:
         )

         batch_constraints = polars.from_pandas(
-            results.to_dataframe("functions", select=["constraints"]).reset_index()
+            results.to_dataframe(
+                "functions", select=["constraints"], names=names
+            ).reset_index()
         ).select("batch_id", "nonlinear_constraint", "constraints")

         batch_constraints = batch_constraints.rename(

@@ -480,12 +485,13 @@ def _store_function_results(self, results: FunctionResults) -> _EvaluationResults:
             results.to_dataframe(
                 "functions",
                 select=["objectives", "weighted_objective"],
+                names=names,
             ).reset_index()
         ).select("batch_id", "objective", "objectives", "weighted_objective")

         realization_controls = polars.from_pandas(
             results.to_dataframe(
-                "evaluations", select=["variables", "evaluation_ids"]
+                "evaluations", select=["variables", "evaluation_ids"], names=names
             ).reset_index()
         ).select(
             "batch_id",

@@ -535,8 +541,19 @@ def _store_function_results(self, results: FunctionResults) -> _EvaluationResults:
         }

     def _store_gradient_results(self, results: GradientResults) -> _GradientResults:
+        names = {
+            "variable": self.data.controls["control_name"],
+            "objective": self.data.objective_functions["objective_name"],
+            "nonlinear_constraint": (
+                self.data.nonlinear_constraints["constraint_name"]
+                if self.data.nonlinear_constraints is not None
+                else None
+            ),
+            "realization": self.data.realization_weights["realization"],
+        }
+
         perturbation_objectives = polars.from_pandas(
-            results.to_dataframe("evaluations").reset_index()
+            results.to_dataframe("evaluations", names=names).reset_index()
         ).select(
             [
                 "batch_id",

@@ -560,7 +577,7 @@ def _store_gradient_results(self, results: GradientResults) -> _GradientResults:

         if results.gradients is not None:
             batch_objective_gradient = polars.from_pandas(
-                results.to_dataframe("gradients").reset_index()
+                results.to_dataframe("gradients", names=names).reset_index()
             ).select(
                 [
                     "batch_id",

@@ -678,7 +695,7 @@ def _on_batch_evaluation_finished(self, event: Event) -> None:
         logger.debug("Storing batch results dataframes")

         converted_results = tuple(
-            convert_to_maximize(result) for result in event.results
+            convert_to_maximize(result) for result in event.data.get("results", [])
        )
         results: list[FunctionResults | GradientResults] = []
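
The recurring change in this file is the `names` mapping: ropt 0.11 results no longer carry control, objective, constraint, or realization names, so every `to_dataframe` call now receives them explicitly, built from the frames stored by `init()`. Condensed into one hedged sketch (the helper functions are ours, introduced only to show the pattern once):

import polars


def _result_names(data):
    # Mirrors the dict built in _store_function_results and _store_gradient_results.
    return {
        "variable": data.controls["control_name"],
        "objective": data.objective_functions["objective_name"],
        "nonlinear_constraint": (
            data.nonlinear_constraints["constraint_name"]
            if data.nonlinear_constraints is not None
            else None
        ),
        "realization": data.realization_weights["realization"],
    }


def objective_frame(data, results):
    # `results` is a ropt FunctionResults; `names=` restores the human-readable
    # labels that earlier ropt versions attached to the results themselves.
    return polars.from_pandas(
        results.to_dataframe(
            "functions",
            select=["objectives", "weighted_objective"],
            names=_result_names(data),
        ).reset_index()
    ).select("batch_id", "objective", "objectives", "weighted_objective")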

src/everest/optimizer/everest2ropt.py (+20 -35)

@@ -6,7 +6,6 @@
 from ropt.enums import ConstraintType, PerturbationType, VariableType

 from everest.config import (
-    ControlConfig,
     EverestConfig,
     InputConstraintConfig,
     ModelConfig,

@@ -49,15 +48,13 @@ def _parse_controls(controls: FlattenedControls, ropt_config):
     ]
     indices = [idx for idx, is_enabled in enumerate(controls.enabled) if is_enabled]
     ropt_config["variables"] = {
-        "names": controls.names,
         "types": None if all(item is None for item in control_types) else control_types,
         "initial_values": controls.initial_guesses,
         "lower_bounds": controls.lower_bounds,
         "upper_bounds": controls.upper_bounds,
         "offsets": offsets,
         "scales": scales,
         "indices": indices if indices else None,
-        "delimiters": "_-",
     }

     if "gradients" not in ropt_config:

@@ -81,62 +78,55 @@ def _parse_controls(controls: FlattenedControls, ropt_config):
     ]

     ropt_config["gradient"]["perturbation_types"] = [
-        PerturbationType.SCALED.value
-        if auto_scale
-        else PerturbationType[perturbation_type.upper()]
-        for perturbation_type, auto_scale in zip(
-            controls.perturbation_types, controls.auto_scales, strict=True
-        )
+        PerturbationType[perturbation_type.upper()]
+        for perturbation_type in controls.perturbation_types
     ]


 def _parse_objectives(objective_functions: list[ObjectiveFunctionConfig], ropt_config):
-    names: list[str] = []
     scales: list[float] = []
     auto_scale: list[bool] = []
     weights: list[float] = []
-    transform_indices: list[int] = []
-    transforms: list = []
+    function_estimator_indices: list[int] = []
+    function_estimators: list = []

     for objective in objective_functions:
         assert isinstance(objective.name, str)
-        names.append(objective.name)
         weights.append(objective.weight or 1.0)
         scales.append(1.0 / (objective.normalization or 1.0))
         auto_scale.append(objective.auto_normalize or False)

         # If any objective specifies an objective type, we have to specify
-        # function transforms in ropt to implement these types. This is done by
-        # supplying a list of transforms and for each objective an index into
+        # function estimators in ropt to implement these types. This is done by
+        # supplying a list of estimators and for each objective an index into
         # that list:
         objective_type = objective.type
         if objective_type is None:
             objective_type = "mean"
-        # Find the transform if it exists:
-        transform_idx = next(
+        # Find the estimator if it exists:
+        function_estimator_idx = next(
             (
                 idx
-                for idx, transform in enumerate(transforms)
-                if transform["method"] == objective_type
+                for idx, estimator in enumerate(function_estimators)
+                if estimator["method"] == objective_type
             ),
             None,
         )
-        # If not, make a new transform:
-        if transform_idx is None:
-            transform_idx = len(transforms)
-            transforms.append({"method": objective_type})
-        transform_indices.append(transform_idx)
+        # If not, make a new estimator:
+        if function_estimator_idx is None:
+            function_estimator_idx = len(function_estimators)
+            function_estimators.append({"method": objective_type})
+        function_estimator_indices.append(function_estimator_idx)

     ropt_config["objectives"] = {
-        "names": names,
         "weights": weights,
         "scales": scales,
         "auto_scale": auto_scale,
     }
-    if transforms:
+    if function_estimators:
         # Only needed if we specified at least one objective type:
-        ropt_config["objectives"]["function_transforms"] = transform_indices
-        ropt_config["function_transforms"] = transforms
+        ropt_config["objectives"]["function_estimators"] = function_estimator_indices
+        ropt_config["function_estimators"] = function_estimators


 def _parse_input_constraints(

@@ -197,7 +187,6 @@ def _parse_output_constraints(
     if not output_constraints:
         return

-    names: list[str] = []
     rhs_values: list[float] = []
     scales: list[float] = []
     auto_scale: list[bool] = []

@@ -207,8 +196,6 @@ def _add_output_constraint(
         rhs_value: float | None, constraint_type: ConstraintType, suffix=None
     ):
         if rhs_value is not None:
-            name = constr.name
-            names.append(name if suffix is None else f"{name}:{suffix}")
             rhs_values.append(rhs_value)
             scales.append(constr.scale if constr.scale is not None else 1.0)
             auto_scale.append(constr.auto_scale or False)

@@ -238,7 +225,6 @@ def _add_output_constraint(
     )

     ropt_config["nonlinear_constraints"] = {
-        "names": names,
         "rhs_values": rhs_values,
         "scales": scales,
         "auto_scale": auto_scale,

@@ -336,9 +322,9 @@ def _parse_optimization(
         # indices to any realization filters that should be applied. In this
         # case, we want all objectives and constraints to refer to the same
         # filter implementing cvar:
-        objective_count = len(ropt_config["objectives"]["names"])
+        objective_count = len(ropt_config["objectives"]["weights"])
        constraint_count = len(
-            ropt_config.get("nonlinear_constraints", {}).get("names", [])
+            ropt_config.get("nonlinear_constraints", {}).get("rhs_values", [])
        )
        ropt_config["objectives"]["realization_filters"] = objective_count * [0]
        if constraint_count > 0:

@@ -366,7 +352,6 @@ def _parse_model(
     ever_reals_weights = [1.0 / len(ever_reals)] * len(ever_reals)

     ropt_config["realizations"] = {
-        "names": ever_reals,
         "weights": ever_reals_weights,
     }
     min_real_succ = ever_opt.min_realizations_success if ever_opt else None
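
Taken together, the converter now emits ropt configuration sections without `names` or `delimiters` entries, and objective types are wired up as function estimators instead of function transforms. An illustrative fragment of the resulting dictionary shape, with made-up values for two objectives that both default to the "mean" type (only the keys follow the commit; the numbers do not):

# Hypothetical example data; the keys match the dicts built in everest2ropt.py.
ropt_config = {
    "objectives": {
        "weights": [1.0, 1.0],
        "scales": [1.0, 1.0],
        "auto_scale": [False, False],
        # Present only when at least one objective specifies a type:
        "function_estimators": [0, 0],
    },
    "function_estimators": [{"method": "mean"}],
    "realizations": {"weights": [0.5, 0.5]},
}

# Downstream code counts items instead of reading the removed name lists:
objective_count = len(ropt_config["objectives"]["weights"])
constraint_count = len(ropt_config.get("nonlinear_constraints", {}).get("rhs_values", []))
print(objective_count, constraint_count)  # 2 0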

tests/everest/test_multiobjective.py (+1 -3)

@@ -89,10 +89,8 @@ def test_multi_objectives2ropt(copy_mocked_test_data_to_tmp):
     enopt_config = EnOptConfig.model_validate(
         everest2ropt(EverestConfig.model_validate(config_dict))
     )
-    assert len(enopt_config.objectives.names) == 2
-    assert enopt_config.objectives.names[1] == ever_objs[1]["name"]
+    assert len(enopt_config.objectives.weights) == 2
     assert enopt_config.objectives.weights[1] == ever_objs[1]["weight"] / norm
-    assert enopt_config.objectives.names[0] == ever_objs[0]["name"]
     assert enopt_config.objectives.weights[0] == ever_objs[0]["weight"] / norm
     assert enopt_config.objectives.scales[0] == ever_objs[0]["normalization"]

tests/everest/test_output_constraints.py (-1)

@@ -222,7 +222,6 @@ def test_upper_bound_output_constraint_def(copy_mocked_test_data_to_tmp):
     }

     assert expected["scale"] == 1.0 / ropt_conf.nonlinear_constraints.scales[0]
-    assert expected["name"] == ropt_conf.nonlinear_constraints.names[0]
     assert expected["rhs_value"] == ropt_conf.nonlinear_constraints.rhs_values[0]
     assert expected["type"] == ropt_conf.nonlinear_constraints.types[0]
