Skip to content

Commit 5f45a05

Browse files
committed
initial pass at PTDF extension
1 parent 061756a commit 5f45a05

File tree

8 files changed

+366
-35
lines changed

8 files changed

+366
-35
lines changed

examples/uc/ptdf_ext.py

+246
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,246 @@
1+
# Copyright 2020 by B. Knueven, D. Mildebrath, C. Muir, J-P Watson, and D.L. Woodruff
2+
# This software is distributed under the 3-clause BSD License.
3+
4+
import pyomo.environ as pyo
5+
from pyomo.common.collections import ComponentMap, ComponentSet
6+
from mpisppy.extensions.extension import Extension
7+
from mpisppy.utils.sputils import is_persistent
8+
9+
import egret.common.lazy_ptdf_utils as lpu
10+
from egret.models.unit_commitment import (_lazy_ptdf_check_violations,
11+
_lazy_ptdf_log_terminate_on_violations,
12+
_lazy_ptdf_warmstart_copy_violations,
13+
_lazy_ptdf_solve,
14+
_lazy_ptdf_normal_terminatation,
15+
_lazy_ptdf_violation_adder,
16+
)
17+
import logging
18+
from egret.common.log import logger
19+
20+
logger.setLevel(logging.ERROR)
21+
22+
class PTDFExtension(Extension):
    ''' mpi-sppy Extension that adds EGRET's lazy PTDF (transmission flow)
        constraint generation around each subproblem solve.

        Before the first solve of each subproblem, the binaries are relaxed
        and LP-based lazy-constraint passes seed the model with violated flow
        constraints; after every MIP solve, a final lazy pass re-checks and
        re-solves until no violations remain.

        Args:
            spopt_object (SPBase): The SPBase (e.g. PH) object for the current model
            **kwargs: pre_lp_iteration_limit, lp_iteration_limit,
                lp_cleanup_phase, iteration_limit, verbose (see __init__)
    '''
    def __init__(self, spopt_object, **kwargs):
        # attach self.opt
        super().__init__(spopt_object)

        # iteration caps for the three lazy-PTDF phases; 0 disables a phase
        self.pre_lp_iteration_limit = kwargs.get('pre_lp_iteration_limit', 100)
        self.lp_iteration_limit = kwargs.get('lp_iteration_limit', 100)
        # whether to drop inactive flow constraints after the LP phases
        self.lp_cleanup_phase = kwargs.get('lp_cleanup_phase', True)
        # cap for the post-solve (MIP) lazy pass
        self.iteration_limit = kwargs.get('iteration_limit', 100000)
        self.verbose = kwargs.get('verbose',False)

        if self.verbose:
            # module-level egret logger was set to ERROR at import time;
            # re-enable INFO output when verbose is requested
            logger.setLevel(logging.INFO)

        # subproblems whose _initial_pass has already run
        self.initial_pass_complete = ComponentSet()
        self.bundling = self.opt.bundling

        # per-subproblem list of vars to load from a persistent solver
        # (None for non-persistent solvers)
        self.vars_to_load = ComponentMap()
        # per-scenario-block time period sets
        self.time_periods = ComponentMap()
        # per-scenario-block objective multiplier (1. when not bundling)
        self.bundle_conditional_probability = ComponentMap()

    def pre_solve(self, subproblem):
        """Run the one-time LP warmstart/relaxation passes on first sight
        of each subproblem; subsequent calls are no-ops."""
        if subproblem not in self.initial_pass_complete:
            # spoke class name is used only for log prefixes
            self.spoke_name = None
            if self.opt.spcomm is not None:
                self.spoke_name = self.opt.spcomm.__class__.__name__
            self._initial_pass(subproblem)
            self.initial_pass_complete.add(subproblem)

    def post_solve(self, subproblem, results):
        """After mpi-sppy's MIP solve, iterate the lazy-PTDF check/add/solve
        loop until no flow violations remain; return the final results."""
        scenario_blocks = self._get_scenario_blocks(subproblem)
        termination_cond, results, iterations = \
                self._mip_pass(subproblem, scenario_blocks, results)
        return results

    def _get_scenario_blocks(self, subproblem):
        """Return the tuple of scenario blocks making up this subproblem
        (the subproblem itself when not bundling)."""
        if self.bundling:
            return tuple( subproblem.component(sname) \
                    for sname in subproblem._ef_scenario_names )
        else:
            return ( subproblem, )

    def _initial_pass(self, subproblem):
        """Relax binaries, solve the LP, run the warmstart and LP lazy-PTDF
        passes, optionally clean up inactive constraints, then re-enforce
        binaries so mpi-sppy can solve the MIP."""
        # get vars_to_load for later
        scenario_blocks = self._get_scenario_blocks(subproblem)
        if is_persistent(subproblem._solver_plugin):
            subproblem_vars_to_load = []
            for s in scenario_blocks:
                for t in s.TimePeriods:
                    b = s.TransmissionBlock[t]
                    # only the net-withdrawal vars are needed for violation checks
                    assert isinstance(b.p_nw, pyo.Var)
                    subproblem_vars_to_load.extend(b.p_nw.values())

            self.vars_to_load[subproblem] = subproblem_vars_to_load
        else:
            # non-persistent solvers load full solutions; no var list needed
            self.vars_to_load[subproblem] = None

        for s in scenario_blocks:
            self.time_periods[s] = s.TimePeriods
            if self.bundling:
                # scale added constraint objective terms by the scenario's
                # conditional probability within the bundle
                self.bundle_conditional_probability[s] = \
                        s._mpisppy_data.bundle_conditional_probability
            else:
                self.bundle_conditional_probability[s] = 1.

        # echo solver output only on rank 0 when the option asks for it
        self.tee = ("tee-rank0-solves" in self.opt.options
                    and self.opt.options['tee-rank0-solves']
                    and self.opt.cylinder_rank == 0
                    )

        # both LP phases disabled: nothing to do before the MIP solve
        if (self.pre_lp_iteration_limit + self.lp_iteration_limit) == 0:
            return

        # relax the initial subproblems
        for s in scenario_blocks:
            lpu.uc_instance_binary_relaxer(s, subproblem._solver_plugin)

        # solve the model
        for k,val in self.opt.current_solver_options.items():
            subproblem._solver_plugin.options[k] = val

        if is_persistent(subproblem._solver_plugin):
            results = subproblem._solver_plugin.solve(subproblem, tee=self.tee, save_results=False, load_solutions=False)
            subproblem._solver_plugin.load_vars(self.vars_to_load[subproblem])
        else:
            results = subproblem._solver_plugin.solve(subproblem, tee=self.tee, load_solutions=False)
            subproblem.solutions.load_from(results)

        if self.pre_lp_iteration_limit > 0:
            lp_warmstart_termination_cond, results, lp_warmstart_iterations = \
                    self._pre_lp_pass(subproblem, scenario_blocks)

        if self.lp_iteration_limit > 0:
            lp_termination_cond, results, lp_iterations = \
                    self._lp_pass(subproblem, scenario_blocks)

        if self.lp_cleanup_phase:
            tot_removed = 0
            for s in scenario_blocks:
                for t,b in s.TransmissionBlock.items():
                    tot_removed += lpu.remove_inactive(b, subproblem._solver_plugin,
                                        t, prepend_str=f"[LP cleanup phase on rank {self.opt.global_rank}] ")
            logger.info(f"[LP cleanup phase on rank {self.opt.global_rank} for {self.spoke_name}] removed {tot_removed} inactive flow constraint(s)")
        # enforce binaries in subproblems
        for s in scenario_blocks:
            lpu.uc_instance_binary_enforcer(s, subproblem._solver_plugin)

        # mpi-sppy will solve the MIP
        return

    def _do_pass(self, subproblem, scenario_blocks, time_periods, vars_to_load,
                 prepend_str, iteration_limit, add_all_lazy_violations=False,
                 results=None, pre_lp_cleanup=False):
        """Core lazy-PTDF loop: check violations per scenario block, add the
        violated flow constraints, and re-solve, up to iteration_limit times.

        Args:
            time_periods (ComponentMap): block -> time periods to monitor
            vars_to_load (list or None): persistent-solver vars to load each solve
            prepend_str (str): log prefix identifying the phase/rank
            add_all_lazy_violations (bool): when True, add the final batch of
                violations before terminating (used by the LP phase to seed
                the MIP); when False, terminate as soon as none are violated
            results: solver results from a prior solve, returned if the loop
                terminates before its first solve
            pre_lp_cleanup (bool): run _pre_lp_cleanup (slack removal /
                warmstart copy) before returning

        Returns:
            (termination condition, results, iteration count) triple
        """
        persistent_solver = is_persistent(subproblem._solver_plugin)
        for i in range(iteration_limit):
            flows, viol_lazy = ComponentMap(), ComponentMap()
            terminate_this_iter, all_viol_in_model = ComponentMap(), ComponentMap()
            for s in scenario_blocks:
                flows[s], viol_num, mon_viol_num, viol_lazy[s] = \
                        _lazy_ptdf_check_violations(s, s.model_data, time_periods[s],
                                                    s._ptdf_options, prepend_str)

                terminate_this_iter[s], all_viol_in_model[s] = \
                        _lazy_ptdf_log_terminate_on_violations(viol_num, mon_viol_num,
                                                               i, prepend_str)

            # terminate only when every scenario block agrees
            all_viol_in_model = all(all_viol_in_model.values())
            terminate_this_iter = all(terminate_this_iter.values())
            if terminate_this_iter and not add_all_lazy_violations:
                if pre_lp_cleanup:
                    results = self._pre_lp_cleanup(subproblem, scenario_blocks,
                                                   persistent_solver, time_periods, prepend_str)
                # NOTE: "terminatation" is the (misspelled) name exported by egret
                return _lazy_ptdf_normal_terminatation(all_viol_in_model, results, i, prepend_str)

            for s in scenario_blocks:
                _lazy_ptdf_violation_adder(s, s.model_data, flows[s], viol_lazy[s], time_periods[s],
                                           subproblem._solver_plugin, s._ptdf_options, prepend_str, i,
                                           obj_multi=self.bundle_conditional_probability[s])

            # when seeding the MIP we add the last violations first, then stop
            if terminate_this_iter and add_all_lazy_violations:
                if pre_lp_cleanup:
                    results = self._pre_lp_cleanup(subproblem, scenario_blocks,
                                                   persistent_solver, time_periods, prepend_str)
                return _lazy_ptdf_normal_terminatation(all_viol_in_model, results, i, prepend_str)

            results = _lazy_ptdf_solve(subproblem, subproblem._solver_plugin, persistent_solver,
                                       symbolic_solver_labels=False, solver_tee=self.tee,
                                       vars_to_load=vars_to_load, solve_method_options=None)
        else:
            # for-else: loop exhausted without an early return -> hit the cap
            if pre_lp_cleanup:
                results = self._pre_lp_cleanup(subproblem, scenario_blocks,
                                               persistent_solver, time_periods, prepend_str)
            return lpu.LazyPTDFTerminationCondition.ITERATION_LIMIT, results, i

    def _pre_lp_cleanup(self, subproblem, scenario_blocks, persistent_solver, time_periods, prepend_str):
        """After the warmstart phase: load flow-slack values, copy the
        violations into the model as a warmstart, and re-solve the LP."""
        if persistent_solver:
            # unpack lpu._load_pf_slacks into a single call to load_vars
            vars_to_load = []
            for s in scenario_blocks:
                for t in time_periods[s]:
                    b = s.TransmissionBlock[t]
                    # branch, interface, and contingency flow slacks
                    vars_to_load.extend(b.pf_slack_pos.values())
                    vars_to_load.extend(b.pf_slack_neg.values())
                    vars_to_load.extend(b.pfi_slack_pos.values())
                    vars_to_load.extend(b.pfi_slack_neg.values())
                    vars_to_load.extend(b.pfc_slack_pos.values())
                    vars_to_load.extend(b.pfc_slack_neg.values())
            if vars_to_load:
                subproblem._solver_plugin.load_vars(vars_to_load)

        for s in scenario_blocks:
            _lazy_ptdf_warmstart_copy_violations(s, s.model_data, time_periods[s],
                                                 subproblem._solver_plugin, s._ptdf_options, prepend_str,
                                                 obj_multi=self.bundle_conditional_probability[s])

        results = _lazy_ptdf_solve(subproblem, subproblem._solver_plugin, persistent_solver,
                                   symbolic_solver_labels=False, solver_tee=self.tee,
                                   vars_to_load=self.vars_to_load[subproblem],
                                   solve_method_options=None)
        return results

    def _pre_lp_pass(self, subproblem, scenario_blocks):
        """Warmstart pass: monitor only the peak-demand time period of each
        scenario block, which cheaply seeds the binding flow constraints."""
        # NOTE(review): vars_to_load_t_subset appears unused below — confirm
        vars_to_load_t_subset = ComponentMap()
        t_subset = ComponentMap()
        persistent_solver = is_persistent(subproblem._solver_plugin)

        if persistent_solver:
            vars_to_load = []
        else:
            vars_to_load = None
        for s in scenario_blocks:
            # time period with the highest total demand
            max_demand_time = max(s.TotalDemand, key=s.TotalDemand.__getitem__)
            t_subset[s] = [max_demand_time,]
            if persistent_solver:
                transmission_block = s.TransmissionBlock[max_demand_time]
                assert isinstance(transmission_block.p_nw, pyo.Var)
                vars_to_load.extend(transmission_block.p_nw.values())

        return self._do_pass(subproblem, scenario_blocks, t_subset, vars_to_load,
                             prepend_str=f"[LP warmstart phase on rank {self.opt.global_rank} for {self.spoke_name}] ",
                             iteration_limit=self.pre_lp_iteration_limit,
                             add_all_lazy_violations=False,
                             results=None, pre_lp_cleanup=True)

    def _lp_pass(self, subproblem, scenario_blocks):
        """LP pass over all time periods; adds all lazy violations so the
        subsequent MIP starts with every known binding flow constraint."""
        return self._do_pass(subproblem, scenario_blocks,
                             self.time_periods, self.vars_to_load[subproblem],
                             prepend_str=f"[LP phase on rank {self.opt.global_rank} for {self.spoke_name}] ",
                             iteration_limit=self.lp_iteration_limit,
                             add_all_lazy_violations=True,
                             results=None, pre_lp_cleanup=False)

    def _mip_pass(self, subproblem, scenario_blocks, results):
        """Post-solve pass: re-check and re-solve the MIP until no flow
        violations remain (or iteration_limit is reached)."""
        return self._do_pass(subproblem, scenario_blocks,
                             self.time_periods, self.vars_to_load[subproblem],
                             prepend_str=f"[MIP phase on rank {self.opt.global_rank} for {self.spoke_name}] ",
                             iteration_limit=self.iteration_limit,
                             add_all_lazy_violations=False,
                             results=results, pre_lp_cleanup=False)

examples/uc/uc_cylinders.py

+23-5
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@
2020
from mpisppy.utils import vanilla
2121
from mpisppy.extensions.cross_scen_extension import CrossScenarioExtension
2222

23+
from ptdf_ext import PTDFExtension
2324

2425
def _parse_args():
2526
parser = baseparsers.make_parser("uc_cylinders")
@@ -30,6 +31,7 @@ def _parse_args():
3031
parser = baseparsers.xhatlooper_args(parser)
3132
parser = baseparsers.xhatshuffle_args(parser)
3233
parser = baseparsers.cross_scenario_cuts_args(parser)
34+
parser = baseparsers.mip_options(parser)
3335
parser.add_argument("--ph-mipgaps-json",
3436
help="json file with mipgap schedule (default None)",
3537
dest="ph_mipgaps_json",
@@ -47,6 +49,12 @@ def _parse_args():
4749
action='store_true',
4850
dest='xhat_closest_tree',
4951
default=False)
52+
parser.add_argument("--add-contingency-constraints",
53+
help="Use EGRET to monitor all possible"
54+
" non-disconnecting contingencies (default False)",
55+
action='store_true',
56+
dest='add_contingency_constraints',
57+
default=False)
5058
args = parser.parse_args()
5159
return args
5260

@@ -65,15 +73,19 @@ def main():
6573
fixer_tol = args.fixer_tol
6674
with_cross_scenario_cuts = args.with_cross_scenario_cuts
6775

68-
scensavail = [3,5,10,25,50,100]
76+
scensavail = [3,4,5,10,25,50,100]
6977
if num_scen not in scensavail:
7078
raise RuntimeError("num-scen was {}, but must be in {}".\
7179
format(num_scen, scensavail))
7280

7381
scenario_creator_kwargs = {
74-
"scenario_count": num_scen,
75-
"path": str(num_scen) + "scenarios_r1",
76-
}
82+
"scenario_count": num_scen,
83+
"path": str(num_scen) + "scenarios_r1",
84+
"add_contingency_constraints": args.add_contingency_constraints,
85+
}
86+
if num_scen == 4:
87+
scenario_creator_kwargs["path"] = str(num_scen) + "scenarios_rtsgmlc"
88+
7789
scenario_creator = uc.scenario_creator
7890
scenario_denouement = uc.scenario_denouement
7991
all_scenario_names = [f"Scenario{i+1}" for i in range(num_scen)]
@@ -90,7 +102,7 @@ def main():
90102
rho_setter = rho_setter)
91103

92104
# Extend and/or correct the vanilla dictionary
93-
ext_classes = [Gapper]
105+
ext_classes = [Gapper, PTDFExtension]
94106
if with_fixer:
95107
ext_classes.append(Fixer)
96108
if with_cross_scenario_cuts:
@@ -134,24 +146,30 @@ def main():
134146
# FWPH spoke
135147
if with_fwph:
136148
fw_spoke = vanilla.fwph_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs)
149+
fw_spoke["opt_kwargs"]["extensions"] = PTDFExtension
137150

138151
# Standard Lagrangian bound spoke
139152
if with_lagrangian:
140153
lagrangian_spoke = vanilla.lagrangian_spoke(*beans,
141154
scenario_creator_kwargs=scenario_creator_kwargs,
142155
rho_setter = rho_setter)
156+
lagrangian_spoke["opt_kwargs"]["extensions"] = PTDFExtension
143157

144158
# xhat looper bound spoke
145159
if with_xhatlooper:
146160
xhatlooper_spoke = vanilla.xhatlooper_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs)
161+
xhatlooper_spoke["opt_kwargs"]["extensions"] = PTDFExtension
147162

148163
# xhat shuffle bound spoke
149164
if with_xhatshuffle:
150165
xhatshuffle_spoke = vanilla.xhatshuffle_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs)
166+
xhatshuffle_spoke["opt_kwargs"]["extensions"] = PTDFExtension
151167

152168
# cross scenario cut spoke
153169
if with_cross_scenario_cuts:
154170
cross_scenario_cut_spoke = vanilla.cross_scenario_cut_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs)
171+
cross_scenario_cut_spoke["opt_kwargs"]["extensions"] = PTDFExtension
172+
155173

156174
list_of_spoke_dict = list()
157175
if with_fwph:

0 commit comments

Comments
 (0)