diff --git a/core/src/Ribasim.jl b/core/src/Ribasim.jl index f33f1045c..6beebf38c 100644 --- a/core/src/Ribasim.jl +++ b/core/src/Ribasim.jl @@ -147,7 +147,8 @@ using Tables: Tables, AbstractRow, columntable using StructArrays: StructVector # OrderedSet is used to store the order of the substances in the network. -using DataStructures: OrderedSet +# OrderedDict is used to store the order of the sources in a subnetwork. +using DataStructures: OrderedSet, OrderedDict export libribasim diff --git a/core/src/allocation_init.jl b/core/src/allocation_init.jl index 44582e8ed..90e033229 100644 --- a/core/src/allocation_init.jl +++ b/core/src/allocation_init.jl @@ -389,56 +389,6 @@ function add_constraints_conservation_node!( return nothing end -""" -Add the fractional flow constraints to the allocation problem. -The constraint indices are allocation edges over a fractional flow node. - -Constraint: -flow after fractional_flow node <= fraction * inflow -""" -function add_constraints_fractional_flow!( - problem::JuMP.Model, - p::Parameters, - subnetwork_id::Int32, -)::Nothing - (; graph, fractional_flow) = p - F = problem[:F] - node_ids = graph[].node_ids[subnetwork_id] - - # Find the nodes in this subnetwork with a FractionalFlow - # outneighbor, and collect the corresponding flow fractions - # and inflow variable - edges_to_fractional_flow = Tuple{NodeID, NodeID}[] - fractions = Dict{Tuple{NodeID, NodeID}, Float64}() - inflows = Dict{NodeID, JuMP.AffExpr}() - - # Find edges of the form (node_id, outflow_id) where outflow_id - # is for a FractionalFlow node - for node_id in node_ids - for outflow_id in outflow_ids(graph, node_id) - if outflow_id.type == NodeType.FractionalFlow - edge = (node_id, outflow_id) - push!(edges_to_fractional_flow, edge) - fractions[edge] = fractional_flow.fraction[outflow_id.idx] - inflows[node_id] = sum([ - F[(inflow_id, node_id)] for inflow_id in inflow_ids(graph, node_id) - ]) - end - end - end - - # Create the constraints if there is at least 
one - if !isempty(edges_to_fractional_flow) - problem[:fractional_flow] = JuMP.@constraint( - problem, - [edge = edges_to_fractional_flow], - F[edge] <= fractions[edge] * inflows[edge[1]], - base_name = "fractional_flow" - ) - end - return nothing -end - """ Add the Basin flow constraints to the allocation problem. The constraint indices are the Basin node IDs. @@ -475,37 +425,6 @@ function add_constraints_buffer!(problem::JuMP.Model)::Nothing return nothing end -""" -Add the flow demand node outflow constraints to the allocation problem. -The constraint indices are the node IDs of the nodes that have a flow demand. - -Constraint: -flow out of node with flow demand <= ∞ if not at flow demand priority, 0.0 otherwise -""" -function add_constraints_flow_demand_outflow!( - problem::JuMP.Model, - p::Parameters, - subnetwork_id::Int32, -)::Nothing - (; graph) = p - F = problem[:F] - node_ids = graph[].node_ids[subnetwork_id] - - # Collect the node IDs in the subnetwork which have a flow demand - node_ids_flow_demand = [ - node_id for - node_id in node_ids if has_external_demand(graph, node_id, :flow_demand)[1] - ] - - problem[:flow_demand_outflow] = JuMP.@constraint( - problem, - [node_id = node_ids_flow_demand], - F[(node_id, outflow_id(graph, node_id))] <= 0.0, - base_name = "flow_demand_outflow" - ) - return nothing -end - """ Construct the allocation problem for the current subnetwork as a JuMP model. """ @@ -537,12 +456,71 @@ function allocation_problem( add_constraints_source!(problem, p, subnetwork_id) add_constraints_user_source!(problem, p, subnetwork_id) add_constraints_basin_flow!(problem) - add_constraints_flow_demand_outflow!(problem, p, subnetwork_id) add_constraints_buffer!(problem) return problem end +""" +Get the sources within the subnetwork in the order in which they will +be optimized over. 
+TODO: Get preferred source order from input +""" +function get_sources_in_order( + problem::JuMP.Model, + p::Parameters, + subnetwork_id::Integer, +)::OrderedDict{Tuple{NodeID, NodeID}, AllocationSource} + # NOTE: return flow has to be done before other sources, to prevent that + # return flow is directly used within the same priority + + (; basin, user_demand, graph, allocation) = p + + sources = OrderedDict{Tuple{NodeID, NodeID}, AllocationSource}() + + # User return flow + for node_id in sort(only(problem[:source_user].axes)) + edge = user_demand.outflow_edge[node_id.idx].edge + sources[edge] = AllocationSource(; edge, type = AllocationSourceType.user_return) + end + + # Source edges (within subnetwork) + for edge in + sort(only(problem[:source].axes); by = edge -> (edge[1].value, edge[2].value)) + if graph[edge[1]].subnetwork_id == graph[edge[2]].subnetwork_id + sources[edge] = AllocationSource(; edge, type = AllocationSourceType.edge) + end + end + + # Basins with level demand + for node_id in basin.node_id + if (graph[node_id].subnetwork_id == subnetwork_id) && + has_external_demand(graph, node_id, :level_demand)[1] + edge = (node_id, node_id) + sources[edge] = AllocationSource(; edge, type = AllocationSourceType.basin) + end + end + + # Main network to subnetwork connections + for edge in sort( + collect(keys(allocation.subnetwork_demands)); + by = edge -> (edge[1].value, edge[2].value), + ) + if graph[edge[2]].subnetwork_id == subnetwork_id + sources[edge] = + AllocationSource(; edge, type = AllocationSourceType.main_to_sub) + end + end + + # Buffers + for node_id in sort(only(problem[:F_flow_buffer_out].axes)) + edge = (node_id, node_id) + sources[edge] = AllocationSource(; edge, type = AllocationSourceType.buffer) + end + + sources +end + """ Construct the JuMP.jl problem for allocation. 
@@ -563,7 +541,8 @@ function AllocationModel( )::AllocationModel capacity = get_capacity(p, subnetwork_id) problem = allocation_problem(p, capacity, subnetwork_id) - flow_priority = JuMP.Containers.SparseAxisArray(Dict(only(problem[:F].axes) .=> 0.0)) + sources = get_sources_in_order(problem, p, subnetwork_id) + flow = JuMP.Containers.SparseAxisArray(Dict(only(problem[:F].axes) .=> 0.0)) - return AllocationModel(; subnetwork_id, capacity, flow_priority, problem, Δt_allocation) + return AllocationModel(; subnetwork_id, capacity, flow, sources, problem, Δt_allocation) end diff --git a/core/src/allocation_optim.jl b/core/src/allocation_optim.jl index 23b83d8ca..5c7bb3a81 100644 --- a/core/src/allocation_optim.jl +++ b/core/src/allocation_optim.jl @@ -1,9 +1,9 @@ @enumx OptimizationType internal_sources collect_demands allocate """ -Add an objective term `demand * (1 - flow/demand)^2`. If the absolute +Add an objective term `demand * (1 - flow/demand)²`. If the absolute value of the demand is very small, this would lead to huge coefficients, -so in that case a term of the form (flow - demand)^2 is used. +so in that case a term of the form (flow - demand)² is used. """ function add_objective_term!( ex::JuMP.QuadExpr, @@ -11,12 +11,12 @@ function add_objective_term!( F::JuMP.VariableRef, )::Nothing if abs(demand) < 1e-5 - # Error term (F - d)^2 = F² - 2dF + d² + # Error term (F - d)² = F² - 2dF + d² JuMP.add_to_expression!(ex, 1.0, F, F) JuMP.add_to_expression!(ex, -2.0 * demand, F) JuMP.add_to_expression!(ex, demand^2) else - # Error term d*(1 - F/d)^2 = F²/d - 2F + d + # Error term d*(1 - F/d)² = F²/d - 2F + d JuMP.add_to_expression!(ex, 1.0 / demand, F, F) JuMP.add_to_expression!(ex, -2.0, F) JuMP.add_to_expression!(ex, demand) @@ -29,8 +29,8 @@ Set the objective for the given priority. 
""" function set_objective_priority!( allocation_model::AllocationModel, - p::Parameters, u::ComponentVector, + p::Parameters, t::Float64, priority_idx::Int, )::Nothing @@ -39,6 +39,7 @@ function set_objective_priority!( (; node_id, demand_reduced) = user_demand (; main_network_connections, subnetwork_demands) = allocation F = problem[:F] + F_flow_buffer_in = problem[:F_flow_buffer_in] # Initialize an empty quadratic expression for the objective ex = JuMP.QuadExpr() @@ -75,7 +76,7 @@ function set_objective_priority!( priority_idx == flow_priority_idx ? flow_demand.demand[demand_node_id.idx] : 0.0 - F_fd = F[edge] + F_fd = F_flow_buffer_in[to_node_id] add_objective_term!(ex, d, F_fd) end end @@ -100,7 +101,7 @@ function set_objective_priority!( end """ -Assign the allocations to the UserDemand as determined by the solution of the allocation problem. +Assign the allocations to the UserDemand or subnetwork as determined by the solution of the allocation problem. """ function assign_allocations!( allocation_model::AllocationModel, @@ -108,7 +109,7 @@ function assign_allocations!( priority_idx::Int, optimization_type::OptimizationType.T, )::Nothing - (; problem, subnetwork_id, capacity) = allocation_model + (; subnetwork_id, capacity, flow) = allocation_model (; graph, user_demand, allocation) = p (; subnetwork_demands, @@ -117,7 +118,6 @@ function assign_allocations!( main_network_connections, ) = allocation main_network_source_edges = get_main_network_connections(p, subnetwork_id) - F = problem[:F] for edge in keys(capacity.data) # If this edge does not exist in the physical model then it comes from a # bidirectional edge, and thus does not have directly allocating flow @@ -129,13 +129,13 @@ function assign_allocations!( if optimization_type == OptimizationType.collect_demands if graph[edge...].subnetwork_id_source == subnetwork_id && edge ∈ main_network_source_edges - allocated = JuMP.value(F[edge]) + allocated = flow[edge] subnetwork_demands[edge][priority_idx] += 
allocated end elseif optimization_type == OptimizationType.allocate user_demand_node_id = edge[2] if user_demand_node_id.type == NodeType.UserDemand - allocated = JuMP.value(F[edge]) + allocated = flow[edge] user_demand.allocated[user_demand_node_id.idx, priority_idx] = allocated end end @@ -150,7 +150,7 @@ function assign_allocations!( continue end for edge_id in main_network_source_edges - subnetwork_allocateds[edge_id][priority_idx] = JuMP.value(F[edge_id]) + subnetwork_allocateds[edge_id][priority_idx] = flow[edge_id] end end end @@ -169,11 +169,10 @@ function set_initial_capacities_inlet!( p::Parameters, optimization_type::OptimizationType.T, )::Nothing - (; problem) = allocation_model + (; sources) = allocation_model (; allocation) = p (; subnetwork_id) = allocation_model (; subnetwork_allocateds) = allocation - source_constraints = problem[:source] main_network_source_edges = get_main_network_connections(p, subnetwork_id) @@ -188,7 +187,9 @@ function set_initial_capacities_inlet!( # Set the source capacity to the sum over priorities of the values allocated to the subnetwork over this edge sum(subnetwork_allocateds[edge_id]) end - JuMP.set_normalized_rhs(source_constraints[edge_id], source_capacity) + source = sources[edge_id] + @assert source.type == AllocationSourceType.main_to_sub + source.capacity[] = source_capacity end return nothing end @@ -201,11 +202,10 @@ function set_initial_capacities_source!( allocation_model::AllocationModel, p::Parameters, )::Nothing - (; problem) = allocation_model + (; sources) = allocation_model (; graph, allocation) = p (; mean_input_flows) = allocation (; subnetwork_id) = allocation_model - source_constraints = problem[:source] main_network_source_edges = get_main_network_connections(p, subnetwork_id) for edge_metadata in values(graph.edge_data) @@ -214,12 +214,9 @@ function set_initial_capacities_source!( # If it is a source edge for this allocation problem if edge ∉ main_network_source_edges # Reset the source to the 
averaged flow over the last allocation period - source_capacity = mean_input_flows[edge][] - JuMP.set_normalized_rhs( - source_constraints[edge], - # It is assumed that the allocation procedure does not have to be differentiated. - source_capacity, - ) + source = sources[edge] + @assert source.type == AllocationSourceType.edge + source.capacity[] = mean_input_flows[edge][] end end end @@ -227,19 +224,58 @@ end """ -Adjust the source capacities by the flow used from the sources. +Reduce the capacity of a source by the amount of flow taken from it in the latest optimization. """ -function adjust_capacities_source!(allocation_model::AllocationModel)::Nothing - (; problem) = allocation_model - source_constraints = problem[:source] - F = problem[:F] - - for edge in only(source_constraints.axes) - # Subtract the allocated flow from the source - JuMP.set_normalized_rhs( - source_constraints[edge], - JuMP.normalized_rhs(source_constraints[edge]) - JuMP.value(F[edge]), +function reduce_source_capacity!(problem::JuMP.Model, source::AllocationSource)::Nothing + (; edge) = source + + used_capacity = + if source.type in ( + AllocationSourceType.edge, + AllocationSourceType.main_to_sub, + AllocationSourceType.user_return, ) + JuMP.value(problem[:F][edge]) + elseif source.type == AllocationSourceType.basin + JuMP.value(problem[:F_basin_out][edge[1]]) + elseif source.type == AllocationSourceType.buffer + JuMP.value(problem[:F_flow_buffer_out][edge[1]]) + else + error("Unknown source type") + end + + source.capacity_reduced[] = max(source.capacity_reduced[] - used_capacity, 0.0) + return nothing +end + +""" +Increase the capacity of sources if applicable. Possible for +user return flow and flow demand buffers. 
+""" +function increase_source_capacities!( + allocation_model::AllocationModel, + p::Parameters, + t::AbstractFloat, +)::Nothing + (; problem, sources) = allocation_model + (; user_demand) = p + + for source in values(sources) + (; edge) = source + + additional_capacity = if source.type == AllocationSourceType.user_return + id_user_demand = edge[1] + inflow_edge = user_demand.inflow_edge[id_user_demand.idx].edge + user_demand.return_factor[id_user_demand.idx](t) * + JuMP.value(problem[:F][inflow_edge]) + elseif source.type == AllocationSourceType.buffer + id_connector_node = edge[1] + JuMP.value(problem[:F_flow_buffer_in][id_connector_node]) + else + continue + end + + source.capacity_reduced[] += additional_capacity end return nothing end @@ -271,12 +307,10 @@ function set_initial_capacities_edge!( end """ -Set the values of the edge capacities. 2 cases: -- Before the first allocation solve, set the edge capacities to their full capacity; -- Before an allocation solve, subtract the flow used by allocation for the previous priority - from the edge capacities. +Before an allocation solve, subtract the flow used by allocation for the previous priority +from the edge capacities. 
""" -function adjust_capacities_edge!(allocation_model::AllocationModel)::Nothing +function reduce_edge_capacities!(allocation_model::AllocationModel)::Nothing (; problem) = allocation_model constraints_capacity = problem[:capacity] F = problem[:F] @@ -286,7 +320,10 @@ function adjust_capacities_edge!(allocation_model::AllocationModel)::Nothing # from the edge capacities JuMP.set_normalized_rhs( constraints_capacity[edge_id], - JuMP.normalized_rhs(constraints_capacity[edge_id]) - JuMP.value(F[edge_id]), + max( + 0.0, + JuMP.normalized_rhs(constraints_capacity[edge_id]) - JuMP.value(F[edge_id]), + ), ) end end @@ -385,45 +422,21 @@ vertical fluxes + the disk of storage above the maximum level / Δt_allocation """ function set_initial_capacities_basin!( allocation_model::AllocationModel, - p::Parameters, u::ComponentVector, + p::Parameters, t::Float64, )::Nothing - (; problem) = allocation_model + (; problem, sources) = allocation_model constraints_outflow = problem[:basin_outflow] for node_id in only(constraints_outflow.axes) - constraint = constraints_outflow[node_id] - JuMP.set_normalized_rhs( - constraint, - get_basin_capacity(allocation_model, u, p, t, node_id), - ) + source = sources[(node_id, node_id)] + @assert source.type == AllocationSourceType.basin + source.capacity[] = get_basin_capacity(allocation_model, u, p, t, node_id) end return nothing end -""" -Set the values of the basin outflows. 2 cases: -- Before the first allocation solve, set the capacities to their full capacity if there is surplus storage; -- Before an allocation solve, subtract the flow used by allocation for the previous priority - from the capacities. 
-""" -function adjust_capacities_basin!(allocation_model::AllocationModel)::Nothing - (; problem) = allocation_model - constraints_outflow = problem[:basin_outflow] - F_basin_out = problem[:F_basin_out] - - for node_id in only(constraints_outflow.axes) - constraint = constraints_outflow[node_id] - JuMP.set_normalized_rhs( - constraint, - JuMP.normalized_rhs(constraint) - JuMP.value(F_basin_out[node_id]), - ) - end - - return nothing -end - """ Set the demands of the user demand nodes as given by either a coupled model or a timeseries @@ -478,52 +491,27 @@ end """ Set the initial capacities of the UserDemand return flow sources to 0. """ -function set_initial_capacities_returnflow!(allocation_model::AllocationModel)::Nothing - (; problem) = allocation_model - constraints_outflow = problem[:source_user] - - for node_id in only(constraints_outflow.axes) - constraint = constraints_outflow[node_id] - capacity = 0.0 - JuMP.set_normalized_rhs(constraint, capacity) - end - return nothing -end - -""" -Add the return flow fraction of the inflow to the UserDemand nodes -to the capacity of the outflow source. -""" -function adjust_capacities_returnflow!( +function set_initial_capacities_returnflow!( allocation_model::AllocationModel, p::Parameters, - t::Float64, )::Nothing - (; graph, user_demand) = p - (; problem) = allocation_model + (; problem, sources) = allocation_model + (; user_demand) = p constraints_outflow = problem[:source_user] - F = problem[:F] for node_id in only(constraints_outflow.axes) - constraint = constraints_outflow[node_id] - capacity = - JuMP.normalized_rhs(constraint) + - user_demand.return_factor[node_id.idx](t) * - JuMP.value(F[(inflow_id(graph, node_id), node_id)]) - - JuMP.set_normalized_rhs(constraint, capacity) + source = sources[user_demand.outflow_edge[node_id.idx].edge] + @assert source.type == AllocationSourceType.user_return + source.capacity[] = 0.0 end - return nothing end """ -Set the demand of the flow demand nodes. 
2 cases: -- Before the first allocation solve, set the demands to their full value; -- Before an allocation solve, subtract the flow trough the node with a flow demand - from the total flow demand (which will be used at the priority of the flow demand only). +Before an allocation solve, subtract the flow through the node with a flow demand +from the total flow demand (which will be used at the priority of the flow demand only). """ -function adjust_demands!( +function reduce_demands!( allocation_model::AllocationModel, p::Parameters, priority_idx::Int, )::Nothing @@ -553,7 +541,7 @@ Subtract the allocated flow to the basin from its demand, to obtain the reduced demand used for goal programming """ -function adjust_demands!( +function reduce_demands!( allocation_model::AllocationModel, p::Parameters, ::Int, @@ -599,7 +587,7 @@ end Reduce the flow demand based on flow trough the node with the demand. Flow from any priority counts. """ -function adjust_demands!( +function reduce_demands!( allocation_model::AllocationModel, p::Parameters, ::Int, @@ -632,79 +620,17 @@ end Set the flow buffer of nodes with a flow demand to 0.0 """ function set_initial_capacities_buffer!(allocation_model::AllocationModel)::Nothing - (; problem) = allocation_model - constraints_flow_buffer = problem[:flow_buffer_outflow] - - for node_id in only(constraints_flow_buffer.axes) - constraint = constraints_flow_buffer[node_id] - buffer_capacity = 0.0 - JuMP.set_normalized_rhs(constraint, buffer_capacity) - end - return nothing -end - -""" -Increase the capacities of the flow buffers of nodes with a flow demand -by the inflow to the respective buffers. 
-""" -function adjust_capacities_buffer!(allocation_model::AllocationModel)::Nothing - (; problem) = allocation_model - + (; problem, sources) = allocation_model constraints_flow_buffer = problem[:flow_buffer_outflow] - F_flow_buffer_in = problem[:F_flow_buffer_in] - F_flow_buffer_out = problem[:F_flow_buffer_out] - for node_id in only(constraints_flow_buffer.axes) - constraint = constraints_flow_buffer[node_id] - - # The capacity should not be able to get below 0, but can get to small negative numbers, - # probably due to floating point errors. Therefore new_capacity = max(0, new_capacity) is applied. - buffer_capacity = max( - 0.0, - JuMP.normalized_rhs(constraint) + JuMP.value(F_flow_buffer_in[node_id]) - - JuMP.value(F_flow_buffer_out[node_id]), - ) - JuMP.set_normalized_rhs(constraint, buffer_capacity) + source = sources[(node_id, node_id)] + @assert source.type == AllocationSourceType.buffer + source.capacity[] = 0.0 end return nothing end -""" -Set the capacity of the outflow edge from a node with a flow demand: -- To Inf if the current priority is other than the priority of the flow demand -- To 0.0 if the current priority is equal to the priority of the flow demand - -This is done so that flow can go towards the node with the flow demand into its buffer, -to avoid the problem that the flow has nowhere to go after this node and to make sure -that this flow can be used for later priorities -""" -function set_capacities_flow_demand_outflow!( - allocation_model::AllocationModel, - p::Parameters, - priority_idx::Int, -)::Nothing - (; graph, allocation, flow_demand) = p - (; priorities) = allocation - (; problem) = allocation_model - priority = priorities[priority_idx] - constraints = problem[:flow_demand_outflow] - - for node_id in only(constraints.axes) - constraint = constraints[node_id] - node_id_flow_demand = only(inneighbor_labels_type(graph, node_id, EdgeType.control)) - priority_flow_demand = flow_demand.priority[node_id_flow_demand.idx] - - capacity = 
if priority == priority_flow_demand - 0.0 - else - Inf - end - - JuMP.set_normalized_rhs(constraint, capacity) - end -end - """ Save the demands and allocated flows for UserDemand and Basin. Note: Basin supply (negative demand) is only saved for the first priority. @@ -717,23 +643,23 @@ function save_demands_and_allocations!( )::Nothing (; graph, allocation, user_demand, flow_demand, basin) = p (; record_demand, priorities, mean_realized_flows) = allocation - (; subnetwork_id, problem) = allocation_model + (; subnetwork_id, sources, flow) = allocation_model node_ids = graph[].node_ids[subnetwork_id] - constraints_outflow = problem[:basin_outflow] - F = problem[:F] - F_basin_in = problem[:F_basin_in] - F_basin_out = problem[:F_basin_out] + # Loop over nodes in subnetwork for node_id in node_ids has_demand = false if node_id.type == NodeType.UserDemand + # UserDemand nodes has_demand = true demand = user_demand.demand[node_id.idx, priority_idx] allocated = user_demand.allocated[node_id.idx, priority_idx] realized = mean_realized_flows[(inflow_id(graph, node_id), node_id)] - elseif has_external_demand(graph, node_id, :level_demand)[1] + elseif node_id.type == NodeType.Basin && + has_external_demand(graph, node_id, :level_demand)[1] + # Basins basin_priority_idx = get_external_priority_idx(p, node_id) if priority_idx == 1 || basin_priority_idx == priority_idx @@ -741,18 +667,18 @@ function save_demands_and_allocations!( demand = 0.0 if priority_idx == 1 # Basin surplus - demand -= JuMP.normalized_rhs(constraints_outflow[node_id]) + demand -= sources[(node_id, node_id)].capacity[] end if priority_idx == basin_priority_idx # Basin demand demand += basin.demand[node_id.idx] end - allocated = - JuMP.value(F_basin_in[node_id]) - JuMP.value(F_basin_out[node_id]) + allocated = basin.allocated[node_id.idx] realized = mean_realized_flows[(node_id, node_id)] end else + # Connector node with flow demand has_demand, flow_demand_node_id = has_external_demand(graph, node_id, 
:flow_demand) if has_demand @@ -761,7 +687,7 @@ function save_demands_and_allocations!( demand = priority_idx == flow_priority_idx ? flow_demand.demand[flow_demand_node_id.idx,] : 0.0 - allocated = JuMP.value(F[(inflow_id(graph, node_id), node_id)]) + allocated = flow[(inflow_id(graph, node_id), node_id)] realized = mean_realized_flows[(inflow_id(graph, node_id), node_id)] end end @@ -791,13 +717,13 @@ function save_allocation_flows!( priority::Int32, optimization_type::OptimizationType.T, )::Nothing - (; flow_priority, problem, subnetwork_id, capacity) = allocation_model + (; flow, problem, subnetwork_id) = allocation_model (; allocation, graph) = p (; record_flow) = allocation F_basin_in = problem[:F_basin_in] F_basin_out = problem[:F_basin_out] - edges_allocation = keys(capacity.data) + edges_allocation = keys(flow.data) skip = false @@ -812,12 +738,12 @@ function save_allocation_flows!( flow_rate = 0.0 if haskey(graph, edge_1...) - flow_rate += flow_priority[edge_1] + flow_rate += flow[edge_1] sign_2 = -1.0 edge_metadata = graph[edge_1...] else edge_1_reverse = reverse(edge_1) - flow_rate -= flow_priority[edge_1_reverse] + flow_rate -= flow[edge_1_reverse] sign_2 = 1.0 edge_metadata = graph[edge_1_reverse...] 
end @@ -827,7 +753,7 @@ function save_allocation_flows!( if edge_2 == reverse(edge_1) && !(edge_1[1].type == NodeType.UserDemand || edge_1[2].type == NodeType.UserDemand) # If so, these edges are both processed in this iteration - flow_rate += sign_2 * flow_priority[edge_2] + flow_rate += sign_2 * flow[edge_2] skip = true end @@ -872,7 +798,7 @@ function allocate_to_users_from_connected_basin!( p::Parameters, priority_idx::Int, )::Nothing - (; flow_priority, problem) = allocation_model + (; flow, problem, sources) = allocation_model (; graph, user_demand) = p # Get all UserDemand nodes from this subnetwork @@ -888,24 +814,154 @@ function allocate_to_users_from_connected_basin!( demand = user_demand.demand_reduced[node_id.idx, priority_idx] # The capacity of the upstream basin - constraint = problem[:basin_outflow][upstream_basin_id] - capacity = JuMP.normalized_rhs(constraint) + source = sources[(upstream_basin_id, upstream_basin_id)] + @assert source.type == AllocationSourceType.basin + capacity = source.capacity[] # The allocated amount allocated = min(demand, capacity) # Subtract the allocated amount from the user demand and basin capacity user_demand.demand_reduced[node_id.idx, priority_idx] -= allocated - JuMP.set_normalized_rhs(constraint, capacity - allocated) + source.capacity[] -= allocated # Add the allocated flow - flow_priority[(upstream_basin_id, node_id)] += allocated + flow[(upstream_basin_id, node_id)] += allocated end end return nothing end +""" +Set the capacity of the source that is currently being optimized for to its actual capacity, +and the capacities of all other sources to 0. 
+""" +function set_source_capacity!( + allocation_model::AllocationModel, + source_current::AllocationSource, + optimization_type::OptimizationType.T, +)::Nothing + (; problem, sources) = allocation_model + constraints_source_edge = problem[:source] + constraints_source_basin = problem[:basin_outflow] + constraints_source_user_out = problem[:source_user] + constraints_source_buffer = problem[:flow_buffer_outflow] + + for source in values(sources) + (; edge) = source + + capacity_effective = if source == source_current + if optimization_type == OptimizationType.collect_demands && + source.type == AllocationSourceType.main_to_sub + Inf + else + source_current.capacity_reduced[] + end + else + 0.0 + end + + constraint = + if source.type in (AllocationSourceType.edge, AllocationSourceType.main_to_sub) + constraints_source_edge[edge] + elseif source.type == AllocationSourceType.basin + constraints_source_basin[edge[1]] + elseif source.type == AllocationSourceType.user_return + constraints_source_user_out[edge[1]] + elseif source.type == AllocationSourceType.buffer + constraints_source_buffer[edge[1]] + end + + JuMP.set_normalized_rhs(constraint, capacity_effective) + end + + return nothing +end + +""" +Solve the allocation problem for a single priority by optimizing for each source +in the subnetwork individually. 
+""" +function optimize_per_source!( + allocation::Allocation, + allocation_model::AllocationModel, + priority_idx::Integer, + u::ComponentVector, + p::Parameters, + t::AbstractFloat, + optimization_type::OptimizationType.T, +)::Nothing + (; problem, sources, subnetwork_id, flow) = allocation_model + (; priorities) = allocation + + priority = priorities[priority_idx] + + for source in values(sources) + # Skip source when it has no capacity + if optimization_type !== OptimizationType.collect_demands && + source.capacity_reduced[] == 0.0 + continue + end + + # Set the objective depending on the demands + # A new objective function is set instead of modifying the coefficients + # of an existing objective function because this is not supported for + # quadratic terms: + # https://jump.dev/JuMP.jl/v1.16/manual/objective/#Modify-an-objective-coefficient + set_objective_priority!(allocation_model, u, p, t, priority_idx) + + # Set only the capacity of the current source to nonzero + set_source_capacity!(allocation_model, source, optimization_type) + + JuMP.optimize!(problem) + @debug JuMP.solution_summary(problem) + if JuMP.termination_status(problem) !== JuMP.OPTIMAL + error( + "Allocation of subnetwork $subnetwork_id, priority $priority, source $source couldn't find optimal solution.", + ) + end + + # Add the values of the flows at this priority + for edge in only(problem[:F].axes) + flow[edge] += max(JuMP.value(problem[:F][edge]), 0.0) + end + + # Adjust capacities for the optimization for the next source + increase_source_capacities!(allocation_model, p, t) + reduce_source_capacity!(problem, source) + reduce_edge_capacities!(allocation_model) + + # Adjust demands for next optimization (in case of internal_sources -> collect_demands) + for parameter in propertynames(p) + demand_node = getfield(p, parameter) + if demand_node isa AbstractDemandNode + reduce_demands!(allocation_model, p, priority_idx, demand_node) + end + end + + # Adjust allocated flow to basins + 
increase_allocateds!(p.basin, problem) + end + return nothing +end + +""" +Keep track of how much is taken from or added to the basins in the subnetwork. +""" +function increase_allocateds!(basin::Basin, problem::JuMP.Model)::Nothing + (; allocated) = basin + + F_basin_in = problem[:F_basin_in] + F_basin_out = problem[:F_basin_out] + + for node_id in only(F_basin_in.axes) + allocated[node_id.idx] += + JuMP.value(F_basin_in[node_id]) - JuMP.value(F_basin_out[node_id]) + end + return nothing +end + function optimize_priority!( allocation_model::AllocationModel, u::ComponentVector, @@ -914,45 +970,34 @@ function optimize_priority!( priority_idx::Int, optimization_type::OptimizationType.T, )::Nothing - (; problem, flow_priority) = allocation_model - (; allocation) = p + (; flow) = allocation_model + (; allocation, basin) = p (; priorities) = allocation # Start the values of the flows at this priority at 0.0 - for edge in keys(flow_priority.data) - flow_priority[edge] = 0.0 + for edge in keys(flow.data) + flow[edge] = 0.0 end - set_capacities_flow_demand_outflow!(allocation_model, p, priority_idx) - - # Set the objective depending on the demands - # A new objective function is set instead of modifying the coefficients - # of an existing objective function because this is not supported for - # quadratic terms: - # https://jump.dev/JuMP.jl/v1.16/manual/objective/#Modify-an-objective-coefficient - set_objective_priority!(allocation_model, p, u, t, priority_idx) + # Start the allocated amounts to basins at this priority at 0.0 + basin.allocated .= 0.0 # Allocate to UserDemand nodes from the directly connected basin # This happens outside the JuMP optimization allocate_to_users_from_connected_basin!(allocation_model, p, priority_idx) # Solve the allocation problem for this priority - JuMP.optimize!(problem) - @debug JuMP.solution_summary(problem) - if JuMP.termination_status(problem) !== JuMP.OPTIMAL - (; subnetwork_id) = allocation_model - priority = 
priorities[priority_idx] - error( - "Allocation of subnetwork $subnetwork_id, priority $priority coudn't find optimal solution.", - ) - end - - # Add the values of the flows at this priority - for edge in only(problem[:F].axes) - flow_priority[edge] += JuMP.value(problem[:F][edge]) - end + optimize_per_source!( + allocation, + allocation_model, + priority_idx, + u, + p, + t, + optimization_type, + ) - # Assign the allocations to the UserDemand for this priority + # Assign the allocations to the UserDemand or subnetwork for this priority assign_allocations!(allocation_model, p, priority_idx, optimization_type) # Save the demands and allocated flows for all nodes that have these @@ -966,39 +1011,27 @@ function optimize_priority!( priorities[priority_idx], optimization_type, ) - - # Adjust capacities for the optimization for the next priority - adjust_capacities_source!(allocation_model) - adjust_capacities_edge!(allocation_model) - adjust_capacities_basin!(allocation_model) - adjust_capacities_buffer!(allocation_model) - adjust_capacities_returnflow!(allocation_model, p, t) - - # Adjust demands for next optimization (in case of internal_sources -> collect_demands) - for parameter in propertynames(p) - demand_node = getfield(p, parameter) - if demand_node isa AbstractDemandNode - adjust_demands!(allocation_model, p, priority_idx, demand_node) - end - end return nothing end """ -Set the initial capacities and demands which are recudes by usage in the -adjust_capacities_*! and adjust_demands_*! functions respectively. +Set the initial capacities and demands which are reduced by usage. 
""" function set_initial_values!( allocation_model::AllocationModel, - p::Parameters, u::ComponentVector, + p::Parameters, t::Float64, )::Nothing set_initial_capacities_source!(allocation_model, p) set_initial_capacities_edge!(allocation_model, p) - set_initial_capacities_basin!(allocation_model, p, u, t) + set_initial_capacities_basin!(allocation_model, u, p, t) set_initial_capacities_buffer!(allocation_model) - set_initial_capacities_returnflow!(allocation_model) + set_initial_capacities_returnflow!(allocation_model, p) + + for source in values(allocation_model.sources) + source.capacity_reduced[] = source.capacity[] + end set_initial_demands_user!(allocation_model, p, t) set_initial_demands_level!(allocation_model, u, p, t) @@ -1043,7 +1076,7 @@ function collect_demands!( ## Find internal sources optimization_type = OptimizationType.internal_sources set_initial_capacities_inlet!(allocation_model, p, optimization_type) - set_initial_values!(allocation_model, p, u, t) + set_initial_values!(allocation_model, u, p, t) # Loop over priorities for priority_idx in eachindex(priorities) @@ -1082,12 +1115,11 @@ function allocate_demands!( )::Nothing optimization_type = OptimizationType.allocate (; allocation) = p - (; subnetwork_id) = allocation_model (; priorities) = allocation set_initial_capacities_inlet!(allocation_model, p, optimization_type) - set_initial_values!(allocation_model, p, u, t) + set_initial_values!(allocation_model, u, p, t) # Loop over the priorities for priority_idx in eachindex(priorities) diff --git a/core/src/parameter.jl b/core/src/parameter.jl index 4e1c308d7..d4f32cde8 100644 --- a/core/src/parameter.jl +++ b/core/src/parameter.jl @@ -128,19 +128,42 @@ The caches are always initialized with zeros """ cache(len::Int)::Cache = LazyBufferCache(Returns(len); initializer! = set_zero!) +@enumx AllocationSourceType edge basin main_to_sub user_return buffer + +""" +Data structure for a single source within an allocation subnetwork. 
+edge: The outflow edge of the source +type: The type of source (edge, basin, main_to_sub, user_return, buffer) +capacity: The initial capacity of the source as determined by the physical layer +capacity_reduced: The capacity adjusted by passed optimizations +""" +@kwdef struct AllocationSource + edge::Tuple{NodeID, NodeID} + type::AllocationSourceType.T + capacity::Base.RefValue{Float64} = Ref(0.0) + capacity_reduced::Base.RefValue{Float64} = Ref(0.0) +end + +function Base.show(io::IO, source::AllocationSource) + (; edge, type) = source + print(io, "AllocationSource of type $type at edge $edge") +end + """ Store information for a subnetwork used for allocation. subnetwork_id: The ID of this allocation network capacity: The capacity per edge of the allocation network, as constrained by nodes that have a max_flow_rate -flow_priority: The flows over all the edges in the subnetwork for a certain priority (used for allocation_flow output) +flow: The flows over all the edges in the subnetwork for a certain priority (used for allocation_flow output) +sources: source data in preferred order of optimization problem: The JuMP.jl model for solving the allocation problem Δt_allocation: The time interval between consecutive allocation solves """ @kwdef struct AllocationModel subnetwork_id::Int32 capacity::JuMP.Containers.SparseAxisArray{Float64, 2, Tuple{NodeID, NodeID}} - flow_priority::JuMP.Containers.SparseAxisArray{Float64, 2, Tuple{NodeID, NodeID}} + flow::JuMP.Containers.SparseAxisArray{Float64, 2, Tuple{NodeID, NodeID}} + sources::OrderedDict{Tuple{NodeID, NodeID}, AllocationSource} problem::JuMP.Model Δt_allocation::Float64 end @@ -386,8 +409,9 @@ end }, } level_to_area::Vector{ScalarInterpolation} - # Demands for allocation if applicable + # Values for allocation if applicable demand::Vector{Float64} = zeros(length(node_id)) + allocated::Vector{Float64} = zeros(length(node_id)) # Data source for parameter updates time::StructVector{BasinTimeV1, C, Int} # Storage for 
each Basin at the previous time step diff --git a/core/test/allocation_test.jl b/core/test/allocation_test.jl index 47050c682..d90832fe3 100644 --- a/core/test/allocation_test.jl +++ b/core/test/allocation_test.jl @@ -9,30 +9,31 @@ cfg = Ribasim.Config(toml_path) db_path = Ribasim.database_path(cfg) db = SQLite.DB(db_path) - p = Ribasim.Parameters(db, cfg) + close(db) + (; graph, allocation) = p - allocation.mean_input_flows[(NodeID(:FlowBoundary, 1, db), NodeID(:Basin, 2, db))] = 4.5 + allocation.mean_input_flows[(NodeID(:FlowBoundary, 1, p), NodeID(:Basin, 2, p))] = 4.5 allocation_model = p.allocation.allocation_models[1] + (; flow) = allocation_model u = ComponentVector(; storage = zeros(length(p.basin.node_id))) - Ribasim.allocate_demands!(p, allocation_model, 0.0, u) + t = 0.0 + Ribasim.allocate_demands!(p, allocation_model, t, u) # Last priority (= 2) flows - F = allocation_model.problem[:F] - @test JuMP.value(F[(NodeID(:Basin, 2, db), NodeID(:Pump, 5, db))]) ≈ 0.0 - @test JuMP.value(F[(NodeID(:Basin, 2, db), NodeID(:UserDemand, 10, db))]) ≈ 0.5 - @test JuMP.value(F[(NodeID(:Basin, 8, db), NodeID(:UserDemand, 12, db))]) ≈ 2.0 - @test JuMP.value(F[(NodeID(:Basin, 6, db), NodeID(:Outlet, 7, db))]) ≈ 2.0 - @test JuMP.value(F[(NodeID(:FlowBoundary, 1, db), NodeID(:Basin, 2, db))]) ≈ 0.5 - @test JuMP.value(F[(NodeID(:Basin, 6, db), NodeID(:UserDemand, 11, db))]) ≈ 0.0 + @test flow[(NodeID(:Basin, 2, p), NodeID(:Pump, 5, p))] ≈ 0.0 + @test flow[(NodeID(:Basin, 2, p), NodeID(:UserDemand, 10, p))] ≈ 0.5 + @test flow[(NodeID(:Basin, 8, p), NodeID(:UserDemand, 12, p))] ≈ 3.0 rtol = 1e-5 + @test flow[(NodeID(:UserDemand, 12, p), NodeID(:Basin, 8, p))] ≈ 1.0 rtol = 1e-5 + @test flow[(NodeID(:Basin, 6, p), NodeID(:Outlet, 7, p))] ≈ 2.0 rtol = 1e-5 + @test flow[(NodeID(:FlowBoundary, 1, p), NodeID(:Basin, 2, p))] ≈ 0.5 + @test flow[(NodeID(:Basin, 6, p), NodeID(:UserDemand, 11, p))] ≈ 0.0 (; allocated) = p.user_demand @test allocated[1, :] ≈ [0.0, 0.5] @test allocated[2, 
:] ≈ [4.0, 0.0] - @test allocated[3, :] ≈ [0.0, 2.0] - - close(db) + @test allocated[3, :] ≈ [0.0, 3.0] atol = 1e-5 end @testitem "Allocation objective" begin @@ -48,12 +49,14 @@ end config = Ribasim.Config(toml_path) model = Ribasim.run(config) @test successful_retcode(model) - (; p) = model.integrator + (; u, p, t) = model.integrator (; user_demand) = p - problem = p.allocation.allocation_models[1].problem - objective = JuMP.objective_function(problem) + allocation_model = p.allocation.allocation_models[1] + Ribasim.set_initial_values!(allocation_model, u, p, t) + Ribasim.set_objective_priority!(allocation_model, u, p, t, 1) + objective = JuMP.objective_function(allocation_model.problem) @test objective isa JuMP.QuadExpr # Quadratic expression - F = problem[:F] + F = allocation_model.problem[:F] to_user_5 = F[(NodeID(:Basin, 4, p), NodeID(:UserDemand, 5, p))] to_user_6 = F[(NodeID(:Basin, 4, p), NodeID(:UserDemand, 6, p))] @@ -153,6 +156,9 @@ end # Solving for the main network, containing subnetworks as UserDemands allocation_model = allocation_models[1] (; problem) = allocation_model + main_source = + allocation_model.sources[(NodeID(:FlowBoundary, 1, p), NodeID(:Basin, 2, p))] + main_source.capacity_reduced[] = 4.5 Ribasim.optimize_priority!(allocation_model, u, p, t, 1, OptimizationType.allocate) # Main network objective function @@ -192,13 +198,14 @@ end @test all(allocation_flow.edge_exists) @test user_demand.allocated[2, :] ≈ [4.0, 0.0, 0.0] atol = 1e-3 - @test user_demand.allocated[7, :] ≈ [0.0, 0.0, 0.000112] atol = 1e-5 + @test user_demand.allocated[7, :] ≈ [0.0, 0.0, 0.0] atol = 1e-3 end @testitem "Subnetworks with sources" begin using SQLite using Ribasim: NodeID, OptimizationType using ComponentArrays: ComponentVector + using OrdinaryDiffEqCore: get_du using JuMP toml_path = normpath( @@ -236,6 +243,33 @@ end [0.001, 0.0, 0.0] rtol = 1e-4 @test subnetwork_demands[(NodeID(:Basin, 10, p), NodeID(:Pump, 38, p))][1:2] ≈ [0.001, 0.001] rtol = 1e-4 + + 
model = Ribasim.run(toml_path) + (; u, p, t) = model.integrator + (; current_storage) = p.basin.current_properties + current_storage = current_storage[Float64[]] + du = get_du(model.integrator) + Ribasim.formulate_storages!(current_storage, du, u, p, t) + + @test current_storage ≈ Float32[ + 1.0346e6, + 1.0346e6, + 1.0346e6, + 1.0346e6, + 1.0346e6, + 13.83, + 40.10, + 6.029e5, + 4641, + 2402, + 6.03995, + 928.8, + 8.017, + 10417, + 5.619, + 10417, + 4.057, + ] end @testitem "Allocation level control" begin @@ -281,7 +315,7 @@ end # In this section (and following sections) the basin has no longer a (positive) demand, # since precipitation provides enough water to get the basin to its target level # The FlowBoundary flow gets fully allocated to the UserDemand - stage_2 = 2 * Δt_allocation .<= t .<= 9 * Δt_allocation + stage_2 = 2 * Δt_allocation .<= t .<= 8 * Δt_allocation stage_2_start_idx = findfirst(stage_2) u_stage_2(τ) = storage[stage_2_start_idx] + ϕ * (τ - t[stage_2_start_idx]) @test storage[stage_2] ≈ u_stage_2.(t[stage_2]) rtol = 1e-4 @@ -290,14 +324,14 @@ end # even though initially the level is below the maximum level.
This is because the simulation # anticipates that the current precipitation is going to bring the basin level over # its maximum level - stage_3 = 9 * Δt_allocation .<= t .<= 13 * Δt_allocation + stage_3 = 8 * Δt_allocation .<= t .<= 13 * Δt_allocation stage_3_start_idx = findfirst(stage_3) u_stage_3(τ) = storage[stage_3_start_idx] + (q + ϕ - d) * (τ - t[stage_3_start_idx]) @test storage[stage_3] ≈ u_stage_3.(t[stage_3]) rtol = 1e-4 # At the start of this section precipitation stops, and so the UserDemand # partly uses surplus water from the basin to fulfill its demand - stage_4 = 13 * Δt_allocation .<= t .<= 15 * Δt_allocation + stage_4 = 13 * Δt_allocation .<= t .<= 17 * Δt_allocation stage_4_start_idx = findfirst(stage_4) u_stage_4(τ) = storage[stage_4_start_idx] + (q - d) * (τ - t[stage_4_start_idx]) @test storage[stage_4] ≈ u_stage_4.(t[stage_4]) rtol = 1e-4 @@ -305,7 +339,7 @@ end # From this point the basin is in a dynamical equilibrium, # since the basin has no supply so the UserDemand abstracts precisely # the flow from the level boundary - stage_5 = 16 * Δt_allocation .<= t + stage_5 = 18 * Δt_allocation .<= t stage_5_start_idx = findfirst(stage_5) u_stage_5(τ) = storage[stage_5_start_idx] @test storage[stage_5] ≈ u_stage_5.(t[stage_5]) rtol = 1e-4 @@ -364,15 +398,15 @@ end :flow_demand, )[1] - allocation_model = allocation.allocation_models[1] - (; problem) = allocation_model + (; allocation_models, record_flow) = allocation + allocation_model = allocation_models[1] + (; problem, flow, sources) = allocation_model F = problem[:F] F_flow_buffer_in = problem[:F_flow_buffer_in] F_flow_buffer_out = problem[:F_flow_buffer_out] node_id_with_flow_demand = NodeID(:TabulatedRatingCurve, 2, p) - constraint_flow_out = problem[:flow_demand_outflow][node_id_with_flow_demand] # Test flow conservation constraint containing flow buffer constraint_with_flow_buffer = @@ -383,16 +417,12 @@ end F_flow_buffer_in[node_id_with_flow_demand] + 
F_flow_buffer_out[node_id_with_flow_demand] - constraint_flow_demand_outflow = - JuMP.constraint_object(problem[:flow_demand_outflow][node_id_with_flow_demand]) - @test constraint_flow_demand_outflow.func == - F[(node_id_with_flow_demand, NodeID(:Basin, 3, p))] + 0.0 - @test constraint_flow_demand_outflow.set.upper == 0.0 - t = 0.0 (; u) = model.integrator optimization_type = OptimizationType.internal_sources - Ribasim.set_initial_values!(allocation_model, p, u, t) + Ribasim.set_initial_values!(allocation_model, u, p, t) + sources[(NodeID(:LevelBoundary, 1, p), node_id_with_flow_demand)].capacity_reduced[] = + 2e-3 # Priority 1 Ribasim.optimize_priority!( @@ -404,10 +434,13 @@ end optimization_type, ) objective = JuMP.objective_function(problem) + @test JuMP.UnorderedPair( + F_flow_buffer_in[node_id_with_flow_demand], + F_flow_buffer_in[node_id_with_flow_demand], + ) in keys(objective.terms) # Reduced demand @test flow_demand.demand[1] ≈ flow_demand.demand_itp[1](t) - 0.001 rtol = 1e-3 - @test JuMP.normalized_rhs(constraint_flow_out) == Inf ## Priority 2 Ribasim.optimize_priority!( @@ -421,8 +454,7 @@ end # No demand left @test flow_demand.demand[1] < 1e-10 # Allocated - @test JuMP.value(only(F_flow_buffer_in)) ≈ 0.001 rtol = 1e-3 - @test JuMP.normalized_rhs(constraint_flow_out) == 0.0 + @test JuMP.value(only(F_flow_buffer_in)) ≈ only(flow_demand.demand) atol = 1e-10 ## Priority 3 Ribasim.optimize_priority!( @@ -433,10 +465,8 @@ end 3, optimization_type, ) - @test JuMP.normalized_rhs(constraint_flow_out) == Inf # The flow from the source is used up in previous priorities - @test JuMP.value(F[(NodeID(:LevelBoundary, 1, p), node_id_with_flow_demand)]) ≈ 0 atol = - 1e-10 + @test flow[(NodeID(:LevelBoundary, 1, p), node_id_with_flow_demand)] ≈ 0 atol = 1e-10 # So flow from the flow buffer is used for UserDemand #4 @test JuMP.value(F_flow_buffer_out[node_id_with_flow_demand]) ≈ 0.001 rtol = 1e-3 # Flow taken from buffer @@ -454,10 +484,6 @@ end 4, optimization_type, ) - 
# Get demand from buffers - d = user_demand.demand_itp[3][4](t) - @test JuMP.value(F[(NodeID(:UserDemand, 4, p), NodeID(:Basin, 7, p))]) + - JuMP.value(F[(NodeID(:UserDemand, 6, p), NodeID(:Basin, 7, p))]) ≈ d rtol = 1e-3 # Realized flow demand model = Ribasim.run(toml_path) @@ -562,10 +588,10 @@ end priority_idx = 2 allocation_model = first(p.allocation.allocation_models) - Ribasim.set_initial_values!(allocation_model, p, u, t) - Ribasim.set_objective_priority!(allocation_model, p, u, t, priority_idx) + Ribasim.set_initial_values!(allocation_model, u, p, t) + Ribasim.set_objective_priority!(allocation_model, u, p, t, priority_idx) Ribasim.allocate_to_users_from_connected_basin!(allocation_model, p, priority_idx) - flow_data = allocation_model.flow_priority.data + flow_data = allocation_model.flow.data @test flow_data[(NodeID(:FlowBoundary, 1, p), NodeID(:Basin, 2, p))] == 0.0 @test flow_data[(NodeID(:Basin, 2, p), NodeID(:UserDemand, 3, p))] == 0.0015 @test flow_data[(NodeID(:UserDemand, 3, p), NodeID(:Basin, 5, p))] == 0.0 diff --git a/core/test/run_models_test.jl b/core/test/run_models_test.jl index d7d1bf71b..72c541320 100644 --- a/core/test/run_models_test.jl +++ b/core/test/run_models_test.jl @@ -313,64 +313,6 @@ end @test model.integrator.p.tabulated_rating_curve.table[end].t[end] == 1.2 end -@testitem "Profile" begin - import Tables - using DataInterpolations: LinearInterpolation, integral, invert_integral - - function lookup(profile, S) - level_to_area = LinearInterpolation(profile.A, profile.h; extrapolate = true) - storage_to_level = invert_integral(level_to_area) - - level = storage_to_level(max(S, 0.0)) - area = level_to_area(level) - return area, level - end - - n_interpolations = 100 - storage = range(0.0, 1000.0, n_interpolations) - - # Covers interpolation for constant and non-constant area, extrapolation for constant area - A = [1e-9, 100.0, 100.0] - h = [0.0, 10.0, 15.0] - S = integral.(Ref(LinearInterpolation(A, h)), h) - profile = (; S, A, h) - 
- # On profile points we reproduce the profile - for (; S, A, h) in Tables.rows(profile) - @test lookup(profile, S) == (A, h) - end - - # Robust to negative storage - @test lookup(profile, -1.0) == (profile.A[1], profile.h[1]) - - # On the first segment - S = 100.0 - A, h = lookup(profile, S) - @test h ≈ sqrt(S / 5) - @test A ≈ 10 * h - - # On the second segment and extrapolation - for S in [500.0 + 100.0, 1000.0 + 100.0] - local A, h - S = 500.0 + 100.0 - A, h = lookup(profile, S) - @test h ≈ 10.0 + (S - 500.0) / 100.0 - @test A == 100.0 - end - - # Covers extrapolation for non-constant area - A = [1e-9, 100.0] - h = [0.0, 10.0] - S = integral.(Ref(LinearInterpolation(A, h)), h) - - profile = (; A, h, S) - - S = 500.0 + 100.0 - A, h = lookup(profile, S) - @test h ≈ sqrt(S / 5) - @test A ≈ 10 * h -end - @testitem "Outlet constraints" begin using DataFrames: DataFrame using SciMLBase: successful_retcode diff --git a/core/test/utils_test.jl b/core/test/utils_test.jl index 0b8e2c3f9..9f07f956b 100644 --- a/core/test/utils_test.jl +++ b/core/test/utils_test.jl @@ -40,6 +40,64 @@ end @test !Ribasim.basin_bottom(basin, NodeID(:Terminal, 6, 1))[1] end +@testitem "Profile" begin + import Tables + using DataInterpolations: LinearInterpolation, integral, invert_integral + + function lookup(profile, S) + level_to_area = LinearInterpolation(profile.A, profile.h; extrapolate = true) + storage_to_level = invert_integral(level_to_area) + + level = storage_to_level(max(S, 0.0)) + area = level_to_area(level) + return area, level + end + + n_interpolations = 100 + storage = range(0.0, 1000.0, n_interpolations) + + # Covers interpolation for constant and non-constant area, extrapolation for constant area + A = [1e-9, 100.0, 100.0] + h = [0.0, 10.0, 15.0] + S = integral.(Ref(LinearInterpolation(A, h)), h) + profile = (; S, A, h) + + # On profile points we reproduce the profile + for (; S, A, h) in Tables.rows(profile) + @test lookup(profile, S) == (A, h) + end + + # Robust to negative 
storage + @test lookup(profile, -1.0) == (profile.A[1], profile.h[1]) + + # On the first segment + S = 100.0 + A, h = lookup(profile, S) + @test h ≈ sqrt(S / 5) + @test A ≈ 10 * h + + # On the second segment and extrapolation + for S in [500.0 + 100.0, 1000.0 + 100.0] + local A, h + S = 500.0 + 100.0 + A, h = lookup(profile, S) + @test h ≈ 10.0 + (S - 500.0) / 100.0 + @test A == 100.0 + end + + # Covers extrapolation for non-constant area + A = [1e-9, 100.0] + h = [0.0, 10.0] + S = integral.(Ref(LinearInterpolation(A, h)), h) + + profile = (; A, h, S) + + S = 500.0 + 100.0 + A, h = lookup(profile, S) + @test h ≈ sqrt(S / 5) + @test A ≈ 10 * h +end + @testitem "Convert levels to storages" begin using StructArrays: StructVector using Logging diff --git a/docs/concept/allocation.qmd b/docs/concept/allocation.qmd index 0901ba7d3..f90a83cec 100644 --- a/docs/concept/allocation.qmd +++ b/docs/concept/allocation.qmd @@ -7,7 +7,7 @@ Allocation is the process of assigning an allocated flow rate to demand nodes in The allocation problem is solved per subnetwork (and main network) of the Ribasim model. Each subnetwork is used to formulate an optimization problem with the [JuMP](https://jump.dev/JuMP.jl/stable/) package, which is solved using the [HiGHS solver](https://highs.dev/). For more in-depth information see also the example of solving the maximum flow problem with `JuMP.jl` [here](https://jump.dev/JuMP.jl/stable/tutorials/linear/network_flows/#The-max-flow-problem). -Before the optimization for each priority there is a simple step that tries to allocate flow to the `UserDemand` nodes from the their directly connected basin. +Before the optimization for each priority there is a simple step that tries to allocate flow to the UserDemand nodes from the Basin directly upstream. :::{.callout-note} within this *Allocation* section the main network is also considered to be a subnetwork. 
@@ -36,7 +36,7 @@ The following data of the parameters and state of a Ribasim model are relevant f ### The subnetwork -The allocation problem is solved per subnetwork, which is given by a subset $S \subset V$ of node ids. Different subnetworks are disjoint from eachother. +The allocation problem is solved per subnetwork, which is given by a subset $S \subset V$ of node IDs. Different subnetworks are disjoint from each other. Nodes can also not be part of any subnetwork. ### Source flows @@ -63,7 +63,7 @@ On this page we assume that the priorities are given by all integers from $1$ to ### Flow demands -The subnetwork contains a subset of nodes $FD_S \subset S$ which have a demand of a single priority $p_{\text{fd}}$. With this we define +The subnetwork contains a subset of nodes $FD_S \subset S$ which have a demand of a single priority $p_{\text{fd}}$ for the flow through that node. With this we define $$ d^p_i(t) = \begin{cases} @@ -77,7 +77,7 @@ for all $i \in FD_S$. Here $d^{p_{\text{df}}}$ is given by the original flow dem ### Vertical fluxes and local storage -Apart from the source flows denoted by edges, there are other sources of water in the subnetwork, associated with the basins in the subnetwork $B_S = B \cap S$. Firstly there is the average over the last allocation interval $\Delta t_{\text{alloc}}$ of the vertical fluxes (precipitation, evaporation, infiltration and drainage) for each basin: +Apart from the source flows denoted by edges, there are other sources of water in the subnetwork, associated with the Basins in the subnetwork $B_S = B \cap S$. First, there is the average over the last allocation interval $\Delta t_{\text{alloc}}$ of the vertical fluxes (precipitation, evaporation, infiltration and drainage) for each Basin: $$ \phi_i(t) = \frac{1}{\Delta t_{\text{alloc}}}\int_{t - \Delta t_{\text{alloc}}}^t \left[Q_{P,i}(t') - Q_{E,i}(t') + Q_{\text{drn},i}(t') - Q_{\text{inf},i}(t') \right] dt', \quad \forall i \in B_S.
$$ @@ -102,7 +102,7 @@ for all $i \in B_S$. Note that the basin demand has only a single priority, so f Nodes in the Ribasim model that have a `max_flow_rate`, i.e. Pump, Outlet and LinearResistance, put a constraint on the flow through that node. Some nodes only allow flow in one direction, like Pump, Outlet and TabulatedRatingCurve. #### UserDemand return flows -UserDemand nodes dictate proportional relationships between flows over edges in the subnetwork. The return factor is given by $0 \le r_i \le 1, i \in U_S$. +UserDemand nodes dictate proportional relationships between flows over edges in the subnetwork. The return factor is given by $0 \le r_i(t) \le 1, i \in U_S$. ## The subnetwork The subnetwork consists of a set of nodes $S \subset V$ and edges @@ -123,7 +123,6 @@ The capacities are determined in different ways: - If an edge does not exist in the allocation network, i.e. $(i,j) \notin E_S$ for certain $1 \le i,j\le n'$, then $(C_S)_{i,j} = 0$; - The capacity of the edge $e \in E_S$ is given by the smallest `max_flow_rate` of the nodes along the equivalent edges in the subnetwork. If there are no nodes with a `max_flow_rate`, the edge capacity is infinite; - If the edge is a source, the capacity of the edge is given by the flow rate of that source; -- If an edge comes from a node with a flow demand, it has infinite capacity at priorities other than this of this flow demand, and zero capacity otherwise. There are also capacities for special edges: @@ -133,7 +132,7 @@ There are also capacities for special edges: # The optimization problem -The optimization problem for a subnetwork is a linear optimization problem consisting of an objective function with associated constraints on a set of variables, all of which are introduced below. +The optimization problem for a subnetwork consists of a quadratic objective function with associated linear constraints on a set of variables, all of which are introduced below. 
## The optimization variables @@ -145,7 +144,7 @@ There are several types of variable whose value has to be determined to solve th ## The optimization objective -The goal of allocation is to get the flow to nodes with demands as close as possible to these demands. To achieve this, a sum error of terms is minimized. +The goal of allocation is to get the flow to nodes with demands as close as possible to these demands. To achieve this, a sum of error terms is minimized. $$ \min E_{\text{user demand}} + E_{\text{level demand}} + E_{\text{flow demand}} @@ -208,7 +207,9 @@ $$ $$ :::{.callout-note} -When performing subnetwork demand collection, these capacities are set to $\infty$ for edges which connect the main network to a subnetwork. For all other sources the capacity is set to $0$, so that demand collection only uses flow from the main network inlet. +There are several things to note about the source constraints: +- The sources are not all used at once. There is an optimization for each source in a subnetwork, where only one source has nonzero capacity. +- When performing subnetwork demand collection, these capacities are set to $\infty$ for edges which connect the main network to a subnetwork. ::: Similar constraints hold for the flow out of basins, flow demand buffers and user demand outflow sources: @@ -225,7 +226,7 @@ F_{ij} \le (C^{UD}_S)_i, \quad \forall i \in U_S, \quad V_S^{\text{out}}(i) = \{ $$ Here we use that each UserDemand node in the allocation network has a unique outflow edge. The user outflow source capacities are increased after each optimization solve by the return fraction: $$ - r_i \cdot F_{ki}, \quad V_S^{\text{in}}(i) = \{k\}. + r_i(t) \cdot F_{ki}, \quad V_S^{\text{in}}(i) = \{k\}. $$ - Flow sign: Furthermore there are the non-negativity constraints for the flows and allocations, see [The optimization variables](/concept/allocation.qmd#the-optimization-variables). 
@@ -244,15 +245,14 @@ using SQLite using ComponentArrays: ComponentVector toml_path = normpath(@__DIR__, "../../generated_testmodels/allocation_example/ribasim.toml") -p = Ribasim.Model(toml_path).integrator.p -u = ComponentVector(; storage = zeros(length(p.basin.node_id))) +model = Ribasim.Model(toml_path) +(; u, p, t) = model.integrator allocation_model = p.allocation.allocation_models[1] -t = 0.0 priority_idx = 1 -Ribasim.set_objective_priority!(allocation_model, p, u, t, priority_idx) -Ribasim.set_initial_values!(allocation_model, p, u, t) +Ribasim.set_initial_values!(allocation_model, u, p, t) +Ribasim.set_objective_priority!(allocation_model, u, p, t, priority_idx) println(p.allocation.allocation_models[1].problem) ``` diff --git a/docs/dev/allocation.qmd b/docs/dev/allocation.qmd index 44800e7d9..4b1cb1595 100644 --- a/docs/dev/allocation.qmd +++ b/docs/dev/allocation.qmd @@ -2,12 +2,12 @@ title: "Allocation" --- # Overview of allocation implementation {#sec-allocation-overview} -In this document, the allocation workflow is explained. Below is an overview of it. +In this document the allocation workflow is explained. Below is an overview of it. ```{mermaid} flowchart TD subgraph update_allocation direction TB - G[update mean flows]-->E[collect demand] + G[update mean physical flows for allocation input]-->E[collect demand] E-->F[allocate] end style update_allocation fill:#9ff @@ -21,51 +21,31 @@ If allocation is used in a model, [Allocation structs](#sec-allocation-struct) a The allocation struct stores the data that is needed for the calculations and stores also the results of the calculation. In allocation, optimization is an essential part. `JuMP.jl` is used to model and solve the optimization problems that are defined by allocation. -The [AllocationModel struct](#sec-allocation-model-struct) is used for constructing the JuMP model. +The [`AllocationModel` struct](#sec-allocation-model-struct) is used for constructing the JuMP model. 
When an instance of `AllocationModel` is created, a JuMP optimization model is defined and initialized in the instance. More details on how allocation interacts with `JuMP.jl` is explained [here](#sec-jump-problem). After initialization, as the simulation starts, the allocation problem is solved and updated after every allocation timestep (which is specified in the TOML). -With every allocation timestep a new optimization problem is formulated and solved, using the latest available (simulation) model conditions and forcing and demand predictions. +With every allocation timestep a new optimization problem is formulated and solved, using the latest available data from the physical layer model and demands of the demand nodes. -The update of allocation (`update_allocation`) is repeating and spread into three parts: +The update of allocation (`update_allocation!`) is repeating and divided into three parts: -- Updating the mean flows. The mean flow data is only used only for output, not used by any internal functions. -- ["Collect demand"](/concept/allocation.qmd#sec-high-level-algorithm). This step initialize and solve the optimization problems that collects the demand from the subnetworks. +- Updating the mean flows. The mean flow data is used for output and to determine the capacity of sources in the allocation model. +- ["Collect demand"](/concept/allocation.qmd#sec-high-level-algorithm). This step initializes and solves the optimization problems that collects the demand from the subnetworks. - ["Allocate"](/concept/allocation.qmd#sec-high-level-algorithm). This step solves the optimization problems that allocates the demand. For the main network this step allocates to the subnetworks and demand nodes that are in the main network. For the subnetwork this step allocates to the demand nodes. -The steps "collect demand" and "allocate" correspond to the function `collect_demand` and `allocate_demand` in the code. 
+The steps "collect demand" and "allocate" correspond to the function `collect_demand!` and `allocate_demands!` in the code. -The iteration stops when it reaches the end time step. +The iteration stops when it reaches the end time of the simulation. ## The `Allocation` struct {#sec-allocation-struct} The `Allocation` struct stores necessary data and calculation results. -|field | type | description| -|------------ | -------- |---------------------------------------- | -|subnetwork_ids | Vector{Int32} | The unique sorted allocation network IDs| -|allocation_models | AllocationModel | The allocation models for the main network and subnetworks corresponding to subnetwork_ids| -|main_network_connections |Vector{Vector{Tuple{NodeID, NodeID}}} | (from_id, to_id) from the main network to the subnetwork per subnetwork| -|priorities |Vector{Int32}| All used priority values.| -|subnetwork_demands | Dict{Tuple{NodeID, NodeID}, Vector{Float64}} | The demand of an edge from the main network to a subnetwork| -|subnetwork_allocateds |Dict{Tuple{NodeID, NodeID}, Vector{Float64}} | The allocated flow of an edge from the main network to a subnetwork| -|mean_input_flows | Dict{Tuple{NodeID, NodeID}, Float64} | Flows averaged over Δt_allocation over edges that are allocation sources| -|mean_realized_flows | Dict{Tuple{NodeID, NodeID}, Float64} | Flows averaged over Δt_allocation over edges that realize a demand| -|record_demand | | A record of demands and allocated flows for nodes that have these| -|record_flow | | A record of all flows computed by allocation optimization, eventually saved to output file| - ## The `AllocationModel` struct {#sec-allocation-model-struct} The `AllocationModel` struct has all the data that is needed for the JuMP optimization problem. 
-|field | type | description| -|------------ | -------- |---------------------------------------- | -|subnetwork_id |Int32 |The ID of this allocation network| -|capacity | JuMP.Containers.SparseAxisArray | The capacity per edge of the allocation network, as constrained by nodes that have a max_flow_rate| -|problem | JuMP.Model | The JuMP.jl model for solving the allocation problem| -|Δt_allocation | Float64 | The time interval between consecutive allocation solves | - ## JuMP problem interaction {#sec-jump-problem} When working with optimization problems using JuMP, there are three fundamental components that need to be defined: @@ -82,15 +62,15 @@ More details about setting up variables in allocation can be found in the sectio - Constraints: These are the constraints that the optimization variables must satisfy. They are defined using the [`@constraint`](https://jump.dev/JuMP.jl/stable/api/JuMP/#@constraint) macro. The definition of the edge capacity constraints is shown in section [below](#sec-constraints-and-capacities). -`add_constraints_...` functions are used to [add constraints](#sec-initial-constraints) to the optimization problem. -The [initial value of the constraints](#sec-constraints-and-capacities) is set in the function `set_initial_values_...`. +`add_constraints_*` functions are used to [add constraints](#sec-initial-constraints) to the optimization problem. +The [initial value of the constraints](#sec-constraints-and-capacities) is set in the function `set_initial_values_*`. During the iteration, the constraints are updated based on the current state of the allocation network. -When [looping over priorities](#updating-capacities), the constraints are updated by the function `adjust_...`. +When [looping over priorities](#updating-capacities), the constraints are updated by the function `adjust_*`. - Objective function: This is the function that sets the objective of the optimization problem. 
It is defined using the [`@objective`](https://jump.dev/JuMP.jl/stable/api/JuMP/#@objective) macro. -The functions `JuMP.normalized_rhs` and `JuMP.set_normalized_rhs` are used to read and write the constant right hand side of constraints. +The functions `JuMP.normalized_rhs` and `JuMP.set_normalized_rhs` are used to read and write the constant right hand side of constraints respectively. For example, to update the capacity of one of the edges, `JuMP.normalized_rhs` moves all the constants to the right-hand sides and all variables to the left-hand side and `JuMP.set_normalized_rhs` sets the new right-hand-side value. ```julia @@ -125,7 +105,7 @@ There are three types of variables in the optimization problems: - flows in and out of a basin with a level demand - flows in and out of nodes that have a buffer, which are nodes that have a flow demand -The function `add_variables_flow` is used to add the variable of flows between the edges. The variables are obtained from the capacity array. +The function `add_variables_flow!` is used to add the variable of flows between the edges. The variables are obtained from the capacity array. And variables named by `F($startnode, $endnode)` are created. ```julia @@ -157,7 +137,7 @@ All the variables are greater and equal to 0. This is set when the variables are Other constraints are `capacity`, `source_user`, `source`, `flow_conservation`, `fractional_flow`, `basin_outflow`, `flow_buffer_outflow` and `flow_demand_outflow`. -For each set of constraints, a function named `add_constrains_[constraints name]` is created. +For each set of constraints, a function named `add_constraints_[constraints name]!` exists. Take `add_constraints_user_source` as an example, the nodes that are relevant for the constraints are added to the optimization problem by calling JuMP.\@constraint.