diff --git a/.github/workflows/core_compat_helper.yml b/.github/workflows/core_compat_helper.yml index 6db7c8918..ac35110da 100644 --- a/.github/workflows/core_compat_helper.yml +++ b/.github/workflows/core_compat_helper.yml @@ -37,13 +37,7 @@ jobs: - name: "Run CompatHelper" run: | import CompatHelper - CompatHelper.main(; subdirs=[ - "core", - "docs", - "build/create_binaries", - "build/libribasim", - "build/ribasim_cli" - ]) + CompatHelper.main(; subdirs=["core"]) shell: julia --color=yes {0} env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.gitignore b/.gitignore index 56a0cdec8..7b5586e8c 100644 --- a/.gitignore +++ b/.gitignore @@ -10,8 +10,8 @@ docs/build/ docs/site/ /generated_testmodels -build/create_binaries/ribasim_cli/ -build/create_binaries/libribasim/ +build/ribasim_cli/ +build/libribasim/ JuliaSysimage.dll LocalPreferences.toml @@ -147,7 +147,7 @@ dmypy.json .pyre/ /.luarc.json -build/ribasim_cli/tests/temp/ +build/tests/temp/ python/ribasim_api/tests/temp/ report.xml /utils/juliaup diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 02f29b113..c34f7a757 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -14,7 +14,7 @@ repos: exclude: '.teamcity' - id: trailing-whitespace - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.9 + rev: v0.1.15 hooks: - id: ruff types_or: [python, pyi, jupyter] @@ -26,6 +26,6 @@ repos: hooks: - id: nbstripout - repo: https://github.com/crate-ci/typos - rev: v1.16.26 + rev: v1.17.2 hooks: - id: typos diff --git a/.teamcity/Ribasim/buildTypes/Ribasim_Ribasim_MakeGitHubRelease.xml b/.teamcity/Ribasim/buildTypes/Ribasim_Ribasim_MakeGitHubRelease.xml index 3dd85bf1c..341866805 100644 --- a/.teamcity/Ribasim/buildTypes/Ribasim_Ribasim_MakeGitHubRelease.xml +++ b/.teamcity/Ribasim/buildTypes/Ribasim_Ribasim_MakeGitHubRelease.xml @@ -13,27 +13,9 @@ set -euxo pipefail . /usr/share/Modules/init/bash -module load github -# Get the name of the currently checked out tag -tag_name=$(git describe --tags --exact-match 2>/dev/null) - -# Check if a tag is checked out -if [ -n "$tag_name" ]; then - echo "Currently checked out tag: $tag_name" - - # Create a release using gh - gh release create "$tag_name" \ - --generate-notes \ - ribasim_cli_linux.zip \ - ribasim_cli_windows.zip \ - ribasim_qgis.zip \ - generated_testmodels.zip - - echo "Release created successfully." - -else - echo "No tag is currently checked out." 
-fi]]> +module load pixi +pixi run github-release +]]> @@ -130,4 +112,3 @@ fi]]> - diff --git a/.teamcity/Ribasim_Linux/buildTypes/Ribasim_Linux_BuildLibribasim.xml b/.teamcity/Ribasim_Linux/buildTypes/Ribasim_Linux_BuildLibribasim.xml index 21460aa82..f0595b117 100644 --- a/.teamcity/Ribasim_Linux/buildTypes/Ribasim_Linux_BuildLibribasim.xml +++ b/.teamcity/Ribasim_Linux/buildTypes/Ribasim_Linux_BuildLibribasim.xml @@ -4,7 +4,7 @@ - diff --git a/.teamcity/Ribasim_Windows/buildTypes/Ribasim_Windows_BuildLibribasim.xml b/.teamcity/Ribasim_Windows/buildTypes/Ribasim_Windows_BuildLibribasim.xml index 72138fa0d..da602fdf4 100644 --- a/.teamcity/Ribasim_Windows/buildTypes/Ribasim_Windows_BuildLibribasim.xml +++ b/.teamcity/Ribasim_Windows/buildTypes/Ribasim_Windows_BuildLibribasim.xml @@ -4,7 +4,7 @@ - diff --git a/Manifest.toml b/Manifest.toml index 9723368a8..42cbf3109 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -2,7 +2,7 @@ julia_version = "1.10.0" manifest_format = "2.0" -project_hash = "212d113fb58ab2e43d8fe0283d717923e72a9a88" +project_hash = "742e16f5de5af91dfff30d91944981a5c7673d44" [[deps.ADTypes]] git-tree-sha1 = "41c37aa88889c171f1300ceac1313c06e891d245" @@ -150,9 +150,9 @@ version = "0.2.4" [[deps.ChainRulesCore]] deps = ["Compat", "LinearAlgebra"] -git-tree-sha1 = "c1deebd76f7a443d527fc0430d5758b8b2112ed8" +git-tree-sha1 = "1287e3872d646eed95198457873249bd9f0caed2" uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" -version = "1.19.1" +version = "1.20.1" weakdeps = ["SparseArrays"] [deps.ChainRulesCore.extensions] @@ -172,9 +172,9 @@ version = "1.3.5" [[deps.CodecBzip2]] deps = ["Bzip2_jll", "Libdl", "TranscodingStreams"] -git-tree-sha1 = "c0ae2a86b162fb5d7acc65269b469ff5b8a73594" +git-tree-sha1 = "9b1ca1aa6ce3f71b3d1840c538a8210a043625eb" uuid = "523fee87-0ab8-5b00-afb7-3ecf72e48cfd" -version = "0.8.1" +version = "0.8.2" [[deps.CodecLz4]] deps = ["Lz4_jll", "TranscodingStreams"] @@ -184,9 +184,9 @@ version = "0.4.1" [[deps.CodecZlib]] deps = ["TranscodingStreams", "Zlib_jll"] -git-tree-sha1 = "cd67fc487743b2f0fd4380d4cbd3a24660d0eec8" +git-tree-sha1 = "59939d8a997469ee05c4b4944560a820f9ba0d73" uuid = "944b1d66-785c-5afd-91f1-9de20f533193" -version = "0.7.3" +version = "0.7.4" [[deps.CodecZstd]] deps = ["TranscodingStreams", "Zstd_jll"] @@ -355,9 +355,9 @@ uuid = "ade2ca70-3891-5945-98fb-dc099432e06a" [[deps.Dictionaries]] deps = ["Indexing", "Random", "Serialization"] -git-tree-sha1 = "5bde104a45593207307e1481a58e0339d4643fca" +git-tree-sha1 = "8b73c5a704d74e78a114b785d648ceba1e5790a9" uuid = "85a47980-9c8c-11e8-2b9f-f7ca1fa99fb4" -version = "0.3.26" +version = "0.4.0" [[deps.DiffEqBase]] deps = ["ArrayInterface", "DataStructures", "DocStringExtensions", "EnumX", "EnzymeCore", "FastBroadcast", "ForwardDiff", "FunctionWrappers", "FunctionWrappersWrappers", "LinearAlgebra", "Logging", "Markdown", "MuladdMacro", "Parameters", "PreallocationTools", "PrecompileTools", "Printf", "RecursiveArrayTools", "Reexport", "SciMLBase", "SciMLOperators", "Setfield", "SparseArrays", "Static", "StaticArraysCore", "Statistics", "Tricks", "TruncatedStacktraces"] @@ -497,9 +497,9 @@ version = "0.3.2" [[deps.FastLapackInterface]] deps = ["LinearAlgebra"] -git-tree-sha1 = "b12f05108e405dadcc2aff0008db7f831374e051" +git-tree-sha1 = "d576a29bf8bcabf4b1deb9abe88a3d7f78306ab5" uuid = "29a986be-02c6-4525-aec4-84b980013641" -version = "2.0.0" +version = "2.0.1" [[deps.FileIO]] deps = ["Pkg", "Requires", "UUIDs"] @@ -939,9 +939,9 @@ version = "1.1.0" [[deps.MathOptInterface]] deps = ["BenchmarkTools", 
"CodecBzip2", "CodecZlib", "DataStructures", "ForwardDiff", "JSON", "LinearAlgebra", "MutableArithmetics", "NaNMath", "OrderedCollections", "PrecompileTools", "Printf", "SparseArrays", "SpecialFunctions", "Test", "Unicode"] -git-tree-sha1 = "e2ae8cf5ac6daf5a3959f7f6ded9c2028b61d09d" +git-tree-sha1 = "8b40681684df46785a0012d352982e22ac3be59e" uuid = "b8f27783-ece8-5eb3-8dc8-9495eed66fee" -version = "1.25.1" +version = "1.25.2" [[deps.MatrixFactorizations]] deps = ["ArrayLayouts", "LinearAlgebra", "Printf", "Random"] @@ -1026,9 +1026,9 @@ version = "1.2.0" [[deps.NonlinearSolve]] deps = ["ADTypes", "ArrayInterface", "ConcreteStructs", "DiffEqBase", "FastBroadcast", "FastClosures", "FiniteDiff", "ForwardDiff", "LazyArrays", "LineSearches", "LinearAlgebra", "LinearSolve", "MaybeInplace", "PrecompileTools", "Preferences", "Printf", "RecursiveArrayTools", "Reexport", "SciMLBase", "SimpleNonlinearSolve", "SparseArrays", "SparseDiffTools", "StaticArraysCore", "TimerOutputs"] -git-tree-sha1 = "78bdd3a4a62865cf43c53d63783b0cbfddcdbbe6" +git-tree-sha1 = "323d2a61f4adc4dfe404bf332b59690253b4f4f2" uuid = "8913a72c-1f9b-4ce2-8d82-65094dcecaec" -version = "3.5.0" +version = "3.5.3" [deps.NonlinearSolve.extensions] NonlinearSolveBandedMatricesExt = "BandedMatrices" @@ -1086,9 +1086,9 @@ version = "1.6.3" [[deps.OrdinaryDiffEq]] deps = ["ADTypes", "Adapt", "ArrayInterface", "DataStructures", "DiffEqBase", "DocStringExtensions", "ExponentialUtilities", "FastBroadcast", "FastClosures", "FillArrays", "FiniteDiff", "ForwardDiff", "FunctionWrappersWrappers", "IfElse", "InteractiveUtils", "LineSearches", "LinearAlgebra", "LinearSolve", "Logging", "MacroTools", "MuladdMacro", "NonlinearSolve", "Polyester", "PreallocationTools", "PrecompileTools", "Preferences", "RecursiveArrayTools", "Reexport", "SciMLBase", "SciMLOperators", "SimpleNonlinearSolve", "SimpleUnPack", "SparseArrays", "SparseDiffTools", "StaticArrayInterface", "StaticArrays", "TruncatedStacktraces"] -git-tree-sha1 = "7c6738f21fba2ccd07b7eaa9d23b437a8a97f1a1" +git-tree-sha1 = "c971a69e5eea2eba435b55e962c283f15502a0c8" uuid = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed" -version = "6.69.0" +version = "6.70.0" [[deps.PackageCompiler]] deps = ["Artifacts", "Glob", "LazyArtifacts", "Libdl", "Pkg", "Printf", "RelocatableFolders", "TOML", "UUIDs", "p7zip_jll"] @@ -1191,9 +1191,9 @@ uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" [[deps.ReTestItems]] deps = ["Dates", "Logging", "LoggingExtras", "Pkg", "Serialization", "Sockets", "Test", "TestEnv"] -git-tree-sha1 = "68a7d4fd86f12c2fc6dec60d566f033a10ffb5fb" +git-tree-sha1 = "32138ef9f7205330693b0e8e366370c470b2285d" uuid = "817f1d60-ba6b-4fd5-9520-3cf149f6a823" -version = "1.23.0" +version = "1.23.1" [[deps.RecipesBase]] deps = ["PrecompileTools"] @@ -1203,14 +1203,15 @@ version = "1.3.4" [[deps.RecursiveArrayTools]] deps = ["Adapt", "ArrayInterface", "DocStringExtensions", "GPUArraysCore", "IteratorInterfaceExtensions", "LinearAlgebra", "RecipesBase", "SparseArrays", "StaticArraysCore", "Statistics", "SymbolicIndexingInterface", "Tables"] -git-tree-sha1 = "dd7fc1923fde0cc6cdff451352d17924b0704ca1" +git-tree-sha1 = "5a904ad526cc9a2c5b464f6642ce9dd230fd69b6" uuid = "731186ca-8d62-57ce-b412-fbd966d074cd" -version = "3.5.4" +version = "3.7.0" [deps.RecursiveArrayTools.extensions] RecursiveArrayToolsFastBroadcastExt = "FastBroadcast" RecursiveArrayToolsMeasurementsExt = "Measurements" RecursiveArrayToolsMonteCarloMeasurementsExt = "MonteCarloMeasurements" + RecursiveArrayToolsReverseDiffExt = ["ReverseDiff", "Zygote"] 
RecursiveArrayToolsTrackerExt = "Tracker" RecursiveArrayToolsZygoteExt = "Zygote" @@ -1218,6 +1219,7 @@ version = "3.5.4" FastBroadcast = "7034ab61-46d4-4ed7-9d0f-46aef9175898" Measurements = "eff96d63-e80a-5855-80a2-b1b0885c5ab7" MonteCarloMeasurements = "0987c9cc-fe09-11e8-30f0-b96dd679fdca" + ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267" Tracker = "9f7883ad-71c0-57eb-9f7f-b5c9e6d3789c" Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" @@ -1285,15 +1287,15 @@ version = "1.6.0" [[deps.SQLite_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Zlib_jll"] -git-tree-sha1 = "81f7d934b52b2441f7b44520bd982fdb3607b0da" +git-tree-sha1 = "75e28667a36b5650b5cc4baa266c5760c3672275" uuid = "76ed43ae-9a5d-5a62-8c75-30186b810ce8" -version = "3.43.0+0" +version = "3.45.0+0" [[deps.SciMLBase]] deps = ["ADTypes", "ArrayInterface", "CommonSolve", "ConstructionBase", "Distributed", "DocStringExtensions", "EnumX", "FillArrays", "FunctionWrappersWrappers", "IteratorInterfaceExtensions", "LinearAlgebra", "Logging", "Markdown", "PrecompileTools", "Preferences", "Printf", "RecipesBase", "RecursiveArrayTools", "Reexport", "RuntimeGeneratedFunctions", "SciMLOperators", "StaticArraysCore", "Statistics", "SymbolicIndexingInterface", "Tables", "TruncatedStacktraces"] -git-tree-sha1 = "de41474ac529bf81598e064587421cc5ebc28fa0" +git-tree-sha1 = "e4344257d8a9dfc92e0fc113f0a5bababa8d2082" uuid = "0bca4576-84f4-4d90-8ffe-ffa030f20462" -version = "2.20.0" +version = "2.22.0" [deps.SciMLBase.extensions] SciMLBaseChainRulesCoreExt = "ChainRulesCore" @@ -1441,9 +1443,9 @@ weakdeps = ["OffsetArrays", "StaticArrays"] [[deps.StaticArrays]] deps = ["LinearAlgebra", "PrecompileTools", "Random", "StaticArraysCore"] -git-tree-sha1 = "f68dd04d131d9a8a8eb836173ee8f105c360b0c5" +git-tree-sha1 = "7b0e9c14c624e435076d19aea1e5cbdec2b9ca37" uuid = "90137ffa-7385-5640-81b9-e52037218182" -version = "1.9.1" +version = "1.9.2" weakdeps = ["ChainRulesCore", "Statistics"] [deps.StaticArrays.extensions] @@ -1500,9 +1502,9 @@ uuid = "bea87d4a-7f5b-5778-9afe-8cc45184846c" version = "7.2.1+1" [[deps.SymbolicIndexingInterface]] -git-tree-sha1 = "74502f408d99fc217a9d7cd901d9ffe45af892b1" +git-tree-sha1 = "b3103f4f50a3843e66297a2456921377c78f5e31" uuid = "2efcf032-c050-4f8e-a9bb-153293bab1f5" -version = "0.3.3" +version = "0.3.5" [[deps.TOML]] deps = ["Dates"] @@ -1511,9 +1513,9 @@ version = "1.0.3" [[deps.TZJData]] deps = ["Artifacts"] -git-tree-sha1 = "d39314cdbaf5b90a047db33858626f8d1cc973e1" +git-tree-sha1 = "b69f8338df046774bd975b13be9d297eca5efacb" uuid = "dc5dba14-91b3-4cab-a142-028a31da12f7" -version = "1.0.0+2023c" +version = "1.1.0+2023d" [[deps.TableTraits]] deps = ["IteratorInterfaceExtensions"] @@ -1656,29 +1658,11 @@ git-tree-sha1 = "49ce682769cd5de6c72dcf1b94ed7790cd08974c" uuid = "3161d3a3-bdf6-5164-811a-617609db77b4" version = "1.5.5+0" -[[deps.create_binaries]] -deps = ["Artifacts", "LibGit2", "PackageCompiler", "TOML", "TimeZones"] -path = "build/create_binaries" -uuid = "3cfb6a46-05f0-43df-bb16-bf763deb14b4" -version = "0.1.0" - -[[deps.docs]] -deps = ["Configurations", "DataFrames", "Dates", "Documenter", "DocumenterMarkdown", "IJulia", "InteractiveUtils", "JSON3", "Legolas", "Logging", "MarkdownTables", "OrderedCollections", "Ribasim"] -path = "docs" -uuid = "8daea9ca-fc6c-4731-aa85-717fa0b706bc" -version = "0.1.0" - [[deps.libblastrampoline_jll]] deps = ["Artifacts", "Libdl"] uuid = "8e850b90-86db-534c-a0d3-1478176c7d93" version = "5.8.0+1" -[[deps.libribasim]] -deps = ["BasicModelInterface", "Dates", "Ribasim", "TOML"] 
-path = "build/libribasim" -uuid = "f319f290-633d-4573-adfe-d6d5548b6388" -version = "0.1.0" - [[deps.libsodium_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] git-tree-sha1 = "848ab3d00fe39d6fbc2a8641048f8f272af1c51e" @@ -1694,9 +1678,3 @@ version = "1.52.0+1" deps = ["Artifacts", "Libdl"] uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0" version = "17.4.0+2" - -[[deps.ribasim_cli]] -deps = ["Dates", "Logging", "Ribasim", "SciMLBase", "TOML", "TerminalLoggers"] -path = "build/ribasim_cli" -uuid = "e45c999e-e944-4589-a8e6-7d0b7a60140a" -version = "0.1.0" diff --git a/Project.toml b/Project.toml index ea6fed355..8ba521efb 100644 --- a/Project.toml +++ b/Project.toml @@ -3,23 +3,55 @@ authors = ["Deltares and contributors "] description = "Meta-project used to share the Manifest of Ribasim and its dependents" [deps] +Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697" Arrow = "69666777-d1a9-59fb-9406-91d4454c9d45" +Artifacts = "56f22d72-fd6d-98f1-02f0-08ddc0907c33" BasicModelInterface = "59605e27-edc0-445a-b93d-c09a3a50b330" +CodecLz4 = "5ba52731-8f18-5e0d-9241-30f10d1ec561" +CodecZstd = "6b39b394-51ab-5f42-8807-6242bab2b4c2" +ComponentArrays = "b0b7db55-cfe3-40fc-9ded-d10e2dbeff66" +Configurations = "5218b696-f38b-4ac9-8b61-a12ec717816d" +DBInterface = "a10d1c49-ce27-4219-8d33-6db1a4562965" +DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0" +DataInterpolations = "82cc6244-b520-54b8-b5a6-8a565e85f1d0" +DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" +Dates = "ade2ca70-3891-5945-98fb-dc099432e06a" +Dictionaries = "85a47980-9c8c-11e8-2b9f-f7ca1fa99fb4" +DiffEqCallbacks = "459566f4-90b8-5000-8ac3-15dfb0a30def" +Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4" +DocumenterMarkdown = "997ab1e6-3595-5248-9280-8efb232c3433" +EnumX = "4e289a0a-7415-4d19-859d-a7e5c4648b56" +FiniteDiff = "6a86dc24-6348-571c-b903-95158fe2bd41" +ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6" +HiGHS = "87dc4568-4c63-4d18-b0c0-bb2238e4078b" +IJulia = "7073ff75-c697-5162-941a-fcdaad2a7d2a" Infiltrator = "5903a43b-9cc3-4c30-8d17-598619ec4e9b" +InteractiveUtils = "b77e0a4c-d291-57a0-90e8-8db25a27a240" +IterTools = "c8e1da08-722c-5040-9ed9-7db0dc04731e" +JSON3 = "0f8b85d8-7281-11e9-16c2-39a750bddbf1" JuMP = "4076af6c-e467-56ae-b986-b466b2749572" +Legolas = "741b9549-f6ed-4911-9fbf-4a1c0c97f0cd" +LibGit2 = "76f85450-5226-5b5a-8eaa-529ad045b433" +Logging = "56ddb016-857b-54e1-b83d-db4d58db5568" +LoggingExtras = "e6f89c97-d47a-5376-807f-9c37f3926c36" +MarkdownTables = "1862ce21-31c7-451e-824c-f20fa3f90fa2" MetaGraphsNext = "fa8bd995-216d-47f1-8a91-f3b68fbeb377" +OrderedCollections = "bac558e1-5e72-5ebc-8fee-abe8a469f55d" OrdinaryDiffEq = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed" +PackageCompiler = "9b87118b-4619-50d2-8e1e-99f35a4d4d9d" PreallocationTools = "d236fae5-4411-538c-8e31-a6e3d9e00b46" ReTestItems = "817f1d60-ba6b-4fd5-9520-3cf149f6a823" Revise = "295af30f-e4ad-537b-8983-00126c2a3abe" Ribasim = "aac5e3d9-0b8f-4d4f-8241-b1a7a9632635" SQLite = "0aa819cd-b072-5ff4-a722-6bc24af294d9" SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" +SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" +StructArrays = "09ab397b-f2b6-538f-b94a-2f83cf4a842a" +TOML = "fa267f1f-6049-4f14-aa54-33bafae1ed76" Tables = "bd369af6-aec1-5ad0-b16a-f7cc5008161c" +TerminalLoggers = "5d786b92-1e48-4d6f-9151-6b4477ca9bed" TestEnv = "1e6cf692-eddd-4d53-88a5-2d735e33781b" TimeZones = "f269a46b-ccf7-5d73-abea-4c690281aa53" -create_binaries = "3cfb6a46-05f0-43df-bb16-bf763deb14b4" -docs = 
"8daea9ca-fc6c-4731-aa85-717fa0b706bc" -libribasim = "f319f290-633d-4573-adfe-d6d5548b6388" -ribasim_cli = "e45c999e-e944-4589-a8e6-7d0b7a60140a" +TimerOutputs = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f" +TranscodingStreams = "3bb67fe8-82b1-5028-8e26-92a6c54297fa" diff --git a/build/create_binaries/README.md b/build/README.md similarity index 100% rename from build/create_binaries/README.md rename to build/README.md diff --git a/build/create_binaries/build.jl b/build/build.jl similarity index 69% rename from build/create_binaries/build.jl rename to build/build.jl index 9c27c6e84..f19181825 100644 --- a/build/create_binaries/build.jl +++ b/build/build.jl @@ -1,4 +1,11 @@ -using create_binaries +using Artifacts +using PackageCompiler +using TOML +using LibGit2 + +include("src/add_metadata.jl") +include("src/create_app.jl") +include("src/create_lib.jl") """ Build the Ribasim CLI, libribasim, or both, using PackageCompiler. diff --git a/build/create_binaries/Project.toml b/build/create_binaries/Project.toml deleted file mode 100644 index aef92c80d..000000000 --- a/build/create_binaries/Project.toml +++ /dev/null @@ -1,21 +0,0 @@ -name = "create_binaries" -uuid = "3cfb6a46-05f0-43df-bb16-bf763deb14b4" -authors = ["Deltares and contributors "] -description = "Build Ribasim binaries with PackageCompiler" -manifest = "../../Manifest.toml" -version = "0.1.0" - -[deps] -Artifacts = "56f22d72-fd6d-98f1-02f0-08ddc0907c33" -LibGit2 = "76f85450-5226-5b5a-8eaa-529ad045b433" -PackageCompiler = "9b87118b-4619-50d2-8e1e-99f35a4d4d9d" -TOML = "fa267f1f-6049-4f14-aa54-33bafae1ed76" -TimeZones = "f269a46b-ccf7-5d73-abea-4c690281aa53" - -[compat] -Artifacts = "<0.0.1,1" -LibGit2 = "<0.0.1,1" -PackageCompiler = "2" -TOML = "<0.0.1,1" -TimeZones = "=1.14.0, 1" -julia = "1.10" diff --git a/build/create_binaries/src/create_app.jl b/build/create_binaries/src/create_app.jl deleted file mode 100644 index 056dc5b7b..000000000 --- a/build/create_binaries/src/create_app.jl +++ /dev/null @@ -1,32 +0,0 @@ -"Build the Ribasim CLI using PackageCompiler" -function build_app() - project_dir = "../ribasim_cli" - license_file = "../../LICENSE" - output_dir = "ribasim_cli" - git_repo = "../.." - - create_app( - project_dir, - output_dir; - # map from binary name to julia function name - executables = ["ribasim" => "julia_main"], - precompile_execution_file = "precompile.jl", - include_lazy_artifacts = true, - force = true, - ) - - add_metadata(project_dir, license_file, output_dir, git_repo) - - # On Windows, write ribasim.cmd in the output_dir, that starts ribasim.exe. - # Since the bin dir contains a julia.exe and many DLLs that you may not want in your path, - # with this script you can put output_dir in your path instead. 
- if Sys.iswindows() - cmd = raw""" - @echo off - "%~dp0bin\ribasim.exe" %* - """ - open(normpath(output_dir, "ribasim.cmd"); write = true) do io - print(io, cmd) - end - end -end diff --git a/build/create_binaries/src/create_binaries.jl b/build/create_binaries/src/create_binaries.jl deleted file mode 100644 index 24082d8ef..000000000 --- a/build/create_binaries/src/create_binaries.jl +++ /dev/null @@ -1,14 +0,0 @@ -module create_binaries - -using Artifacts -using PackageCompiler -using TOML -using LibGit2 - -export build_app, build_lib - -include("add_metadata.jl") -include("create_app.jl") -include("create_lib.jl") - -end diff --git a/build/create_binaries/src/create_lib.jl b/build/create_binaries/src/create_lib.jl deleted file mode 100644 index 8e8b01a31..000000000 --- a/build/create_binaries/src/create_lib.jl +++ /dev/null @@ -1,18 +0,0 @@ -"Build libribasim using PackageCompiler" -function build_lib() - project_dir = "../libribasim" - license_file = "../../LICENSE" - output_dir = "libribasim" - git_repo = "../.." - - create_library( - project_dir, - output_dir; - lib_name = "libribasim", - precompile_execution_file = "precompile.jl", - include_lazy_artifacts = true, - force = true, - ) - - add_metadata(project_dir, license_file, output_dir, git_repo) -end diff --git a/build/libribasim/Project.toml b/build/libribasim/Project.toml deleted file mode 100644 index 2b7700197..000000000 --- a/build/libribasim/Project.toml +++ /dev/null @@ -1,17 +0,0 @@ -name = "libribasim" -uuid = "f319f290-633d-4573-adfe-d6d5548b6388" -authors = ["Deltares and contributors "] -manifest = "../../Manifest.toml" -version = "0.1.0" - -[deps] -BasicModelInterface = "59605e27-edc0-445a-b93d-c09a3a50b330" -Dates = "ade2ca70-3891-5945-98fb-dc099432e06a" -Ribasim = "aac5e3d9-0b8f-4d4f-8241-b1a7a9632635" -TOML = "fa267f1f-6049-4f14-aa54-33bafae1ed76" - -[compat] -BasicModelInterface = "0.1" -Dates = "<0.0.1,1" -TOML = "<0.0.1,1" -julia = "1.10" diff --git a/build/create_binaries/precompile.jl b/build/precompile.jl similarity index 80% rename from build/create_binaries/precompile.jl rename to build/precompile.jl index 73cd6c6f2..b12cc0ce8 100644 --- a/build/create_binaries/precompile.jl +++ b/build/precompile.jl @@ -1,7 +1,7 @@ # Workflow that will compile a lot of the code we will need. # With the purpose of reducing the latency for compiled binaries. 
-using Ribasim, Dates, TOML +using Ribasim toml_path = normpath(@__DIR__, "../../generated_testmodels/basic/ribasim.toml") -Ribasim.run(toml_path) +Ribasim.main(toml_path) diff --git a/build/ribasim_cli/Project.toml b/build/ribasim_cli/Project.toml deleted file mode 100644 index 728a085ca..000000000 --- a/build/ribasim_cli/Project.toml +++ /dev/null @@ -1,21 +0,0 @@ -name = "ribasim_cli" -uuid = "e45c999e-e944-4589-a8e6-7d0b7a60140a" -authors = ["Deltares and contributors "] -manifest = "../../Manifest.toml" -version = "0.1.0" - -[deps] -Dates = "ade2ca70-3891-5945-98fb-dc099432e06a" -Logging = "56ddb016-857b-54e1-b83d-db4d58db5568" -Ribasim = "aac5e3d9-0b8f-4d4f-8241-b1a7a9632635" -SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" -TOML = "fa267f1f-6049-4f14-aa54-33bafae1ed76" -TerminalLoggers = "5d786b92-1e48-4d6f-9151-6b4477ca9bed" - -[compat] -Dates = "<0.0.1,1" -Logging = "<0.0.1,1" -SciMLBase = "1.60, 2" -TOML = "<0.0.1,1" -TerminalLoggers = "0.1.7" -julia = "1.10" diff --git a/build/ribasim_cli/README.md b/build/ribasim_cli/README.md deleted file mode 100644 index b51c5e27f..000000000 --- a/build/ribasim_cli/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# Ribasim CLI - -This is a [Julia](https://julialang.org/) project that uses the -[Ribasim](https://github.com/Deltares/Ribasim) Julia package, puts a simple command line -interface (cli) on top, and packages this into a standalone application using -[PackageCompiler.jl](https://github.com/JuliaLang/PackageCompiler.jl). - -This enables using Ribasim without having to install Julia, and thus makes it more -convenient to use in certain settings where installation must be simple and no interactive -Julia session is needed. - -If you have installed Julia and Ribasim, a simulation can also be started from the command -line as follows: - -``` -julia --eval 'using Ribasim; Ribasim.run("path/to/model/ribasim.toml")' -``` - -With a Ribasim CLI build this becomes: - -``` -ribasim path/to/model/ribasim.toml -``` diff --git a/build/ribasim_cli/src/ribasim_cli.jl b/build/ribasim_cli/src/ribasim_cli.jl deleted file mode 100644 index 6e28f8507..000000000 --- a/build/ribasim_cli/src/ribasim_cli.jl +++ /dev/null @@ -1,7 +0,0 @@ -module ribasim_cli - -using Ribasim - -julia_main()::Cint = Ribasim.main(ARGS) - -end # module diff --git a/build/create_binaries/src/add_metadata.jl b/build/src/add_metadata.jl similarity index 94% rename from build/create_binaries/src/add_metadata.jl rename to build/src/add_metadata.jl index 27f3585ce..45e2ed836 100644 --- a/build/create_binaries/src/add_metadata.jl +++ b/build/src/add_metadata.jl @@ -7,7 +7,7 @@ Add the following metadata files to the newly created build: - README.md - LICENSE """ -function add_metadata(project_dir, license_file, output_dir, git_repo) +function add_metadata(project_dir, license_file, output_dir, git_repo, readme) # save some environment variables in a Build.toml file for debugging purposes vars = ["BUILD_NUMBER", "BUILD_VCS_NUMBER"] dict = Dict(var => ENV[var] for var in vars if haskey(ENV, var)) @@ -31,8 +31,9 @@ function add_metadata(project_dir, license_file, output_dir, git_repo) # put the LICENSE in the top level directory cp(license_file, normpath(output_dir, "LICENSE"); force = true) - cp(normpath(project_dir, "README.md"), normpath(output_dir, "README.md"); force = true) - open(normpath(output_dir, "README.md"), "a") do io + open(normpath(output_dir, "README.md"), "w") do io + println(io, readme) + # since the exact Ribasim version may be hard to find in the Manifest.toml file # we 
can also extract that information, and add it to the README.md manifest = TOML.parsefile(normpath(git_repo, "Manifest.toml")) diff --git a/build/src/create_app.jl b/build/src/create_app.jl new file mode 100644 index 000000000..90da2d131 --- /dev/null +++ b/build/src/create_app.jl @@ -0,0 +1,57 @@ +""" +# Ribasim CLI + +This is a [Julia](https://julialang.org/) project that uses the +[Ribasim](https://github.com/Deltares/Ribasim) Julia package, puts a simple command line +interface (cli) on top, and packages this into a standalone application using +[PackageCompiler.jl](https://github.com/JuliaLang/PackageCompiler.jl). + +This enables using Ribasim without having to install Julia, and thus makes it more +convenient to use in certain settings where installation must be simple and no interactive +Julia session is needed. + +If you have installed Julia and Ribasim, a simulation can also be started from the command +line as follows: + +``` +julia --eval 'using Ribasim; Ribasim.main("path/to/model/ribasim.toml")' +``` + +With a Ribasim CLI build this becomes: + +``` +ribasim path/to/model/ribasim.toml +``` +""" +function build_app() + project_dir = "../core" + license_file = "../LICENSE" + output_dir = "ribasim_cli" + git_repo = ".." + + create_app( + project_dir, + output_dir; + # map from binary name to julia function name + executables = ["ribasim" => "main"], + precompile_execution_file = "precompile.jl", + include_lazy_artifacts = true, + force = true, + ) + + readme = @doc(build_app) + add_metadata(project_dir, license_file, output_dir, git_repo, readme) + + # On Windows, write ribasim.cmd in the output_dir, that starts ribasim.exe. + # Since the bin dir contains a julia.exe and many DLLs that you may not want in your path, + # with this script you can put output_dir in your path instead. + if Sys.iswindows() + cmd = raw""" + @echo off + "%~dp0bin\ribasim.exe" %* + """ + open(normpath(output_dir, "ribasim.cmd"); write = true) do io + print(io, cmd) + end + end +end diff --git a/build/libribasim/README.md b/build/src/create_lib.jl similarity index 59% rename from build/libribasim/README.md rename to build/src/create_lib.jl index 5906696a7..5044d91b8 100644 --- a/build/libribasim/README.md +++ b/build/src/create_lib.jl @@ -1,13 +1,14 @@ +""" # Libribasim Libribasim is a shared library that exposes Ribasim functionality to external (non-Julian) programs. It can be compiled using [PackageCompiler's -create_lib](https://julialang.github.io/PackageCompiler.jl/stable/libs.html) , which is set -up in this directory. The C API that is offered to control Ribasim is the C API of the [Basic -Model Interface](https://bmi.readthedocs.io/en/latest/), also known as BMI. +create_lib](https://julialang.github.io/PackageCompiler.jl/stable/libs.html), which is set +up in this directory. The C API that is offered to control Ribasim is the C API of the +[Basic Model Interface](https://bmi.readthedocs.io/en/latest/), also known as BMI. Not all BMI functions are implemented yet, this has been set up as a proof of concept to -demonstrate that we could use other software such as +demonstrate that we can use other software such as [`imod_coupler`](https://github.com/Deltares/imod_coupler) to control Ribasim and couple it to other models. @@ -30,3 +31,22 @@ Out[5]: 0 In [6]: c_dll.update() Out[6]: 0 ``` +""" +function build_lib() + project_dir = "../core" + license_file = "../LICENSE" + output_dir = "libribasim" + git_repo = ".." 
+ + create_library( + project_dir, + output_dir; + lib_name = "libribasim", + precompile_execution_file = "precompile.jl", + include_lazy_artifacts = true, + force = true, + ) + + readme = @doc(build_app) + add_metadata(project_dir, license_file, output_dir, git_repo, readme) +end diff --git a/build/ribasim_cli/tests/test_models.py b/build/tests/test_models.py similarity index 83% rename from build/ribasim_cli/tests/test_models.py rename to build/tests/test_models.py index 2a005c3e8..f4d23bb41 100644 --- a/build/ribasim_cli/tests/test_models.py +++ b/build/tests/test_models.py @@ -19,11 +19,7 @@ def test_ribasim_cli(model_constructor, tmp_path): extension = ".exe" if platform.system() == "Windows" else "" executable = ( - Path(__file__).parents[2] - / "create_binaries" - / "ribasim_cli" - / "bin" - / f"ribasim{extension}" + Path(__file__).parents[1] / "ribasim_cli" / "bin" / f"ribasim{extension}" ) result = subprocess.run([executable, tmp_path / "ribasim.toml"]) diff --git a/core/Project.toml b/core/Project.toml index 2e22edab1..a8010212a 100644 --- a/core/Project.toml +++ b/core/Project.toml @@ -56,7 +56,7 @@ DataFrames = "1.4" DataInterpolations = "4.4" DataStructures = "0.18" Dates = "<0.0.1,1" -Dictionaries = "0.3.25" +Dictionaries = "0.3.25, 0.4" DiffEqCallbacks = "2.29.1" Documenter = "0.27,1" EnumX = "1.0" diff --git a/core/src/Ribasim.jl b/core/src/Ribasim.jl index 79947b8a6..ca675a366 100644 --- a/core/src/Ribasim.jl +++ b/core/src/Ribasim.jl @@ -44,7 +44,9 @@ using Graphs: inneighbors, nv, outneighbors, - rem_edge! + rem_edge!, + induced_subgraph, + is_connected using Legolas: Legolas, @schema, @version, validate, SchemaVersion, declared using Logging: with_logger, LogLevel, AbstractLogger @@ -68,6 +70,8 @@ using Tables: Tables, AbstractRow, columntable, getcolumn using TerminalLoggers: TerminalLogger using TimerOutputs +export libribasim + const to = TimerOutput() TimerOutputs.complement!() @@ -84,5 +88,6 @@ include("create.jl") include("bmi.jl") include("consts.jl") include("main.jl") +include("libribasim.jl") end # module Ribasim diff --git a/core/src/allocation.jl b/core/src/allocation.jl index 60bc671ec..4e3f22f03 100644 --- a/core/src/allocation.jl +++ b/core/src/allocation.jl @@ -1,19 +1,45 @@ +"""Find the edges from the main network to a subnetwork.""" +function find_subnetwork_connections!(p::Parameters)::Nothing + (; allocation, graph, user) = p + n_priorities = length(user.demand[1]) + (; subnetwork_demands, subnetwork_allocateds) = allocation + for node_id in graph[].node_ids[1] + for outflow_id in outflow_ids(graph, node_id) + if graph[outflow_id].allocation_network_id != 1 + main_network_source_edges = + get_main_network_connections(p, graph[outflow_id].allocation_network_id) + edge = (node_id, outflow_id) + push!(main_network_source_edges, edge) + subnetwork_demands[edge] = zeros(n_priorities) + subnetwork_allocateds[edge] = zeros(n_priorities) + end + end + end + return nothing +end + """ Find all nodes in the subnetwork which will be used in the allocation network. Some nodes are skipped to optimize allocation optimization. 
""" function allocation_graph_used_nodes!(p::Parameters, allocation_network_id::Int)::Nothing - (; graph, basin, fractional_flow) = p + (; graph, basin, fractional_flow, allocation) = p + (; main_network_connections) = allocation node_ids = graph[].node_ids[allocation_network_id] used_nodes = Set{NodeID}() for node_id in node_ids + use_node = false has_fractional_flow_outneighbors = get_fractional_flow_connected_basins(node_id, basin, fractional_flow, graph)[3] node_type = graph[node_id].type if node_type in [:user, :basin, :terminal] - push!(used_nodes, node_id) + use_node = true elseif has_fractional_flow_outneighbors + use_node = true + end + + if use_node push!(used_nodes, node_id) end end @@ -21,13 +47,24 @@ function allocation_graph_used_nodes!(p::Parameters, allocation_network_id::Int) # Add nodes in the allocation graph for nodes connected to the source edges # One of these nodes can be outside the subnetwork, as long as the edge # connects to the subnetwork - for edge_metadata in graph[].edges_source[allocation_network_id] + edges_source = graph[].edges_source + for edge_metadata in get(edges_source, allocation_network_id, Set{EdgeMetadata}()) (; from_id, to_id) = edge_metadata push!(used_nodes, from_id) push!(used_nodes, to_id) end filter!(in(used_nodes), node_ids) + + # For the main network, include nodes that connect the main network to a subnetwork + # (also includes nodes not in the main network in the input) + if is_main_network(allocation_network_id) + for connections_subnetwork in main_network_connections + for connection in connections_subnetwork + union!(node_ids, connection) + end + end + end return nothing end @@ -244,6 +281,8 @@ function process_allocation_graph_edges!( return capacity end +const allocation_source_nodetypes = Set{Symbol}([:level_boundary, :flow_boundary]) + """ The source nodes must only have one allocation outneighbor and no allocation inneighbors. """ @@ -254,24 +293,23 @@ function valid_sources(p::Parameters, allocation_network_id::Int)::Bool errors = false - for (id_source, id_dst) in edge_ids + for edge in edge_ids + (id_source, id_dst) = edge if graph[id_source, id_dst].allocation_network_id_source == allocation_network_id - ids_allocation_in = [ - label for label in inneighbor_labels(graph, id_source) if - graph[label, id_source].allocation_flow - ] - if length(ids_allocation_in) !== 0 - errors = true - @error "Source edge ($id_source, $id_dst) is not an entry point of subnetwork $allocation_network_id" - end + from_source_node = graph[id_source].type in allocation_source_nodetypes - ids_allocation_out = [ - label for label in outneighbor_labels(graph, id_source) if - graph[id_source, label].allocation_flow - ] - if length(ids_allocation_out) !== 1 - errors = true - @error "Source edge ($id_source, $id_dst) is not the only allocation edge coming from $id_source" + if is_main_network(allocation_network_id) + if !from_source_node + errors = true + @error "The source node of source edge $edge in the main network must be one of $allocation_source_nodetypes." + end + else + from_main_network = is_main_network(graph[id_source].allocation_network_id) + + if !from_source_node && !from_main_network + errors = true + @error "The source node of source edge $edge for subnetwork $allocation_network_id is neither a source node nor is it coming from the main network." 
+ end end end end @@ -301,6 +339,25 @@ function avoid_using_own_returnflow!(p::Parameters, allocation_network_id::Int): return nothing end +""" +Add the edges connecting the main network work to a subnetwork to both the main network +and subnetwork allocation graph. +""" +function add_subnetwork_connections!(p::Parameters, allocation_network_id::Int)::Nothing + (; graph, allocation) = p + (; main_network_connections) = allocation + edge_ids = graph[].edge_ids[allocation_network_id] + + if is_main_network(allocation_network_id) + for connections in main_network_connections + union!(edge_ids, connections) + end + else + union!(edge_ids, get_main_network_connections(p, allocation_network_id)) + end + return nothing +end + """ Build the graph used for the allocation problem. """ @@ -316,6 +373,7 @@ function allocation_graph( # Process the edges in the allocation graph process_allocation_graph_edges!(capacity, edges_composite, p, allocation_network_id) + add_subnetwork_connections!(p, allocation_network_id) if !valid_sources(p, allocation_network_id) error("Errors in sources in allocation graph.") @@ -355,10 +413,21 @@ function add_variables_absolute_value!( allocation_network_id::Int, config::Config, )::Nothing - (; graph) = p - node_ids = graph[].node_ids[allocation_network_id] - node_ids_user = [node_id for node_id in node_ids if graph[node_id].type == :user] + (; graph, allocation) = p + (; main_network_connections) = allocation if startswith(config.allocation.objective_type, "linear") + node_ids = graph[].node_ids[allocation_network_id] + node_ids_user = [node_id for node_id in node_ids if graph[node_id].type == :user] + + # For the main network, connections to subnetworks are treated as users + if is_main_network(allocation_network_id) + for connections_subnetwork in main_network_connections + for connection in connections_subnetwork + push!(node_ids_user, connection[2]) + end + end + end + problem[:F_abs] = JuMP.@variable(problem, F_abs[node_id = node_ids_user]) end return nothing @@ -379,11 +448,12 @@ function add_constraints_capacity!( allocation_network_id::Int, )::Nothing (; graph) = p + main_network_source_edges = get_main_network_connections(p, allocation_network_id) F = problem[:F] edge_ids = graph[].edge_ids[allocation_network_id] edge_ids_finite_capacity = Tuple{NodeID, NodeID}[] for edge in edge_ids - if !isinf(capacity[edge...]) + if !isinf(capacity[edge...]) && edge ∉ main_network_source_edges push!(edge_ids_finite_capacity, edge) end end @@ -465,13 +535,19 @@ function add_constraints_flow_conservation!( p::Parameters, allocation_network_id::Int, )::Nothing - (; graph) = p + (; graph, allocation) = p F = problem[:F] node_ids = graph[].node_ids[allocation_network_id] - node_ids_basin = [node_id for node_id in node_ids if graph[node_id].type == :basin] + node_ids_conservation = + [node_id for node_id in node_ids if graph[node_id].type == :basin] + main_network_source_edges = get_main_network_connections(p, allocation_network_id) + for edge in main_network_source_edges + push!(node_ids_conservation, edge[2]) + end + unique!(node_ids_conservation) problem[:flow_conservation] = JuMP.@constraint( problem, - [node_id = node_ids_basin], + [node_id = node_ids_conservation], sum([ F[(node_id, outneighbor_id)] for outneighbor_id in outflow_ids_allocation(graph, node_id) @@ -527,12 +603,23 @@ function add_constraints_absolute_value!( allocation_network_id::Int, config::Config, )::Nothing - (; graph) = p - node_ids = graph[].node_ids[allocation_network_id] + (; graph, allocation) = p + (; 
main_network_connections) = allocation objective_type = config.allocation.objective_type if startswith(objective_type, "linear") + node_ids = graph[].node_ids[allocation_network_id] node_ids_user = [node_id for node_id in node_ids if graph[node_id].type == :user] + + # For the main network, connections to subnetworks are treated as users + if is_main_network(allocation_network_id) + for connections_subnetwork in main_network_connections + for connection in connections_subnetwork + push!(node_ids_user, connection[2]) + end + end + end + node_ids_user_inflow = Dict( node_id_user => only(inflow_ids_allocation(graph, node_id_user)) for node_id_user in node_ids_user @@ -693,6 +780,60 @@ function AllocationModel( ) end +""" +Add a term to the expression of the objective function corresponding to +the demand of a user. +""" +function add_user_term!( + ex::Union{JuMP.QuadExpr, JuMP.AffExpr}, + edge::Tuple{NodeID, NodeID}, + objective_type::Symbol, + demand::Float64, + model::AllocationModel, +)::Nothing + (; problem) = model + F = problem[:F] + F_edge = F[edge] + node_id_user = edge[2] + + if objective_type == :quadratic_absolute + # Objective function ∑ (F - d)^2 + JuMP.add_to_expression!(ex, 1, F_edge, F_edge) + JuMP.add_to_expression!(ex, -2 * demand, F_edge) + JuMP.add_to_expression!(ex, demand^2) + + elseif objective_type == :quadratic_relative + # Objective function ∑ (1 - F/d)^2 + if demand ≈ 0 + return nothing + end + JuMP.add_to_expression!(ex, 1.0 / demand^2, F_edge, F_edge) + JuMP.add_to_expression!(ex, -2.0 / demand, F_edge) + JuMP.add_to_expression!(ex, 1.0) + + elseif objective_type == :linear_absolute + # Objective function ∑ |F - d| + JuMP.set_normalized_rhs(problem[:abs_positive][node_id_user], -demand) + JuMP.set_normalized_rhs(problem[:abs_negative][node_id_user], demand) + + elseif objective_type == :linear_relative + # Objective function ∑ |1 - F/d| + JuMP.set_normalized_coefficient( + problem[:abs_positive][node_id_user], + F_edge, + iszero(demand) ? 0 : 1 / demand, + ) + JuMP.set_normalized_coefficient( + problem[:abs_negative][node_id_user], + F_edge, + iszero(demand) ? 0 : -1 / demand, + ) + else + error("Invalid allocation objective type $objective_type.") + end + return nothing +end + """ Set the objective for the given priority. For an objective with absolute values this also involves adjusting constraints. 
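
The objective terms built in `add_user_term!` above come in four flavours: quadratic absolute ∑ (F − d)², quadratic relative ∑ (1 − F/d)², linear absolute ∑ |F − d| and linear relative ∑ |1 − F/d|. For the linear variants the absolute value is handled through the auxiliary `F_abs` variables and the `abs_positive`/`abs_negative` constraints, whose right-hand sides or coefficients are the only things `add_user_term!` has to update per priority. The snippet below is a minimal standalone sketch of that linearization, not part of this diff; the demand `d = 2.0` and the capacity bound `1.5` are made-up numbers, and the constraint names only mirror the ones used above.

```julia
using JuMP, HiGHS  # HiGHS is already listed in the root Project.toml

d = 2.0                          # user demand for this priority (made up)
model = Model(HiGHS.Optimizer)
set_silent(model)
@variable(model, 0 <= F <= 1.5)  # allocated flow, bounded by an assumed edge capacity
@variable(model, F_abs)          # auxiliary variable standing in for |F - d|
# F_abs >= |F - d| written as two linear constraints; add_user_term! only has to
# move their right-hand sides when the demand changes.
abs_positive = @constraint(model, F_abs - F >= -d)
abs_negative = @constraint(model, F_abs + F >= d)
@objective(model, Min, F_abs)
optimize!(model)
value(F)  # 1.5: the demand of 2.0 is met as far as the capacity allows

# When the demand changes (say to 3.0), only the right-hand sides move,
# which is exactly what add_user_term! does with set_normalized_rhs:
set_normalized_rhs(abs_positive, -3.0)
set_normalized_rhs(abs_negative, 3.0)
```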
@@ -704,8 +845,9 @@ function set_objective_priority!( priority_idx::Int, )::Nothing (; objective_type, problem, allocation_network_id) = allocation_model - (; graph, user) = p + (; graph, user, allocation) = p (; demand, node_id) = user + (; main_network_connections, subnetwork_demands) = allocation edge_ids = graph[].edge_ids[allocation_network_id] F = problem[:F] @@ -717,6 +859,18 @@ function set_objective_priority!( demand_max = 0.0 + # Terms for subnetworks as users + if is_main_network(allocation_network_id) + for connections_subnetwork in main_network_connections + for connection in connections_subnetwork + d = subnetwork_demands[connection][priority_idx] + demand_max = max(demand_max, d) + add_user_term!(ex, connection, objective_type, d, allocation_model) + end + end + end + + # Terms for user nodes for edge_id in edge_ids node_id_user = edge_id[2] if graph[node_id_user].type != :user @@ -726,43 +880,7 @@ function set_objective_priority!( user_idx = findsorted(node_id, node_id_user) d = demand[user_idx][priority_idx](t) demand_max = max(demand_max, d) - F_edge = F[edge_id] - - if objective_type == :quadratic_absolute - # Objective function ∑ (F - d)^2 - JuMP.add_to_expression!(ex, 1, F_edge, F_edge) - JuMP.add_to_expression!(ex, -2 * d, F_edge) - JuMP.add_to_expression!(ex, d^2) - - elseif objective_type == :quadratic_relative - # Objective function ∑ (1 - F/d)^2S - if d ≈ 0 - continue - end - JuMP.add_to_expression!(ex, 1.0 / d^2, F_edge, F_edge) - JuMP.add_to_expression!(ex, -2.0 / d, F_edge) - JuMP.add_to_expression!(ex, 1.0) - - elseif objective_type == :linear_absolute - # Objective function ∑ |F - d| - JuMP.set_normalized_rhs(problem[:abs_positive][node_id_user], -d) - JuMP.set_normalized_rhs(problem[:abs_negative][node_id_user], d) - - elseif objective_type == :linear_relative - # Objective function ∑ |1 - F/d| - JuMP.set_normalized_coefficient( - problem[:abs_positive][node_id_user], - F_edge, - iszero(d) ? 0 : 1 / d, - ) - JuMP.set_normalized_coefficient( - problem[:abs_negative][node_id_user], - F_edge, - iszero(d) ? 
0 : -1 / d, - ) - else - error("Invalid allocation objective type $objective_type.") - end + add_user_term!(ex, edge_id, objective_type, d, allocation_model) end # Add flow cost @@ -792,35 +910,66 @@ function assign_allocations!( allocation_model::AllocationModel, p::Parameters, t::Float64, - priority_idx::Int, + priority_idx::Int; + collect_demands::Bool = false, )::Nothing (; problem, allocation_network_id) = allocation_model - (; graph, user) = p + (; graph, user, allocation) = p + (; + subnetwork_demands, + subnetwork_allocateds, + allocation_network_ids, + main_network_connections, + ) = allocation (; record) = user edge_ids = graph[].edge_ids[allocation_network_id] + main_network_source_edges = get_main_network_connections(p, allocation_network_id) F = problem[:F] for edge_id in edge_ids + # If this edge is a source edge from the main network to a subnetwork, + # and demands are being collected, add its flow to the demand of this edge + if collect_demands && + graph[edge_id...].allocation_network_id_source == allocation_network_id && + edge_id ∈ main_network_source_edges + allocated = JuMP.value(F[edge_id]) + subnetwork_demands[edge_id][priority_idx] += allocated + end + user_node_id = edge_id[2] - if graph[user_node_id].type != :user - continue + + if graph[user_node_id].type == :user + allocated = JuMP.value(F[edge_id]) + user_idx = findsorted(user.node_id, user_node_id) + user.allocated[user_idx][priority_idx] = allocated + + # Save allocations to record + push!(record.time, t) + push!(record.allocation_network_id, allocation_model.allocation_network_id) + push!(record.user_node_id, Int(user_node_id)) + push!(record.priority, user.priorities[priority_idx]) + push!(record.demand, user.demand[user_idx][priority_idx](t)) + push!(record.allocated, allocated) + # TODO: This is now the last abstraction before the allocation update, + # should be the average abstraction since the last allocation solve + push!( + record.abstracted, + get_flow(graph, inflow_id(graph, user_node_id), user_node_id, 0), + ) + end + end + + # Write the flows to the subnetworks as allocated flows + # in the allocation object + if is_main_network(allocation_network_id) + for (allocation_network_id, main_network_source_edges) in + zip(allocation_network_ids, main_network_connections) + if is_main_network(allocation_network_id) + continue + end + for edge_id in main_network_source_edges + subnetwork_allocateds[edge_id][priority_idx] = JuMP.value(F[edge_id]) + end end - user_idx = findsorted(user.node_id, user_node_id) - allocated = JuMP.value(F[edge_id]) - user.allocated[user_idx][priority_idx] = allocated - - # Save allocations to record - push!(record.time, t) - push!(record.allocation_network_id, allocation_model.allocation_network_id) - push!(record.user_node_id, Int(user_node_id)) - push!(record.priority, user.priorities[priority_idx]) - push!(record.demand, user.demand[user_idx][priority_idx](t)) - push!(record.allocated, allocated) - # TODO: This is now the last abstraction before the allocation update, - # should be the average abstraction since the last allocation solve - push!( - record.abstracted, - get_flow(graph, inflow_id(graph, user_node_id), user_node_id, 0), - ) end return nothing end @@ -828,27 +977,43 @@ end """ Adjust the source flows. 
""" -function adjust_source_flows!( +function adjust_source_capacities!( allocation_model::AllocationModel, p::Parameters, - priority_idx::Int, + priority_idx::Int; + collect_demands::Bool = false, )::Nothing (; problem) = allocation_model - (; graph) = p + (; graph, allocation) = p (; allocation_network_id) = allocation_model + (; subnetwork_allocateds) = allocation edge_ids = graph[].edge_ids[allocation_network_id] source_constraints = problem[:source] F = problem[:F] - # It is assumed that the allocation procedure does not have to be differentiated. + main_network_source_edges = get_main_network_connections(p, allocation_network_id) + for edge_id in edge_ids - # If it is a source edge. if graph[edge_id...].allocation_network_id_source == allocation_network_id + # If it is a source edge for this allocation problem if priority_idx == 1 - # Reset the source to the current flow. + # If the optimization was just started, i.e. sources have to be reset + if edge_id in main_network_source_edges + if collect_demands + # Set the source capacity to effectively unlimited if subnetwork demands are being collected + source_capacity = Inf + else + # Set the source capacity to the value allocated to the subnetwork over this edge + source_capacity = subnetwork_allocateds[edge_id][priority_idx] + end + else + # Reset the source to the current flow from the physical layer. + source_capacity = get_flow(graph, edge_id..., 0) + end JuMP.set_normalized_rhs( source_constraints[edge_id], - get_flow(graph, edge_id..., 0), + # It is assumed that the allocation procedure does not have to be differentiated. + source_capacity, ) else # Subtract the allocated flow from the source. @@ -880,11 +1045,15 @@ function adjust_edge_capacities!( constraints_capacity = problem[:capacity] F = problem[:F] + main_network_source_edges = get_main_network_connections(p, allocation_network_id) + for edge_id in edge_ids c = capacity[edge_id...] 
- # Edges with infinite capacity have no capacity constraints - if isinf(c) + # These edges have no capacity constraints: + # - With infinite capacity + # - Being a source from the main network to a subnetwork + if isinf(c) || edge_id ∈ main_network_source_edges continue end @@ -910,9 +1079,11 @@ function save_allocation_flows!( t::Float64, allocation_model::AllocationModel, priority::Int, + collect_demands::Bool, )::Nothing (; problem, allocation_network_id) = allocation_model - (; allocation_record, graph) = p + (; allocation, graph) = p + (; record) = allocation F = problem[:F] for allocation_edge in first(F.axes) @@ -921,13 +1092,14 @@ function save_allocation_flows!( (; node_ids) = edge_metadata for i in eachindex(node_ids)[1:(end - 1)] - push!(allocation_record.time, t) - push!(allocation_record.edge_id, edge_metadata.id) - push!(allocation_record.from_node_id, node_ids[i]) - push!(allocation_record.to_node_id, node_ids[i + 1]) - push!(allocation_record.allocation_network_id, allocation_network_id) - push!(allocation_record.priority, priority) - push!(allocation_record.flow, flow) + push!(record.time, t) + push!(record.edge_id, edge_metadata.id) + push!(record.from_node_id, node_ids[i]) + push!(record.to_node_id, node_ids[i + 1]) + push!(record.allocation_network_id, allocation_network_id) + push!(record.priority, priority) + push!(record.flow, flow) + push!(record.collect_demands, collect_demands) end end return nothing @@ -937,18 +1109,34 @@ end Update the allocation optimization problem for the given subnetwork with the problem state and flows, solve the allocation problem and assign the results to the users. """ -function allocate!(p::Parameters, allocation_model::AllocationModel, t::Float64)::Nothing - (; user) = p - (; problem) = allocation_model +function allocate!( + p::Parameters, + allocation_model::AllocationModel, + t::Float64; + collect_demands::Bool = false, +)::Nothing + (; user, allocation) = p + (; problem, allocation_network_id) = allocation_model (; priorities) = user + (; subnetwork_demands) = allocation # TODO: Compute basin flow from vertical fluxes and basin volume. # Set as basin demand if the net flow is negative, set as source # in the flow_conservation constraints if the net flow is positive. 
    # Solve this as a separate problem before the priorities below
+    main_network_source_edges = get_main_network_connections(p, allocation_network_id)
+
+    if collect_demands
+        for main_network_connection in keys(subnetwork_demands)
+            if main_network_connection in main_network_source_edges
+                subnetwork_demands[main_network_connection] .= 0.0
+            end
+        end
+    end
+
     for priority_idx in eachindex(priorities)
-        adjust_source_flows!(allocation_model, p, priority_idx)
+        adjust_source_capacities!(allocation_model, p, priority_idx; collect_demands)

         # Subtract the flows used by the allocation of the previous priority from the capacities of the edges
         # or set edge capacities if priority_idx = 1
@@ -965,13 +1153,23 @@ function allocate!(p::Parameters, allocation_model::AllocationModel, t::Float64)
         JuMP.optimize!(problem)
         @debug JuMP.solution_summary(problem)
         if JuMP.termination_status(problem) !== JuMP.OPTIMAL
-            error("Allocation coudn't find optimal solution.")
+            (; allocation_network_id) = allocation_model
+            priority = priorities[priority_idx]
+            error(
+                "Allocation of subnetwork $allocation_network_id, priority $priority couldn't find optimal solution.",
+            )
         end

         # Assign the allocations to the users for this priority
         assign_allocations!(allocation_model, p, t, priority_idx)

         # Save the flows over all edges in the subnetwork
-        save_allocation_flows!(p, t, allocation_model, priorities[priority_idx])
+        save_allocation_flows!(
+            p,
+            t,
+            allocation_model,
+            priorities[priority_idx],
+            collect_demands,
+        )
     end
 end
diff --git a/core/src/bmi.jl b/core/src/bmi.jl
index bae593e9a..8b6df4d77 100644
--- a/core/src/bmi.jl
+++ b/core/src/bmi.jl
@@ -481,16 +481,30 @@ function discrete_control_affect!(
     return control_state_change
 end

-function get_allocation_model(
+function get_allocation_model(p::Parameters, allocation_network_id::Int)::AllocationModel
+    (; allocation) = p
+    (; allocation_network_ids, allocation_models) = allocation
+    idx = findsorted(allocation_network_ids, allocation_network_id)
+    if isnothing(idx)
+        error("Invalid allocation network ID $allocation_network_id.")
+    else
+        return allocation_models[idx]
+    end
+end
+
+function get_main_network_connections(
     p::Parameters,
     allocation_network_id::Int,
-)::Union{AllocationModel, Nothing}
-    for allocation_model in p.allocation_models
-        if allocation_model.allocation_network_id == allocation_network_id
-            return allocation_model
-        end
+)::Vector{Tuple{NodeID, NodeID}}
+    (; allocation) = p
+    (; allocation_network_ids, main_network_connections) = allocation
+    idx = findsorted(allocation_network_ids, allocation_network_id)
+    if isnothing(idx)
+        error("Invalid allocation network ID $allocation_network_id.")
+    else
+        return main_network_connections[idx]
     end
-    return nothing
+    return
 end

 """
@@ -594,7 +608,20 @@ end
 "Solve the allocation problem for all users and assign allocated abstractions to user nodes."
function update_allocation!(integrator)::Nothing (; p, t) = integrator - for allocation_model in integrator.p.allocation_models + (; allocation) = p + (; allocation_models) = allocation + + # If a main network is present, collect demands of subnetworks + if has_main_network(allocation) + for allocation_model in Iterators.drop(allocation_models, 1) + allocate!(p, allocation_model, t; collect_demands = true) + end + end + + # Solve the allocation problems + # If a main network is present this is solved first, + # which provides allocation to the subnetworks + for allocation_model in allocation_models allocate!(p, allocation_model, t) end end diff --git a/core/src/create.jl b/core/src/create.jl index 3bc8b70c8..0c571e7af 100644 --- a/core/src/create.jl +++ b/core/src/create.jl @@ -196,10 +196,26 @@ end const nonconservative_nodetypes = Set{String}(["Basin", "LevelBoundary", "FlowBoundary", "Terminal", "User"]) -function generate_allocation_models!(p::Parameters, config::Config)::Nothing - (; graph, allocation_models) = p +function initialize_allocation!(p::Parameters, config::Config)::Nothing + (; graph, allocation) = p + (; allocation_network_ids, allocation_models, main_network_connections) = allocation + allocation_network_ids_ = sort(collect(keys(graph[].node_ids))) - for allocation_network_id in keys(graph[].node_ids) + errors = non_positive_allocation_network_id(graph) + if errors + error("Allocation network initialization failed.") + end + + for allocation_network_id in allocation_network_ids_ + push!(allocation_network_ids, allocation_network_id) + push!(main_network_connections, Tuple{NodeID, NodeID}[]) + end + + if first(allocation_network_ids_) == 1 + find_subnetwork_connections!(p) + end + + for allocation_network_id in allocation_network_ids_ push!( allocation_models, AllocationModel(config, allocation_network_id, p, config.allocation.timestep), @@ -479,10 +495,10 @@ function Basin(db::DB, config::Config, chunk_sizes::Vector{Int})::Basin current_area = DiffCache(current_area, chunk_sizes) end - precipitation = fill(NaN, length(node_id)) - potential_evaporation = fill(NaN, length(node_id)) - drainage = fill(NaN, length(node_id)) - infiltration = fill(NaN, length(node_id)) + precipitation = zeros(length(node_id)) + potential_evaporation = zeros(length(node_id)) + drainage = zeros(length(node_id)) + infiltration = zeros(length(node_id)) table = (; precipitation, potential_evaporation, drainage, infiltration) area, level, storage = create_storage_tables(db, config) @@ -628,7 +644,7 @@ function User(db::DB, config::Config)::User error("Problems encountered when parsing User static and time node IDs.") end - # The highest priority number given, which corresponds to the least important demands + # All provided priorities priorities = sort(unique(union(static.priority, time.priority))) active = BitVector() @@ -792,16 +808,22 @@ function Parameters(db::DB, config::Config)::Parameters n_states = length(get_ids(db, "Basin")) + length(get_ids(db, "PidControl")) chunk_sizes = get_chunk_sizes(config, n_states) graph = create_graph(db, config, chunk_sizes) - allocation_models = Vector{AllocationModel}() - allocation_record = (; - time = Float64[], - edge_id = Int[], - from_node_id = Int[], - to_node_id = Int[], - allocation_network_id = Int[], - priority = Int[], - flow = Float64[], - # BitVector() + allocation = Allocation( + Int[], + AllocationModel[], + Vector{Tuple{NodeID, NodeID}}[], + Dict{Tuple{NodeID, NodeID}, Float64}(), + Dict{Tuple{NodeID, NodeID}, Float64}(), + (; + time = 
Float64[], + edge_id = Int[], + from_node_id = Int[], + to_node_id = Int[], + allocation_network_id = Int[], + priority = Int[], + flow = Float64[], + collect_demands = BitVector(), + ), ) if !valid_edges(graph) @@ -839,8 +861,7 @@ function Parameters(db::DB, config::Config)::Parameters p = Parameters( config.starttime, graph, - allocation_models, - allocation_record, + allocation, basin, linear_resistance, manning_resistance, @@ -864,7 +885,7 @@ function Parameters(db::DB, config::Config)::Parameters # Allocation data structures if config.allocation.use_allocation - generate_allocation_models!(p, config) + initialize_allocation!(p, config) end return p end diff --git a/core/src/io.jl b/core/src/io.jl index 0f35ac12c..55482e0e5 100644 --- a/core/src/io.jl +++ b/core/src/io.jl @@ -276,21 +276,22 @@ function allocation_flow_table( allocation_network_id::Vector{Int}, priority::Vector{Int}, flow::Vector{Float64}, - # gathering_demands::BitVector + collect_demands::BitVector, } (; config) = model - (; allocation_record) = model.integrator.p + (; record) = model.integrator.p.allocation - time = datetime_since.(allocation_record.time, config.starttime) + time = datetime_since.(record.time, config.starttime) return (; time, - allocation_record.edge_id, - allocation_record.from_node_id, - allocation_record.to_node_id, - allocation_record.allocation_network_id, - allocation_record.priority, - allocation_record.flow, + record.edge_id, + record.from_node_id, + record.to_node_id, + record.allocation_network_id, + record.priority, + record.flow, + record.collect_demands, ) end diff --git a/build/libribasim/src/libribasim.jl b/core/src/libribasim.jl similarity index 99% rename from build/libribasim/src/libribasim.jl rename to core/src/libribasim.jl index 448d68d02..e8af88f50 100644 --- a/build/libribasim/src/libribasim.jl +++ b/core/src/libribasim.jl @@ -1,7 +1,7 @@ module libribasim import BasicModelInterface as BMI -using Ribasim +import ..Ribasim # globals model::Union{Ribasim.Model, Nothing} = nothing diff --git a/core/src/main.jl b/core/src/main.jl index fb621c0e9..465dfe4be 100644 --- a/core/src/main.jl +++ b/core/src/main.jl @@ -5,9 +5,13 @@ function help(x::AbstractString)::Cint end main(toml_path::AbstractString)::Cint = main([toml_path]) +main()::Cint = main(ARGS) """ + main(toml_path::AbstractString)::Cint main(ARGS::Vector{String})::Cint + main()::Cint + This is the main entry point of the application. Performs argument parsing and sets up logging for both terminal and file. Calls Ribasim.run() and handles exceptions to convert to exit codes. 
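
For context, a minimal usage sketch of the three `main` methods documented above (the TOML path is a placeholder, not a file from this repository; the exit code is conventionally 0 on success):

```julia
using Ribasim

# Run a model from a TOML path; returns a Cint exit code.
code = Ribasim.main("path/to/ribasim.toml")

# Equivalent call through the argument-vector method used by the CLI wrapper.
code = Ribasim.main(["path/to/ribasim.toml"])

# With no arguments, ARGS is forwarded, which is how the compiled ribasim_cli invokes it.
# code = Ribasim.main()
```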
diff --git a/core/src/solve.jl b/core/src/solve.jl
index cab9e1ada..317453640 100644
--- a/core/src/solve.jl
+++ b/core/src/solve.jl
@@ -21,6 +21,34 @@ struct AllocationModel
     Δt_allocation::Float64
 end

+"""
+Object for all information about allocation
+allocation_network_ids: The unique sorted allocation network IDs
+allocation_models: The allocation models for the main network and subnetworks corresponding to
+    allocation_network_ids
+main_network_connections: (from_id, to_id) connections per subnetwork, from the main network to that subnetwork
+subnetwork_demands: The demand per priority of each edge from the main network to a subnetwork
+subnetwork_allocateds: The allocated flow per priority of each edge from the main network to a subnetwork
+record: A record of all flows computed by allocation optimization, eventually saved to the output file
+"""
+struct Allocation
+    allocation_network_ids::Vector{Int}
+    allocation_models::Vector{AllocationModel}
+    main_network_connections::Vector{Vector{Tuple{NodeID, NodeID}}}
+    subnetwork_demands::Dict{Tuple{NodeID, NodeID}, Vector{Float64}}
+    subnetwork_allocateds::Dict{Tuple{NodeID, NodeID}, Vector{Float64}}
+    record::@NamedTuple{
+        time::Vector{Float64},
+        edge_id::Vector{Int},
+        from_node_id::Vector{Int},
+        to_node_id::Vector{Int},
+        allocation_network_id::Vector{Int},
+        priority::Vector{Int},
+        flow::Vector{Float64},
+        collect_demands::BitVector,
+    }
+end
+
 @enumx EdgeType flow control none

 """
@@ -418,6 +446,32 @@ struct User <: AbstractParameterNode
         allocated::Vector{Float64},
         abstracted::Vector{Float64},
     }
+
+    function User(
+        node_id,
+        active,
+        demand,
+        allocated,
+        return_factor,
+        min_level,
+        priorities,
+        record,
+    )
+        if valid_demand(node_id, demand, priorities)
+            return new(
+                node_id,
+                active,
+                demand,
+                allocated,
+                return_factor,
+                min_level,
+                priorities,
+                record,
+            )
+        else
+            error("Invalid demand")
+        end
+    end
 end

 "Subgrid linearly interpolates basin levels."
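
As a minimal sketch of the new `Allocation` container (mirroring how `Parameters` constructs it in `core/src/create.jl` above; the node IDs and demand values are illustrative, echoing the test model used later in this diff):

```julia
using Ribasim: Allocation, AllocationModel, NodeID

# An empty Allocation, with dictionaries typed as the struct fields declare them.
allocation = Allocation(
    Int[],                                          # allocation_network_ids
    AllocationModel[],                              # allocation_models
    Vector{Tuple{NodeID, NodeID}}[],                # main_network_connections
    Dict{Tuple{NodeID, NodeID}, Vector{Float64}}(), # subnetwork_demands
    Dict{Tuple{NodeID, NodeID}, Vector{Float64}}(), # subnetwork_allocateds
    (;
        time = Float64[],
        edge_id = Int[],
        from_node_id = Int[],
        to_node_id = Int[],
        allocation_network_id = Int[],
        priority = Int[],
        flow = Float64[],
        collect_demands = BitVector(),
    ),
)

# Demands are stored per main-network → subnetwork edge, one value per priority.
allocation.subnetwork_demands[(NodeID(2), NodeID(11))] = [4.0, 4.0, 0.0]
```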
@@ -448,18 +502,7 @@ struct Parameters{T, C1, C2} MetaGraphsNext.var"#11#13", Float64, } - allocation_models::Vector{AllocationModel} - # TODO: Move to p.allocation - allocation_record::@NamedTuple{ - time::Vector{Float64}, - edge_id::Vector{Int}, - from_node_id::Vector{Int}, - to_node_id::Vector{Int}, - allocation_network_id::Vector{Int}, - priority::Vector{Int}, - flow::Vector{Float64}, - # gathering_demands::BitVector - } + allocation::Allocation basin::Basin{T, C1} linear_resistance::LinearResistance manning_resistance::ManningResistance diff --git a/core/src/utils.jl b/core/src/utils.jl index 5e7e4334c..b90f08237 100644 --- a/core/src/utils.jl +++ b/core/src/utils.jl @@ -42,12 +42,19 @@ function create_graph(db::DB, config::Config, chunk_sizes::Vector{Int})::MetaGra db, "SELECT fid, from_node_id, to_node_id, edge_type, allocation_network_id FROM Edge ORDER BY fid", ) + # Node IDs per subnetwork node_ids = Dict{Int, Set{NodeID}}() + # Allocation edges per subnetwork edge_ids = Dict{Int, Set{Tuple{NodeID, NodeID}}}() + # Source edges per subnetwork edges_source = Dict{Int, Set{EdgeMetadata}}() + # The number of flow edges flow_counter = 0 + # Dictionary from flow edge to index in flow vector flow_dict = Dict{Tuple{NodeID, NodeID}, Int}() + # The number of nodes with vertical flow (interaction with outside of model) flow_vertical_counter = 0 + # Dictionary from node ID to index in vertical flow vector flow_vertical_dict = Dict{NodeID, Int}() graph = MetaGraph( DiGraph(); @@ -108,6 +115,10 @@ function create_graph(db::DB, config::Config, chunk_sizes::Vector{Int})::MetaGra end end + if incomplete_subnetwork(graph, node_ids) + error("Incomplete connectivity in subnetwork") + end + flow = zeros(flow_counter) flow_vertical = zeros(flow_vertical_counter) if config.solver.autodiff @@ -628,12 +639,12 @@ end Update `table` at row index `i`, with the values of a given row. `table` must be a NamedTuple of vectors with all variables that must be loaded. The row must contain all the column names that are present in the table. -If a value is NaN, it is not set. +If a value is missing, it is not set. 
""" function set_table_row!(table::NamedTuple, row, i::Int)::NamedTuple for (symbol, vector) in pairs(table) val = getproperty(row, symbol) - if !isnan(val) + if !ismissing(val) vector[i] = val end end @@ -675,7 +686,7 @@ function set_current_value!( for (i, id) in enumerate(node_id) for (symbol, vector) in pairs(table) idx = findlast( - row -> row.node_id == id && !isnan(getproperty(row, symbol)), + row -> row.node_id == id && !ismissing(getproperty(row, symbol)), pre_table, ) if idx !== nothing @@ -1263,3 +1274,11 @@ function allocation_path_exists_in_graph( end return false end + +function has_main_network(allocation::Allocation)::Bool + return first(allocation.allocation_network_ids) == 1 +end + +function is_main_network(allocation_network_id::Int)::Bool + return allocation_network_id == 1 +end diff --git a/core/src/validation.jl b/core/src/validation.jl index 8ff6e59cc..98d99d03c 100644 --- a/core/src/validation.jl +++ b/core/src/validation.jl @@ -185,11 +185,11 @@ end @version BasinTimeV1 begin node_id::Int time::DateTime - drainage::Float64 - potential_evaporation::Float64 - infiltration::Float64 - precipitation::Float64 - urban_runoff::Float64 + drainage::Union{Missing, Float64} + potential_evaporation::Union{Missing, Float64} + infiltration::Union{Missing, Float64} + precipitation::Union{Missing, Float64} + urban_runoff::Union{Missing, Float64} end @version BasinProfileV1 begin @@ -636,3 +636,46 @@ function valid_subgrid( return !errors end + +function valid_demand( + node_id::Vector{NodeID}, + demand::Vector{ + Vector{LinearInterpolation{Vector{Float64}, Vector{Float64}, true, Float64}}, + }, + priorities::Vector{Int}, +)::Bool + errors = false + + for (col, id) in zip(demand, node_id) + for (demand_p_itp, p_itp) in zip(col, priorities) + if any(demand_p_itp.u .< 0.0) + @error "Demand of user node $id with priority $p_itp should be non-negative" + errors = true + end + end + end + return !errors +end + +function incomplete_subnetwork(graph::MetaGraph, node_ids::Dict{Int, Set{NodeID}})::Bool + errors = false + for (allocation_network_id, subnetwork_node_ids) in node_ids + subnetwork, _ = induced_subgraph(graph, code_for.(Ref(graph), subnetwork_node_ids)) + if !is_connected(subnetwork) + @error "All nodes in subnetwork $allocation_network_id should be connected" + errors = true + end + end + return errors +end + +function non_positive_allocation_network_id(graph::MetaGraph)::Bool + errors = false + for allocation_network_id in keys(graph[].node_ids) + if (allocation_network_id <= 0) + @error "Allocation network id $allocation_network_id needs to be a positive integer." 
+ errors = true + end + end + return errors +end diff --git a/core/test/allocation_test.jl b/core/test/allocation_test.jl index e67164ee5..35632e181 100644 --- a/core/test/allocation_test.jl +++ b/core/test/allocation_test.jl @@ -1,5 +1,4 @@ @testitem "Allocation solve" begin - using PreallocationTools: get_tmp using Ribasim: NodeID import SQLite import JuMP @@ -25,7 +24,7 @@ end Ribasim.set_flow!(graph, NodeID(1), NodeID(2), 4.5) # Source flow - allocation_model = p.allocation_models[1] + allocation_model = p.allocation.allocation_models[1] Ribasim.allocate!(p, allocation_model, 0.0) F = allocation_model.problem[:F] @@ -55,7 +54,7 @@ end config = Ribasim.Config(toml_path; allocation_objective_type = "quadratic_absolute") model = Ribasim.run(config) @test successful_retcode(model) - problem = model.integrator.p.allocation_models[1].problem + problem = model.integrator.p.allocation.allocation_models[1].problem objective = JuMP.objective_function(problem) @test objective isa JuMP.QuadExpr # Quadratic expression F = problem[:F] @@ -71,7 +70,7 @@ end config = Ribasim.Config(toml_path; allocation_objective_type = "quadratic_relative") model = Ribasim.run(config) @test successful_retcode(model) - problem = model.integrator.p.allocation_models[1].problem + problem = model.integrator.p.allocation.allocation_models[1].problem objective = JuMP.objective_function(problem) @test objective isa JuMP.QuadExpr # Quadratic expression @test objective.aff.constant == 2.0 @@ -88,7 +87,7 @@ end config = Ribasim.Config(toml_path; allocation_objective_type = "linear_absolute") model = Ribasim.run(config) @test successful_retcode(model) - problem = model.integrator.p.allocation_models[1].problem + problem = model.integrator.p.allocation.allocation_models[1].problem objective = JuMP.objective_function(problem) @test objective isa JuMP.AffExpr # Affine expression @test :F_abs in keys(problem.obj_dict) @@ -105,7 +104,7 @@ end config = Ribasim.Config(toml_path; allocation_objective_type = "linear_relative") model = Ribasim.run(config) @test successful_retcode(model) - problem = model.integrator.p.allocation_models[1].problem + problem = model.integrator.p.allocation.allocation_models[1].problem objective = JuMP.objective_function(problem) @test objective isa JuMP.AffExpr # Affine expression @test :F_abs in keys(problem.obj_dict) @@ -131,7 +130,7 @@ end "../../generated_testmodels/fractional_flow_subnetwork/ribasim.toml", ) model = Ribasim.BMI.initialize(Ribasim.Model, toml_path) - problem = model.integrator.p.allocation_models[1].problem + problem = model.integrator.p.allocation.allocation_models[1].problem F = problem[:F] @test JuMP.normalized_coefficient( problem[:fractional_flow][(NodeID(3), NodeID(5))], @@ -165,7 +164,7 @@ end @test record_control.control_state == ["A", "B"] fractional_flow_constraints = - model.integrator.p.allocation_models[1].problem[:fractional_flow] + model.integrator.p.allocation.allocation_models[1].problem[:fractional_flow] @test JuMP.normalized_coefficient( problem[:fractional_flow][(NodeID(3), NodeID(5))], F[(NodeID(2), NodeID(3))], @@ -175,3 +174,105 @@ end F[(NodeID(2), NodeID(3))], ) ≈ -0.25 end + +@testitem "main allocation network initialization" begin + using SQLite + using Ribasim: NodeID + + toml_path = normpath( + @__DIR__, + "../../generated_testmodels/main_network_with_subnetworks/ribasim.toml", + ) + @test ispath(toml_path) + cfg = Ribasim.Config(toml_path) + db_path = Ribasim.input_path(cfg, cfg.database) + db = SQLite.DB(db_path) + p = Ribasim.Parameters(db, cfg) + 
close(db) + (; allocation, graph) = p + (; main_network_connections, allocation_network_ids) = allocation + @test Ribasim.has_main_network(allocation) + @test Ribasim.is_main_network(first(allocation_network_ids)) + + # Connections from main network to subnetworks + @test isempty(main_network_connections[1]) + @test only(main_network_connections[2]) == (NodeID(2), NodeID(11)) + @test only(main_network_connections[3]) == (NodeID(6), NodeID(24)) + @test only(main_network_connections[4]) == (NodeID(10), NodeID(38)) + + # main-sub connections are part of main network allocation graph + allocation_edges_main_network = graph[].edge_ids[1] + @test Tuple{NodeID, NodeID}[(2, 11), (6, 24), (10, 38)] ⊆ allocation_edges_main_network + + # Subnetworks interpreted as users require variables and constraints to + # support absolute value expressions in the objective function + allocation_model_main_network = Ribasim.get_allocation_model(p, 1) + problem = allocation_model_main_network.problem + @test problem[:F_abs].axes[1] == NodeID[11, 24, 38] + @test problem[:abs_positive].axes[1] == NodeID[11, 24, 38] + @test problem[:abs_negative].axes[1] == NodeID[11, 24, 38] + + # In each subnetwork, the connection from the main network to the subnetwork is + # interpreted as a source + @test Ribasim.get_allocation_model(p, 3).problem[:source].axes[1] == + Tuple{NodeID, NodeID}[(2, 11)] + @test Ribasim.get_allocation_model(p, 5).problem[:source].axes[1] == + Tuple{NodeID, NodeID}[(6, 24)] + @test Ribasim.get_allocation_model(p, 7).problem[:source].axes[1] == + Tuple{NodeID, NodeID}[(10, 38)] +end + +@testitem "allocation with main network optimization problem" begin + using SQLite + using Ribasim: NodeID + using JuMP + + toml_path = normpath( + @__DIR__, + "../../generated_testmodels/main_network_with_subnetworks/ribasim.toml", + ) + @test ispath(toml_path) + cfg = Ribasim.Config(toml_path) + db_path = Ribasim.input_path(cfg, cfg.database) + db = SQLite.DB(db_path) + p = Ribasim.Parameters(db, cfg) + close(db) + + (; allocation, user, graph) = p + (; allocation_models, subnetwork_demands, subnetwork_allocateds) = allocation + t = 0.0 + + # Collecting demands + for allocation_model in allocation_models[2:end] + Ribasim.allocate!(p, allocation_model, t; collect_demands = true) + end + + @test subnetwork_demands[(NodeID(2), NodeID(11))] ≈ [4.0, 4.0, 0.0] + @test subnetwork_demands[(NodeID(6), NodeID(24))] ≈ [0.001333333333, 0.0, 0.0] + @test subnetwork_demands[(NodeID(10), NodeID(38))] ≈ [0.001, 0.002, 0.002] + + # Solving for the main network, + # containing subnetworks as users + allocation_model = allocation_models[1] + (; problem) = allocation_model + Ribasim.allocate!(p, allocation_model, t) + + # Main network objective function + objective = JuMP.objective_function(problem) + objective_variables = keys(objective.terms) + F_abs = problem[:F_abs] + @test F_abs[NodeID(11)] ∈ objective_variables + @test F_abs[NodeID(24)] ∈ objective_variables + @test F_abs[NodeID(38)] ∈ objective_variables + + # Running full allocation algorithm + Ribasim.set_flow!(graph, NodeID(1), NodeID(2), 4.5) + Ribasim.update_allocation!((; p, t)) + + @test subnetwork_allocateds[NodeID(2), NodeID(11)] ≈ [4.0, 0.49766666, 0.0] + @test subnetwork_allocateds[NodeID(6), NodeID(24)] ≈ [0.00133333333, 0.0, 0.0] + @test subnetwork_allocateds[NodeID(10), NodeID(38)] ≈ [0.001, 0.0, 0.0] + + @test user.allocated[2] ≈ [4.0, 0.0, 0.0] + @test user.allocated[7] ≈ [0.001, 0.0, 0.0] +end diff --git a/core/test/create_test.jl b/core/test/create_test.jl new 
file mode 100644 index 000000000..946d6664f --- /dev/null +++ b/core/test/create_test.jl @@ -0,0 +1,89 @@ +@testitem "Non-positive allocation network ID" begin + using MetaGraphsNext + using Graphs + using Logging + using Ribasim + using Accessors: @set + + graph = MetaGraph( + DiGraph(); + label_type = Ribasim.NodeID, + vertex_data_type = Ribasim.NodeMetadata, + edge_data_type = Symbol, + graph_data = Tuple, + ) + + graph[Ribasim.NodeID(1)] = Ribasim.NodeMetadata(Symbol(:delft), 1) + graph[Ribasim.NodeID(2)] = Ribasim.NodeMetadata(Symbol(:denhaag), -1) + + graph[1, 2] = :yes + + node_ids = Dict{Int, Set{Ribasim.NodeID}}() + node_ids[0] = Set{Ribasim.NodeID}() + node_ids[-1] = Set{Ribasim.NodeID}() + push!(node_ids[0], Ribasim.NodeID(1)) + push!(node_ids[-1], Ribasim.NodeID(2)) + + graph_data = (; node_ids,) + graph = @set graph.graph_data = graph_data + + logger = TestLogger() + with_logger(logger) do + Ribasim.non_positive_allocation_network_id(graph) + end + + @test length(logger.logs) == 2 + @test logger.logs[1].level == Error + @test logger.logs[1].message == + "Allocation network id 0 needs to be a positive integer." + @test logger.logs[2].level == Error + @test logger.logs[2].message == + "Allocation network id -1 needs to be a positive integer." +end + +@testitem "Incomplete subnetwork" begin + using MetaGraphsNext + using Graphs + using Logging + using Ribasim + + graph = MetaGraph( + DiGraph(); + label_type = Ribasim.NodeID, + vertex_data_type = Ribasim.NodeMetadata, + edge_data_type = Symbol, + graph_data = Tuple, + ) + + node_ids = Dict{Int, Set{Ribasim.NodeID}}() + node_ids[1] = Set{Ribasim.NodeID}() + push!(node_ids[1], Ribasim.NodeID(1)) + push!(node_ids[1], Ribasim.NodeID(2)) + push!(node_ids[1], Ribasim.NodeID(3)) + node_ids[2] = Set{Ribasim.NodeID}() + push!(node_ids[2], Ribasim.NodeID(4)) + push!(node_ids[2], Ribasim.NodeID(5)) + push!(node_ids[2], Ribasim.NodeID(6)) + + graph[Ribasim.NodeID(1)] = Ribasim.NodeMetadata(Symbol(:delft), 1) + graph[Ribasim.NodeID(2)] = Ribasim.NodeMetadata(Symbol(:denhaag), 1) + graph[Ribasim.NodeID(3)] = Ribasim.NodeMetadata(Symbol(:rdam), 1) + graph[Ribasim.NodeID(4)] = Ribasim.NodeMetadata(Symbol(:adam), 2) + graph[Ribasim.NodeID(5)] = Ribasim.NodeMetadata(Symbol(:utrecht), 2) + graph[Ribasim.NodeID(6)] = Ribasim.NodeMetadata(Symbol(:leiden), 2) + + graph[Ribasim.NodeID(1), Ribasim.NodeID(2)] = :yes + graph[Ribasim.NodeID(1), Ribasim.NodeID(3)] = :yes + graph[4, 5] = :yes + + logger = TestLogger() + + with_logger(logger) do + errors = Ribasim.incomplete_subnetwork(graph, node_ids) + @test errors == true + end + + @test length(logger.logs) == 1 + @test logger.logs[1].level == Error + @test logger.logs[1].message == "All nodes in subnetwork 2 should be connected" +end diff --git a/core/test/libribasim_test.jl b/core/test/libribasim_test.jl index 3bcac1fc0..1eb42f431 100644 --- a/core/test/libribasim_test.jl +++ b/core/test/libribasim_test.jl @@ -1,8 +1,4 @@ @testitem "libribasim" begin - import BasicModelInterface as BMI - - include("../../build/libribasim/src/libribasim.jl") - toml_path = normpath(@__DIR__, "../../generated_testmodels/basic/ribasim.toml") # data from which we create pointers for libribasim diff --git a/core/test/run_models_test.jl b/core/test/run_models_test.jl index 267bb41f4..938953e6b 100644 --- a/core/test/run_models_test.jl +++ b/core/test/run_models_test.jl @@ -113,6 +113,42 @@ end @test successful_retcode(model) end +@testitem "leaky bucket model" begin + using SciMLBase: successful_retcode + import 
BasicModelInterface as BMI
+
+    toml_path = normpath(@__DIR__, "../../generated_testmodels/leaky_bucket/ribasim.toml")
+    @test ispath(toml_path)
+    model = Ribasim.Model(toml_path)
+    @test model isa Ribasim.Model
+
+    stor = model.integrator.u.storage
+    prec = model.integrator.p.basin.precipitation
+    evap = model.integrator.p.basin.potential_evaporation
+    drng = model.integrator.p.basin.drainage
+    infl = model.integrator.p.basin.infiltration
+    # The dynamic data has missings, but these are not set.
+    @test prec == [0.0]
+    @test evap == [0.0]
+    @test drng == [0.003]
+    @test infl == [0.0]
+    init_stor = 1000.0
+    @test stor == [init_stor]
+    BMI.update_until(model, 1.5 * 86400)
+    @test prec == [0.0]
+    @test evap == [0.0]
+    @test drng == [0.003]
+    @test infl == [0.001]
+    @test stor ≈ Float32[init_stor + 86400 * (0.003 * 1.5 - 0.001 * 0.5)]
+    BMI.update_until(model, 2.5 * 86400)
+    @test prec == [0.00]
+    @test evap == [0.0]
+    @test drng == [0.001]
+    @test infl == [0.002]
+    @test stor ≈ Float32[init_stor + 86400 * (0.003 * 2.0 + 0.001 * 0.5 - 0.001 - 0.002 * 0.5)]
+    @test successful_retcode(Ribasim.solve!(model))
+end
+
 @testitem "basic model" begin
     using Logging: Debug, with_logger
     using LoggingExtras
diff --git a/core/test/validation_test.jl b/core/test/validation_test.jl
index 40428028d..65040d70b 100644
--- a/core/test/validation_test.jl
+++ b/core/test/validation_test.jl
@@ -401,3 +401,27 @@ end
     @test logger.logs[2].message ==
           "Basin / subgrid_level subgrid_id 1 has repeated element levels, this cannot be interpolated."
 end
+
+@testitem "negative demand" begin
+    using Logging
+    using DataInterpolations: LinearInterpolation
+    logger = TestLogger()
+
+    with_logger(logger) do
+        @test_throws "Invalid demand" Ribasim.User(
+            [Ribasim.NodeID(1)],
+            [true],
+            [[LinearInterpolation([-5.0, -5.0], [-1.8, 1.8])]],
+            [0.0, -0.0],
+            [0.9],
+            [0.9],
+            [1],
+            [],
+        )
+    end
+
+    @test length(logger.logs) == 1
+    @test logger.logs[1].level == Error
+    @test logger.logs[1].message ==
+          "Demand of user node #1 with priority 1 should be non-negative"
+end
diff --git a/docs/Project.toml b/docs/Project.toml
deleted file mode 100644
index a15dc604d..000000000
--- a/docs/Project.toml
+++ /dev/null
@@ -1,36 +0,0 @@
-name = "docs"
-uuid = "8daea9ca-fc6c-4731-aa85-717fa0b706bc"
-authors = ["Deltares and contributors "]
-description = "Build Ribasim docs and generate schemas"
-manifest = "../Manifest.toml"
-version = "0.1.0"
-
-[deps]
-Configurations = "5218b696-f38b-4ac9-8b61-a12ec717816d"
-DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
-Dates = "ade2ca70-3891-5945-98fb-dc099432e06a"
-Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4"
-DocumenterMarkdown = "997ab1e6-3595-5248-9280-8efb232c3433"
-IJulia = "7073ff75-c697-5162-941a-fcdaad2a7d2a"
-InteractiveUtils = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
-JSON3 = "0f8b85d8-7281-11e9-16c2-39a750bddbf1"
-Legolas = "741b9549-f6ed-4911-9fbf-4a1c0c97f0cd"
-Logging = "56ddb016-857b-54e1-b83d-db4d58db5568"
-MarkdownTables = "1862ce21-31c7-451e-824c-f20fa3f90fa2"
-OrderedCollections = "bac558e1-5e72-5ebc-8fee-abe8a469f55d"
-Ribasim = "aac5e3d9-0b8f-4d4f-8241-b1a7a9632635"
-
-[compat]
-Configurations = "0.17"
-DataFrames = "1"
-Dates = "<0.0.1,1"
-Documenter = "0.27,1"
-DocumenterMarkdown = "0.2"
-IJulia = "1"
-InteractiveUtils = "<0.0.1,1"
-JSON3 = "1.12"
-Legolas = "0.5"
-Logging = "<0.0.1,1"
-MarkdownTables = "1"
-OrderedCollections = "1.6"
-julia = "1.10"
diff --git a/docs/core/allocation.qmd b/docs/core/allocation.qmd
index f80c79a9c..e09d11765 100644
--- a/docs/core/allocation.qmd
+++ b/docs/core/allocation.qmd
@@ -2,15 +2,25 @@
 title: "Allocation"
 ---

-Allocation is the process of assigning an allocated abstraction flow rate to user nodes in the model based on information about sources, user demands over various priorities, constraints introduced by nodes, local water availability and graph topology. The allocation procedure implemented in Ribasim is heavily inspired by the [maximum flow problem](https://en.wikipedia.org/wiki/Maximum_flow_problem).
+# Introduction
+Allocation is the process of assigning an allocated abstraction flow rate to user nodes in the physical layer of the model based on information about sources, user demands over various priorities, constraints introduced by nodes, local water availability and graph topology. The allocation procedure implemented in Ribasim is heavily inspired by the [maximum flow problem](https://en.wikipedia.org/wiki/Maximum_flow_problem).

-The allocation problem is solved per subnetwork of the Ribasim model. The subnetwork is used to formulate an optimization problem with the [JuMP](https://jump.dev/JuMP.jl/stable/) package, which is solved using the [HiGHS solver](https://highs.dev/). For more information see also the example of solving the maximum flow problem with `JuMP.jl` [here](https://jump.dev/JuMP.jl/stable/tutorials/linear/network_flows/#The-max-flow-problem).
+The allocation problem is solved per subnetwork of the Ribasim model. The subnetwork is used to formulate an optimization problem with the [JuMP](https://jump.dev/JuMP.jl/stable/) package, which is solved using the [HiGHS solver](https://highs.dev/). For more in-depth information, see also the example of solving the maximum flow problem with `JuMP.jl` [here](https://jump.dev/JuMP.jl/stable/tutorials/linear/network_flows/#The-max-flow-problem).

-# The allocation problem
+# The high-level algorithm
+The allocation algorithm consists of 3 types of optimization problems:
+
+1. **Subnetwork demand collection**: Collect the demands of a subnetwork from the main network by optimizing with unlimited capacity from the main network;
+2. **Main network allocation**: Allocate to subnetworks with the above collected demand, and users in the main network;
+3. **Subnetwork allocation**: Allocate to users in the subnetworks with the flows allocated to the subnetwork in the main network allocation.
+
+The full allocation algorithm performs step 1 for all subnetworks, then step 2, and finally step 3 for all subnetworks. Models without a main network are also supported; in that case steps 1 and 2 are skipped. A code sketch of this ordering is shown further below.
+
+# Elements of allocation

 The following data of the parameters and state of a Ribasim model are relevant for the allocation problem.

-## Allocation problem input
+## Schematisation input

 ### The subnetwork

@@ -22,7 +32,7 @@ Sources are indicated by a set of edges in the subnetwork
 $$
 E_S^\text{source} \subset \left(S \times S\right) \cap E.
 $$
-That is, if $(i,j) \in E_S^\text{source}$, then $Q_{ij}$ (see the [formal model description](equations.qmd#formal-model-description)) is treated as a source flow in the allocation problem.
+That is, if $(i,j) \in E_S^\text{source}$, then $Q_{ij}$ (see the [formal model description](equations.qmd#formal-model-description)) is treated as a source flow in the allocation problem. These edges either come from a boundary/source node (e.g. a level or flow boundary) or connect the main network to a subnetwork.
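
To make the ordering above concrete, here is a rough sketch of the driver loop (it mirrors `update_allocation!` in `core/src/bmi.jl` from this change; `p` and `t` are assumed to come from an initialized model with allocation active):

```julia
(; allocation) = p
(; allocation_models) = allocation

# 1. Subnetwork demand collection: all models after the main network (index 1)
#    are optimized with unlimited capacity from the main network.
if Ribasim.has_main_network(allocation)
    for allocation_model in Iterators.drop(allocation_models, 1)
        Ribasim.allocate!(p, allocation_model, t; collect_demands = true)
    end
end

# 2. and 3. Allocation: the main network (if present) is solved first, fixing the
#    flows into the subnetworks, after which each subnetwork allocates to its users.
for allocation_model in allocation_models
    Ribasim.allocate!(p, allocation_model, t)
end
```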
### User demands @@ -35,6 +45,8 @@ $$ On this page we assume that the priorities are given by all integers from $1$ to some $p_{\max} \in \mathbb{N}$. However, in the Ribasim input this is not a requirement; some of these in between priority values can be missing, only the ordering of the given priorities is taken into account. ::: +## Simulation (physical layer) input + ### Vertical fluxes and local storage Apart from the source flows denoted by edges, there are other sources of water in the subnetwork, associated with the basins in the subnetwork $B_S = B \cap S$. Firstly there is the sum of the vertical fluxes (precipitation, evaporation, infiltration and drainage) for each basin @@ -48,20 +60,20 @@ $$ $$ Note that this value can be negative, which we interpret as a demand from the basin. -### Flow magnitude and direction constraints +### Constraining factors +#### Flow magnitude and direction constraints Nodes in the Ribasim model that have a `max_flow_rate`, i.e. pumps and outlets, put a constraint on the flow through that node. Some nodes only allow flow in one direction, like pumps, outlets and tabulated rating curves. -### Fractional flows and user return flows - +#### Fractional flows and user return flows Both fractional flow nodes and user nodes dictate proportional relationships between flows over edges in the subnetwork. Users have a return factor $0 \le r_i \le 1, i \in U_S$. -## The allocation optimization problem - -### The allocation subgraph +## The allocation graph A new graph is created from the subnetwork, which we call an allocation graph. The allocation graph is almost a subgraph of the main (flow) model, apart from the fact that an allocation graph can contain edges which are not in the main model. +### Nodes and edges + The allocation graph consists of: - Nodes $V'_S \subset V_S$, where each basin, source and user in the subnetwork get a node in the allocation graph. Also nodes that have fractional flow outneighbors get a node in the allocation graph. @@ -74,11 +86,11 @@ For notational convenience, we use the notation V^{\text{in}}_S(j) = \left\{i \in V'_S : (i,j) \in E_S\right\} \end{align} -for the set of in-neighbors and out-neighbors of a node in the allocation graph respectively +for the set of in-neighbors and out-neighbors of a node in the allocation graph respectively. -### The allocation graph capacities +### Capacities -The capacities of the edges of the allocation graph are collected in the sparse capacity matrix $C_S \in \overline{\mathbb{R}}_{\ge 0}^{n'\times n'}$ where $n' = \#V'_S$ is the number of nodes in the allocation graph. The capacities can be infinite. +Each edge in the allocation graph has an associated capacity. These capacities are collected in the sparse capacity matrix $C_S \in \overline{\mathbb{R}}_{\ge 0}^{n'\times n'}$ where $n' = \#V'_S$ is the number of nodes in the allocation graph. The capacities can be infinite, if there is nothing in the model constraining the capacity of the edge. The capacities are determined in 4 different ways: @@ -86,9 +98,13 @@ The capacities are determined in 4 different ways: - The capacity of the edge $e \in E_S$ is given by the smallest `max_flow_rate` of the nodes along the equivalent edges in the subnetwork. If there are no nodes with a `max_flow_rate`, the edge capacity is infinite; - If the edge is a source, the capacity of the edge is given by the flow rate of that source. 
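
For illustration, a standalone sketch of the `max_flow_rate` rule from the list above (this is not the Ribasim implementation, and the rates are made-up values):

```julia
# Capacity of an allocation-graph edge from the max_flow_rate values of the
# flow-limiting nodes (pumps, outlets) along the equivalent subnetwork edges.
function edge_capacity(max_flow_rates::Vector{Float64})
    return isempty(max_flow_rates) ? Inf : minimum(max_flow_rates)
end

edge_capacity([2.5, 4.0])    # a pump (2.5) and an outlet (4.0) -> capacity 2.5
edge_capacity(Float64[])     # no flow-limiting nodes -> capacity Inf
```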
-### The optimization variables +# The optimization problem -There are 2 types of variables whose value has to be determined to solve the allocation problem: +The optimization problem for a subnetwork is a linear optimization problem consisting of an objective function with associated constraints on a set of variables, all of which are introduced below. + +## The optimization variables + +There are 2 types of variable whose value has to be determined to solve the allocation problem: - The flows $F \in \mathbb{R}_{\ge 0}^{n'\times n'}$ over the edges in the allocation graph; - The allocations to the basins @@ -100,7 +116,7 @@ $$ Currently the basin allocations are not taken into account in the implementation. ::: -### The optimization objective +## The optimization objective The goal of allocation is to get the flow to the users as close as possible to their demand. To achieve this, the following objectives are supported: @@ -121,6 +137,11 @@ $$ \min \sum_{(i,j)\in E_S\;:\; i\in U_S} \left|1 - \frac{F_{ij}}{d_j^p(t)}\right| + c \sum_{e \in E_S} F_e $$ +:::{.callout-note} +When performing main network allocation, the connections to subnetworks are also interpreted as users with demands determined by subnetwork demand collection. +::: + + To avoid division by $0$ errors, if a `*_relative` objective is used and a demand is $0$, the coefficient of the flow $F_{ij}$ is set to $0$. For `*_absolute` objectives the optimizer cares about the actual amount of water allocated to a user, for `*_relative` objectives it cares about the fraction of the demand allocated to the user. For `quadratic_*` objectives the optimizer cares about avoiding large shortages, for `linear_*` objectives it treats all deviations equally. @@ -137,7 +158,7 @@ The absolute value applied here is not supported in a linear programming context In the future new optimization objectives will be introduced, for demands of basins and priorities over sources. These will be used in combination with the above, in the form of goal programming. ::: -### The optimization variable constraints +## The optimization constraints - Flow conservation: For the basins in the allocation graph we have that $$ \sum_{j=1}^{n'} F_{kj} \le \sum_{i=1}^{n'} F_{ik}, \quad \forall k \in B_S. @@ -148,6 +169,12 @@ $$ F_{ij} \le \left(C_S\right)_{ij}, \quad \forall(i,j) \in E_S. $$ {#eq-capacityconstraint} By the definition of $C_S$ this also includes the source flows. + +:::{.callout-note} +When performing subnetwork demand collection, these capacities are set to $\infty$ for edges which connect the main network to a subnetwork. +::: + + - User outflow: The outflow of the user is dictated by the inflow and the return factor: $$ F_{ik} = r_k \cdot F_{kj} \quad @@ -200,6 +227,26 @@ In the future there will be 2 more optimization solves: ## Example -:::{.callout-note} -An example with figures and data will be added here after addition of [allocation output files](https://github.com/Deltares/Ribasim/issues/659). 
-::: +The following is an example of an optimization problem for the example shown [here](../python/examples.ipynb#model-with-allocation): + + +```{julia} +# | code-fold: true +using Ribasim +using SQLite + +toml_path = normpath(@__DIR__, "../../generated_testmodels/allocation_example/ribasim.toml") +p = Ribasim.Model(toml_path).integrator.p + +allocation_model = p.allocation.allocation_models[1] +t = 0.0 +priority_idx = 1 + +Ribasim.set_flow!(p.graph, Ribasim.NodeID(1), Ribasim.NodeID(2), 1.0) + +Ribasim.adjust_source_capacities!(allocation_model, p, priority_idx) +Ribasim.adjust_edge_capacities!(allocation_model, p, priority_idx) +Ribasim.set_objective_priority!(allocation_model, p, t, priority_idx) + +println(p.allocation.allocation_models[1].problem) +``` diff --git a/docs/core/equations.qmd b/docs/core/equations.qmd index cc238d8a6..92d5d258f 100644 --- a/docs/core/equations.qmd +++ b/docs/core/equations.qmd @@ -42,7 +42,7 @@ $\mathbf{u}(t)$ is given by all the states of the model, which are (currently) t Given a single basin with node ID $i \in B$, the equation that dictates the change of its storage over time is given by $$ -\frac{\text{d}S_i}{\text{d}t} = +\frac{\text{d}u_i}{\text{d}t} = \sum_{(i',j') \in E | j' = i} Q_{i',j'} - \sum_{(i',j') \in E | i' = i} Q_{i',j'} + F_i(p,t). $$ @@ -74,15 +74,15 @@ $$ Most of these terms are always $0$, because a flow over an edge only depends on a small number of states. Therefore the matrix $J$ is very sparse. -For many contributions to the Jacobian the derivative of the level $l(S)$ of a basin with respect to its storage $S$ is required. To get an expression for this, we first look at the storage as a function of the level: +For many contributions to the Jacobian the derivative of the level $l(u)$ of a basin with respect to its storage $u$ is required. To get an expression for this, we first look at the storage as a function of the level: $$ -S(l) = \int_{l_0}^l A(\ell)d\ell. +u(l) = \int_{l_0}^l A(\ell)d\ell. $$ -From this we obtain $S'(l) = A(l)$ and thus +From this we obtain $u'(l) = A(l)$ and thus $$ -\frac{\text{d}l}{\text{d}S} = \frac{1}{A(S)}. +\frac{\text{d}l}{\text{d}u} = \frac{1}{A(u)}. $$ :::{.callout-note} @@ -149,11 +149,11 @@ plt.show() The precipitation term is given by $$ - Q_P = P \cdot A(S). + Q_P = P \cdot A(u). $$ {#eq-precip} Here $P = P(t)$ is the precipitation rate and $A$ is the wetted area. $A$ is a -function of the storage $S = S(t)$: as the volume of water changes, the area of the free water +function of the storage $u = u(t)$: as the volume of water changes, the area of the free water surface may change as well, depending on the slopes of the surface waters. ## Evaporation @@ -161,13 +161,13 @@ surface may change as well, depending on the slopes of the surface waters. The evaporation term is given by $$ - Q_E = E_\text{pot} \cdot A(S) \cdot \phi(d;0.1). + Q_E = E_\text{pot} \cdot A(u) \cdot \phi(d;0.1). $$ {#eq-evap} -Here $E_\text{pot} = E_\text{pot}(t)$ is the potential evaporation rate and $A$ is the wetted area. $\phi$ is the [reduction factor](equations.qmd#sec-reduction_factor) which depends on the depth $d$. It provides a smooth gradient as $S \rightarrow 0$. +Here $E_\text{pot} = E_\text{pot}(t)$ is the potential evaporation rate and $A$ is the wetted area. $\phi$ is the [reduction factor](equations.qmd#sec-reduction_factor) which depends on the depth $d$. It provides a smooth gradient as $u \rightarrow 0$. 
-A straightforward formulation $Q_E = \mathrm{max}(E_\text{pot} A(S), -0)$ is unsuitable, as $\frac{\mathrm{d}Q_E}{\mathrm{d}S}(S=0)$ is then not well-defined. +A straightforward formulation $Q_E = \mathrm{max}(E_\text{pot} A(u), +0)$ is unsuitable, as $\frac{\mathrm{d}Q_E}{\mathrm{d}u}(u=0)$ is then not well-defined.
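
To make the role of the reduction factor concrete, a small standalone sketch of the evaporation term (the linear ramp below is only a stand-in for Ribasim's actual reduction factor, and all numbers are made up):

```julia
# Stand-in reduction factor: ramps linearly from 0 to 1 over the threshold depth.
reduction_factor(d, threshold) = clamp(d / threshold, 0.0, 1.0)

# Q_E = E_pot ⋅ A(u) ⋅ φ(d; 0.1)
evaporation(E_pot, area, depth) = E_pot * area * reduction_factor(depth, 0.1)

E_pot = 1e-8    # potential evaporation rate [m s⁻¹]
area = 1000.0   # wetted area A(u) [m²]

evaporation(E_pot, area, 0.5)   # deep water: full rate, 1.0e-5 m³ s⁻¹
evaporation(E_pot, area, 0.05)  # shallow water: factor 0.5, 5.0e-6 m³ s⁻¹
evaporation(E_pot, area, 0.0)   # dry basin: reduced to 0.0
```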