diff --git a/dev/.documenter-siteinfo.json b/dev/.documenter-siteinfo.json index 7d4f92b..eedf988 100644 --- a/dev/.documenter-siteinfo.json +++ b/dev/.documenter-siteinfo.json @@ -1 +1 @@ -{"documenter":{"julia_version":"1.9.3","generation_timestamp":"2023-10-20T11:24:03","documenter_version":"1.1.1"}} \ No newline at end of file +{"documenter":{"julia_version":"1.9.3","generation_timestamp":"2023-10-28T11:15:52","documenter_version":"1.1.2"}} \ No newline at end of file diff --git a/dev/assets/Manifest.toml b/dev/assets/Manifest.toml index bf4ecad..310e4ea 100644 --- a/dev/assets/Manifest.toml +++ b/dev/assets/Manifest.toml @@ -56,9 +56,9 @@ version = "0.4.4" [[deps.Adapt]] deps = ["LinearAlgebra", "Requires"] -git-tree-sha1 = "76289dc51920fdc6e0013c872ba9551d54961c24" +git-tree-sha1 = "02f731463748db57cc2ebfbd9fbc9ce8280d3433" uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" -version = "3.6.2" +version = "3.7.1" weakdeps = ["StaticArrays"] [deps.Adapt.extensions] @@ -235,9 +235,9 @@ uuid = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf" version = "1.3.2" [[deps.Bijections]] -git-tree-sha1 = "71281c0c28f97e0adeed24fdaa6bf7d37177f297" +git-tree-sha1 = "c9b163bd832e023571e86d0b90d9de92a9879088" uuid = "e2ed5e7c-b2de-5872-ae92-c73ca462fb04" -version = "0.1.5" +version = "0.1.6" [[deps.Bijectors]] deps = ["ArgCheck", "ChainRules", "ChainRulesCore", "ChangesOfVariables", "Compat", "Distributions", "Functors", "InverseFunctions", "IrrationalConstants", "LinearAlgebra", "LogExpFunctions", "MappedArrays", "Random", "Reexport", "Requires", "Roots", "SparseArrays", "Statistics"] @@ -320,9 +320,9 @@ version = "0.3.12" [[deps.ChainRules]] deps = ["Adapt", "ChainRulesCore", "Compat", "Distributed", "GPUArraysCore", "IrrationalConstants", "LinearAlgebra", "Random", "RealDot", "SparseArrays", "SparseInverseSubset", "Statistics", "StructArrays", "SuiteSparse"] -git-tree-sha1 = "01b0594d8907485ed894bc59adfc0a24a9cde7a3" +git-tree-sha1 = "7e4f5593e7e1ab923cebc5414f6d5433872cdd19" uuid = "082447d4-558c-5d27-93f4-14fc19e9eca2" -version = "1.55.0" +version = "1.56.0" [[deps.ChainRulesCore]] deps = ["Compat", "LinearAlgebra"] @@ -583,9 +583,9 @@ weakdeps = ["OrdinaryDiffEq", "Sundials"] [[deps.DiffEqGPU]] deps = ["Adapt", "ChainRulesCore", "DiffEqBase", "Distributed", "DocStringExtensions", "ForwardDiff", "KernelAbstractions", "LinearAlgebra", "LinearSolve", "MuladdMacro", "Parameters", "Random", "RecursiveArrayTools", "Requires", "SciMLBase", "Setfield", "SimpleDiffEq", "StaticArrays", "TOML", "ZygoteRules"] -git-tree-sha1 = "03b0c485c7c165ce1949b5764a63bbbe2f5df10a" +git-tree-sha1 = "2901e4779448f53bfa71096bf7a504daebd3acb6" uuid = "071ae1c0-96b5-11e9-1965-c90190d839ea" -version = "3.1.0" +version = "3.2.0" [deps.DiffEqGPU.extensions] AMDGPUExt = ["AMDGPU"] @@ -679,9 +679,9 @@ version = "0.9.3" [[deps.Documenter]] deps = ["ANSIColoredPrinters", "AbstractTrees", "Base64", "Dates", "DocStringExtensions", "Downloads", "IOCapture", "InteractiveUtils", "JSON", "LibGit2", "Logging", "Markdown", "MarkdownAST", "Pkg", "PrecompileTools", "REPL", "RegistryInstances", "SHA", "Test", "Unicode"] -git-tree-sha1 = "147a3cbb6ddcd9448fe5e6c426b347efc68f9c86" +git-tree-sha1 = "662fb21ae7fad33e044c2b59ece832fdce32c171" uuid = "e30172f5-a6a5-5a46-863b-614d45cd2de4" -version = "1.1.1" +version = "1.1.2" [[deps.DomainSets]] deps = ["CompositeTypes", "IntervalSets", "LinearAlgebra", "Random", "StaticArrays", "Statistics"] @@ -702,9 +702,9 @@ version = "0.6.8" [[deps.DynamicPPL]] deps = ["AbstractMCMC", "AbstractPPL", "BangBang", "Bijectors", 
"ChainRulesCore", "Compat", "ConstructionBase", "Distributions", "DocStringExtensions", "LinearAlgebra", "LogDensityProblems", "MacroTools", "OrderedCollections", "Random", "Requires", "Setfield", "Test", "ZygoteRules"] -git-tree-sha1 = "93e9f403d27137db580cd381943d3a8832beb70f" +git-tree-sha1 = "50a718301941d4ec4f391aa845ee434fce2dbe2e" uuid = "366bfd00-2699-11ea-058f-f148b4cae6d8" -version = "0.23.19" +version = "0.23.21" weakdeps = ["MCMCChains"] [deps.DynamicPPL.extensions] @@ -735,21 +735,21 @@ version = "1.0.4" [[deps.Enzyme]] deps = ["CEnum", "EnzymeCore", "Enzyme_jll", "GPUCompiler", "LLVM", "Libdl", "LinearAlgebra", "ObjectFile", "Preferences", "Printf", "Random"] -git-tree-sha1 = "a8a13f3259f59f1738e14296b9191299226ea6b6" +git-tree-sha1 = "821daa883570994b4c0f9994de2840cc506a51f3" uuid = "7da242da-08ed-463a-9acd-ee780be4f1d9" -version = "0.11.9" +version = "0.11.10" [[deps.EnzymeCore]] deps = ["Adapt"] -git-tree-sha1 = "d8701002a745c450c03b890f10d53636d1a8a7ea" +git-tree-sha1 = "ab81396e4e7b61f5590db02fa1c17fae4f16d7ab" uuid = "f151be2c-9106-41f4-ab19-57ee4f262869" -version = "0.6.2" +version = "0.6.3" [[deps.Enzyme_jll]] deps = ["Artifacts", "JLLWrappers", "LazyArtifacts", "Libdl", "TOML"] -git-tree-sha1 = "6febd718de952cb31717a34f81f98872c065e825" +git-tree-sha1 = "e248a990a4a917e587c6b7fcf64a4311649a267f" uuid = "7cc45869-7501-5eee-bdea-0790c847d4ef" -version = "0.0.88+0" +version = "0.0.89+0" [[deps.EpollShim_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl"] @@ -1115,9 +1115,9 @@ version = "0.14.7" [[deps.IntervalSets]] deps = ["Dates", "Random"] -git-tree-sha1 = "8e59ea773deee525c99a8018409f64f19fb719e6" +git-tree-sha1 = "3d8866c029dd6b16e69e0d4a939c4dfcb98fac47" uuid = "8197267c-284f-5f27-9208-e0e47529a953" -version = "0.7.7" +version = "0.7.8" weakdeps = ["Statistics"] [deps.IntervalSets.extensions] @@ -1236,16 +1236,22 @@ uuid = "88015f11-f218-50d7-93a8-a6af411a945d" version = "3.0.0+1" [[deps.LLVM]] -deps = ["CEnum", "LLVMExtra_jll", "Libdl", "Printf", "Unicode"] -git-tree-sha1 = "4ea2928a96acfcf8589e6cd1429eff2a3a82c366" +deps = ["CEnum", "LLVMExtra_jll", "Libdl", "Preferences", "Printf", "Requires", "Unicode"] +git-tree-sha1 = "c879e47398a7ab671c782e02b51a4456794a7fa3" uuid = "929cbde3-209d-540e-8aea-75f648917ca0" -version = "6.3.0" +version = "6.4.0" + + [deps.LLVM.extensions] + BFloat16sExt = "BFloat16s" + + [deps.LLVM.weakdeps] + BFloat16s = "ab4f0b2a-ad5b-11e8-123f-65d77653426b" [[deps.LLVMExtra_jll]] deps = ["Artifacts", "JLLWrappers", "LazyArtifacts", "Libdl", "TOML"] -git-tree-sha1 = "e7c01b69bcbcb93fd4cbc3d0fea7d229541e18d2" +git-tree-sha1 = "a84f8f1e8caaaa4e3b4c101306b9e801d3883ace" uuid = "dad2f222-ce93-54a1-a47d-0025e8a3acab" -version = "0.0.26+0" +version = "0.0.27+0" [[deps.LLVMOpenMP_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] @@ -1505,9 +1511,9 @@ weakdeps = ["ChainRulesCore", "ForwardDiff", "SpecialFunctions"] [[deps.MCMCChains]] deps = ["AbstractMCMC", "AxisArrays", "Dates", "Distributions", "Formatting", "IteratorInterfaceExtensions", "KernelDensity", "LinearAlgebra", "MCMCDiagnosticTools", "MLJModelInterface", "NaturalSort", "OrderedCollections", "PrettyTables", "Random", "RecipesBase", "Statistics", "StatsBase", "StatsFuns", "TableTraits", "Tables"] -git-tree-sha1 = "8778ea7283a0bf0d7e507a0235adfff38071493b" +git-tree-sha1 = "3b1ae6bcb0a94ed7760e72cd3524794f613658d2" uuid = "c7f686f2-ff18-58e9-bc7b-31028e88f75d" -version = "6.0.3" +version = "6.0.4" [[deps.MCMCDiagnosticTools]] deps = ["AbstractFFTs", "DataAPI", "DataStructures", 
"Distributions", "LinearAlgebra", "MLJModelInterface", "Random", "SpecialFunctions", "Statistics", "StatsBase", "StatsFuns", "Tables"] @@ -1572,9 +1578,9 @@ version = "0.1.2" [[deps.MathOptInterface]] deps = ["BenchmarkTools", "CodecBzip2", "CodecZlib", "DataStructures", "ForwardDiff", "JSON", "LinearAlgebra", "MutableArithmetics", "NaNMath", "OrderedCollections", "PrecompileTools", "Printf", "SparseArrays", "SpecialFunctions", "Test", "Unicode"] -git-tree-sha1 = "5c9f1e635e8d491297e596b56fec1c95eafb95a3" +git-tree-sha1 = "13b3d40084d04e609e0509730f05215fb2a2fba4" uuid = "b8f27783-ece8-5eb3-8dc8-9495eed66fee" -version = "1.20.1" +version = "1.21.0" [[deps.MathProgBase]] deps = ["LinearAlgebra", "SparseArrays"] @@ -1804,9 +1810,9 @@ version = "0.3.1" [[deps.Optimization]] deps = ["ADTypes", "ArrayInterface", "ConsoleProgressMonitor", "DocStringExtensions", "LinearAlgebra", "Logging", "LoggingExtras", "Pkg", "Printf", "ProgressLogging", "Reexport", "Requires", "SciMLBase", "SparseArrays", "TerminalLoggers"] -git-tree-sha1 = "f59193f79f8310e5eaad309ccfa8f2fa06b6703b" +git-tree-sha1 = "1aa7ffea6e171167e9cae620d749e16d5874414a" uuid = "7f7a1694-90dd-40f0-9382-eb1efda571ba" -version = "3.19.1" +version = "3.19.3" weakdeps = ["Enzyme", "FiniteDiff", "ForwardDiff", "ModelingToolkit", "ReverseDiff", "SparseDiffTools", "Symbolics", "Tracker", "Zygote"] [deps.Optimization.extensions] @@ -2220,9 +2226,9 @@ version = "0.3.6" [[deps.SciMLSensitivity]] deps = ["ADTypes", "Adapt", "ArrayInterface", "ChainRulesCore", "DiffEqBase", "DiffEqCallbacks", "DiffEqNoiseProcess", "Distributions", "EllipsisNotation", "Enzyme", "FiniteDiff", "ForwardDiff", "FunctionProperties", "FunctionWrappersWrappers", "Functors", "GPUArraysCore", "LinearAlgebra", "LinearSolve", "Markdown", "OrdinaryDiffEq", "Parameters", "PreallocationTools", "QuadGK", "Random", "RandomNumbers", "RecursiveArrayTools", "Reexport", "ReverseDiff", "SciMLBase", "SciMLOperators", "SimpleNonlinearSolve", "SparseDiffTools", "StaticArraysCore", "Statistics", "StochasticDiffEq", "Tracker", "TruncatedStacktraces", "Zygote", "ZygoteRules"] -git-tree-sha1 = "c6ae601b93ce2adf39a5bc6c690b4776d3ab0b71" +git-tree-sha1 = "0c97f476daa17ceedb7e743f70cd25a4bb1a0e54" uuid = "1ed8b502-d754-442c-8d5d-10ac956f44a1" -version = "7.44.0" +version = "7.46.0" [[deps.ScientificTypesBase]] git-tree-sha1 = "a8e18eb383b5ecf1b5e6fc237eb39255044fd92b" @@ -2273,9 +2279,9 @@ version = "1.11.0" [[deps.SimpleNonlinearSolve]] deps = ["ArrayInterface", "DiffEqBase", "FiniteDiff", "ForwardDiff", "LinearAlgebra", "PackageExtensionCompat", "PrecompileTools", "Reexport", "SciMLBase", "StaticArraysCore"] -git-tree-sha1 = "e308d089f5d0e733a017b61784c5813e672f760d" +git-tree-sha1 = "15ff97fa4881133caa324dacafe28b5ac47ad8a2" uuid = "727e6d20-b764-4bd8-a329-72de5adea6c7" -version = "0.1.22" +version = "0.1.23" weakdeps = ["NNlib"] [deps.SimpleNonlinearSolve.extensions] @@ -2313,9 +2319,9 @@ uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" [[deps.SparseDiffTools]] deps = ["ADTypes", "Adapt", "ArrayInterface", "Compat", "DataStructures", "FiniteDiff", "ForwardDiff", "Graphs", "LinearAlgebra", "PackageExtensionCompat", "Random", "Reexport", "SciMLOperators", "Setfield", "SparseArrays", "StaticArrayInterface", "StaticArrays", "Tricks", "UnPack", "VertexSafeGraphs"] -git-tree-sha1 = "336fd944a1bbb8873bfa8171387608ca93317d68" +git-tree-sha1 = "eac5b754896a56df0b9e049a3d93cc27912a78f2" uuid = "47a9eef4-7e08-11e9-0b38-333d64bd3804" -version = "2.8.0" +version = "2.9.0" weakdeps = ["Enzyme", "Symbolics", 
"Zygote"] [deps.SparseDiffTools.extensions] @@ -2470,9 +2476,9 @@ version = "5.10.1+6" [[deps.Sundials]] deps = ["CEnum", "DataStructures", "DiffEqBase", "Libdl", "LinearAlgebra", "Logging", "PrecompileTools", "Reexport", "SciMLBase", "SparseArrays", "Sundials_jll"] -git-tree-sha1 = "b208d5379feee6c1c9f9660e4294e77e40946884" +git-tree-sha1 = "71dc65a2d7decdde5500299c9b04309e0138d1b4" uuid = "c3572dad-4567-51f8-b174-8c6c989267f4" -version = "4.20.0" +version = "4.20.1" [[deps.Sundials_jll]] deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "OpenBLAS_jll", "Pkg", "SuiteSparse_jll"] @@ -2523,9 +2529,9 @@ version = "1.0.1" [[deps.Tables]] deps = ["DataAPI", "DataValueInterfaces", "IteratorInterfaceExtensions", "LinearAlgebra", "OrderedCollections", "TableTraits"] -git-tree-sha1 = "a1f34829d5ac0ef499f6d84428bd6b4c71f02ead" +git-tree-sha1 = "cb76cf677714c095e535e3501ac7954732aeea2d" uuid = "bd369af6-aec1-5ad0-b16a-f7cc5008161c" -version = "1.11.0" +version = "1.11.1" [[deps.Tar]] deps = ["ArgTools", "SHA"] @@ -2920,9 +2926,9 @@ version = "1.5.5+0" [[deps.Zygote]] deps = ["AbstractFFTs", "ChainRules", "ChainRulesCore", "DiffRules", "Distributed", "FillArrays", "ForwardDiff", "GPUArrays", "GPUArraysCore", "IRTools", "InteractiveUtils", "LinearAlgebra", "LogExpFunctions", "MacroTools", "NaNMath", "PrecompileTools", "Random", "Requires", "SparseArrays", "SpecialFunctions", "Statistics", "ZygoteRules"] -git-tree-sha1 = "90cc0e19831780e8a03623a59db4730d96045303" +git-tree-sha1 = "5ded212acd815612df112bb895ef3910c5a03f57" uuid = "e88e6eb3-aa80-5325-afca-941959d7151f" -version = "0.6.66" +version = "0.6.67" weakdeps = ["Colors", "Distances", "Tracker"] [deps.Zygote.extensions] @@ -2932,9 +2938,9 @@ weakdeps = ["Colors", "Distances", "Tracker"] [[deps.ZygoteRules]] deps = ["ChainRulesCore", "MacroTools"] -git-tree-sha1 = "977aed5d006b840e2e40c0b48984f7463109046d" +git-tree-sha1 = "9d749cd449fb448aeca4feee9a2f4186dbb5d184" uuid = "700de1a5-db45-46bc-99cf-38207098b444" -version = "0.2.3" +version = "0.2.4" [[deps.eudev_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "gperf_jll"] diff --git a/dev/index.html b/dev/index.html index 839fb09..d87a7b1 100644 --- a/dev/index.html +++ b/dev/index.html @@ -1,22 +1,22 @@ Home · SciMLExpectations.jl

SciMLExpectations.jl: Expected Values of Simulations and Uncertainty Quantification

SciMLExpectations.jl is a package for quantifying the uncertainties of simulations by calculating the expectations of observables with respect to input uncertainties. Its goal is to make it easy to compute solution moments in a differentiable way, enabling fast optimization under uncertainty.

Warn

This package is still under heavy construction. Use at your own risk!

Installation

To install SciMLExpectations.jl, use the Julia package manager:

using Pkg
 Pkg.add("SciMLExpectations")

Contributing

Reproducibility

The documentation of this SciML package was built using these direct dependencies,
Status `~/work/SciMLExpectations.jl/SciMLExpectations.jl/docs/Project.toml`
-  [071ae1c0] DiffEqGPU v3.1.0
+  [071ae1c0] DiffEqGPU v3.2.0
   [77a26b50] DiffEqNoiseProcess v5.19.0
 ⌃ [0c46a032] DifferentialEquations v7.10.0
   [31c24e10] Distributions v0.25.102
-  [e30172f5] Documenter v1.1.1
+  [e30172f5] Documenter v1.1.2
   [f6369f11] ForwardDiff v0.10.36
   [e00cd5f1] IntegralsCuba v0.3.0
   [5ab0869b] KernelDensity v0.6.7
-  [c7f686f2] MCMCChains v6.0.3
-  [7f7a1694] Optimization v3.19.1
+  [c7f686f2] MCMCChains v6.0.4
+  [7f7a1694] Optimization v3.19.3
   [fd9f6733] OptimizationMOI v0.1.16
   [4e6fcdb7] OptimizationNLopt v0.1.8
   [1dea7af3] OrdinaryDiffEq v6.58.0
   [91a5bcdd] Plots v1.39.0
   [afe9f18d] SciMLExpectations v2.1.0 `~/work/SciMLExpectations.jl/SciMLExpectations.jl`
-  [1ed8b502] SciMLSensitivity v7.44.0
+  [1ed8b502] SciMLSensitivity v7.46.0
   [f3b207a7] StatsPlots v0.15.6
 ⌃ [789caeaf] StochasticDiffEq v6.62.0
   [fce5fe82] Turing v0.29.3
@@ -26,22 +26,22 @@
   Official https://julialang.org/ release
 Platform Info:
   OS: Linux (x86_64-linux-gnu)
-  CPU: 2 × Intel(R) Xeon(R) Platinum 8370C CPU @ 2.80GHz
+  CPU: 2 × Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40GHz
   WORD_SIZE: 64
   LIBM: libopenlibm
-  LLVM: libLLVM-14.0.6 (ORCJIT, icelake-server)
+  LLVM: libLLVM-14.0.6 (ORCJIT, haswell)
   Threads: 1 on 2 virtual cores
A more complete overview of all dependencies and their versions is also provided.
Status `~/work/SciMLExpectations.jl/SciMLExpectations.jl/docs/Manifest.toml`
   [47edcb42] ADTypes v0.2.4
   [a4c015fc] ANSIColoredPrinters v0.0.1
 ⌅ [c3fe647b] AbstractAlgebra v0.32.5
   [621f4979] AbstractFFTs v1.5.0
-  [80f14c24] AbstractMCMC v4.4.2
+⌅ [80f14c24] AbstractMCMC v4.4.2
   [7a57a42e] AbstractPPL v0.6.2
   [1520ce14] AbstractTrees v0.4.4
-  [79e6a3ab] Adapt v3.6.2
+  [79e6a3ab] Adapt v3.7.1
   [0bf59076] AdvancedHMC v0.5.5
   [5b7e9947] AdvancedMH v0.7.5
-  [576499cb] AdvancedPS v0.4.3
+⌅ [576499cb] AdvancedPS v0.4.3
   [b5ca4192] AdvancedVI v0.2.4
   [dce04be8] ArgCheck v2.3.0
   [ec485272] ArnoldiMethod v0.2.0
@@ -56,7 +56,7 @@
   [198e06fe] BangBang v0.3.39
   [9718e550] Baselet v0.1.1
   [6e4b80f9] BenchmarkTools v1.3.2
-  [e2ed5e7c] Bijections v0.1.5
+  [e2ed5e7c] Bijections v0.1.6
   [76274a88] Bijectors v0.13.7
   [d1d4a3ce] BitFlags v0.1.7
   [62783981] BitTwiddlingConvenienceFunctions v0.1.5
@@ -66,7 +66,7 @@
   [00ebfdb7] CSTParser v3.3.6
   [49dc2e85] Calculus v0.5.1
   [7057c7e9] Cassette v0.3.12
-  [082447d4] ChainRules v1.55.0
+  [082447d4] ChainRules v1.56.0
   [d360d2e6] ChainRulesCore v1.18.0
   [9e997f8a] ChangesOfVariables v0.1.8
   [fb6a15b2] CloseOpenIntervals v0.1.12
@@ -101,7 +101,7 @@
   [b429d917] DensityInterface v0.4.0
 ⌃ [2b5f629d] DiffEqBase v6.130.0
   [459566f4] DiffEqCallbacks v2.33.1
-  [071ae1c0] DiffEqGPU v3.1.0
+  [071ae1c0] DiffEqGPU v3.2.0
   [77a26b50] DiffEqNoiseProcess v5.19.0
   [163ba53b] DiffResults v1.1.0
   [b552c78f] DiffRules v1.15.1
@@ -110,16 +110,16 @@
   [31c24e10] Distributions v0.25.102
   [ced4e74d] DistributionsAD v0.6.53
   [ffbed154] DocStringExtensions v0.9.3
-  [e30172f5] Documenter v1.1.1
+  [e30172f5] Documenter v1.1.2
 ⌅ [5b8099bc] DomainSets v0.6.7
   [fa6b7ba4] DualNumbers v0.6.8
-  [366bfd00] DynamicPPL v0.23.19
+  [366bfd00] DynamicPPL v0.23.21
   [7c1d4256] DynamicPolynomials v0.5.3
   [da5c29d0] EllipsisNotation v1.7.0
-  [cad2338a] EllipticalSliceSampling v1.1.0
+⌅ [cad2338a] EllipticalSliceSampling v1.1.0
   [4e289a0a] EnumX v1.0.4
-  [7da242da] Enzyme v0.11.9
-  [f151be2c] EnzymeCore v0.6.2
+  [7da242da] Enzyme v0.11.10
+  [f151be2c] EnzymeCore v0.6.3
   [460bff9d] ExceptionUnwrapping v0.1.9
   [d4d017d3] ExponentialUtilities v1.25.0
   [e2ba6199] ExprTools v0.1.10
@@ -145,7 +145,7 @@
   [c27321d9] Glob v1.3.1
   [86223c79] Graphs v1.9.0
   [42e2da0e] Grisu v1.0.2
-  [0b43b601] Groebner v0.4.4
+⌅ [0b43b601] Groebner v0.4.4
   [d5909c97] GroupsCore v0.4.0
   [19dc6840] HCubature v1.5.1
   [cd3eb016] HTTP v1.10.0
@@ -161,7 +161,7 @@
   [de52edbc] Integrals v3.9.0
   [e00cd5f1] IntegralsCuba v0.3.0
   [a98d9a8b] Interpolations v0.14.7
-  [8197267c] IntervalSets v0.7.7
+  [8197267c] IntervalSets v0.7.8
   [3587e190] InverseFunctions v0.1.12
   [41ab1584] InvertedIndices v1.3.0
   [92d709cd] IrrationalConstants v0.2.2
@@ -176,7 +176,7 @@
   [63c18a36] KernelAbstractions v0.9.10
   [5ab0869b] KernelDensity v0.6.7
   [ba0b0d4f] Krylov v0.9.4
-  [929cbde3] LLVM v6.3.0
+  [929cbde3] LLVM v6.4.0
   [8ac3fa9e] LRUCache v1.5.0
   [b964fa9f] LaTeXStrings v1.3.0
   [2ee39098] LabelledArrays v1.14.0
@@ -195,7 +195,7 @@
   [2ab3a3ac] LogExpFunctions v0.3.26
   [e6f89c97] LoggingExtras v1.0.3
   [bdcacae8] LoopVectorization v0.12.165
-  [c7f686f2] MCMCChains v6.0.3
+  [c7f686f2] MCMCChains v6.0.4
   [be115224] MCMCDiagnosticTools v0.3.7
   [e80e1ace] MLJModelInterface v1.9.3
   [d8e11817] MLStyle v0.4.17
@@ -203,7 +203,7 @@
   [d125e4d3] ManualMemory v0.1.8
   [dbb5928d] MappedArrays v0.4.2
   [d0879d2d] MarkdownAST v0.1.2
-  [b8f27783] MathOptInterface v1.20.1
+  [b8f27783] MathOptInterface v1.21.0
   [fdba3010] MathProgBase v0.7.8
   [739be429] MbedTLS v1.1.7
   [442fdcdd] Measures v0.3.2
@@ -230,7 +230,7 @@
   [4d8831e6] OpenSSL v1.4.1
   [429524aa] Optim v1.7.8
   [3bd65402] Optimisers v0.3.1
-  [7f7a1694] Optimization v3.19.1
+  [7f7a1694] Optimization v3.19.3
   [fd9f6733] OptimizationMOI v0.1.16
   [4e6fcdb7] OptimizationNLopt v0.1.8
   [bac558e1] OrderedCollections v1.6.2
@@ -281,7 +281,7 @@
   [afe9f18d] SciMLExpectations v2.1.0 `~/work/SciMLExpectations.jl/SciMLExpectations.jl`
   [e9a6253c] SciMLNLSolve v0.1.9
   [c0aeaf25] SciMLOperators v0.3.6
-  [1ed8b502] SciMLSensitivity v7.44.0
+  [1ed8b502] SciMLSensitivity v7.46.0
   [30f210dd] ScientificTypesBase v3.0.0
   [6c6a2e73] Scratch v1.2.0
   [91c51154] SentinelArrays v1.4.0
@@ -289,12 +289,12 @@
   [992d4aef] Showoff v1.0.3
   [777ac1f9] SimpleBufferStream v1.1.0
   [05bca326] SimpleDiffEq v1.11.0
-  [727e6d20] SimpleNonlinearSolve v0.1.22
+  [727e6d20] SimpleNonlinearSolve v0.1.23
   [699a6c99] SimpleTraits v0.9.4
   [ce78b400] SimpleUnPack v1.1.0
   [66db9d55] SnoopPrecompile v1.0.3
   [a2af1166] SortingAlgorithms v1.2.0
-  [47a9eef4] SparseDiffTools v2.8.0
+  [47a9eef4] SparseDiffTools v2.9.0
   [dc90abb0] SparseInverseSubset v0.1.1
   [e56a9233] Sparspak v0.3.9
   [276daf66] SpecialFunctions v2.3.1
@@ -314,13 +314,13 @@
   [892a3eda] StringManipulation v0.3.4
   [09ab397b] StructArrays v0.6.16
   [53d494c1] StructIO v0.3.0
-  [c3572dad] Sundials v4.20.0
+  [c3572dad] Sundials v4.20.1
   [2efcf032] SymbolicIndexingInterface v0.2.2
   [d1185830] SymbolicUtils v1.4.0
   [0c5d862f] Symbolics v5.10.0
   [ab02a1b2] TableOperations v1.2.0
   [3783bdb8] TableTraits v1.0.1
-  [bd369af6] Tables v1.11.0
+  [bd369af6] Tables v1.11.1
   [62fd8b95] TensorCore v0.1.1
   [5d786b92] TerminalLoggers v0.1.7
   [8290d209] ThreadingUtilities v0.5.2
@@ -347,14 +347,14 @@
   [19fa3120] VertexSafeGraphs v0.2.0
   [cc8bc4a8] Widgets v0.6.6
   [efce3f68] WoodburyMatrices v0.5.5
-  [e88e6eb3] Zygote v0.6.66
-  [700de1a5] ZygoteRules v0.2.3
+  [e88e6eb3] Zygote v0.6.67
+  [700de1a5] ZygoteRules v0.2.4
   [ae81ac8f] ASL_jll v0.1.3+0
 ⌅ [68821587] Arpack_jll v3.5.1+1
   [6e34b625] Bzip2_jll v1.0.8+0
   [83423d85] Cairo_jll v1.16.1+1
   [3bed1096] Cuba_jll v4.2.2+1
-  [7cc45869] Enzyme_jll v0.0.88+0
+  [7cc45869] Enzyme_jll v0.0.89+0
   [2702e6a9] EpollShim_jll v0.0.20230411+0
   [2e619515] Expat_jll v2.5.0+0
 ⌃ [b22a6f82] FFMPEG_jll v4.4.2+2
@@ -373,7 +373,7 @@
   [aacddb02] JpegTurbo_jll v2.1.91+0
   [c1c5ebd0] LAME_jll v3.100.1+0
   [88015f11] LERC_jll v3.0.0+1
-  [dad2f222] LLVMExtra_jll v0.0.26+0
+  [dad2f222] LLVMExtra_jll v0.0.27+0
   [1d63c593] LLVMOpenMP_jll v15.0.4+0
   [dd4b983a] LZO_jll v2.10.1+0
 ⌅ [e9f186c6] Libffi_jll v3.2.2+1
@@ -490,4 +490,4 @@
   [8e850b90] libblastrampoline_jll v5.8.0+0
   [8e850ede] nghttp2_jll v1.48.0+0
   [3f19e933] p7zip_jll v17.4.0+0
-Info Packages marked with ⌃ and ⌅ have new versions available, but those with ⌅ are restricted by compatibility constraints from upgrading. To see why use `status --outdated -m`

You can also download the manifest file and the project file.

+Info Packages marked with ⌃ and ⌅ have new versions available, but those with ⌅ are restricted by compatibility constraints from upgrading. To see why use `status --outdated -m`
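
If you prefer to run this check from the Julia API rather than the Pkg REPL, the equivalent call (available in Julia 1.8 and later) is, as a rough sketch:

using Pkg
# API form of the Pkg REPL command `status --outdated -m`:
# list manifest packages that are held back and show the restricting compat bounds
Pkg.status(outdated = true, mode = Pkg.PKGMODE_MANIFEST)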

You can also download the manifest file and the project file.

diff --git a/dev/manual/algorithms/index.html b/dev/manual/algorithms/index.html index 840a97f..2269af9 100644 --- a/dev/manual/algorithms/index.html +++ b/dev/manual/algorithms/index.html @@ -1,2 +1,2 @@ -Expectation Algorithms · SciMLExpectations.jl
+Expectation Algorithms · SciMLExpectations.jl
diff --git a/dev/manual/problem/index.html b/dev/manual/problem/index.html index 133d2d0..63ff67a 100644 --- a/dev/manual/problem/index.html +++ b/dev/manual/problem/index.html @@ -1,4 +1,4 @@ ExpectationProblem · SciMLExpectations.jl

ExpectationProblem

SciMLExpectations.ExpectationProblemType
ExpectationProblem(S, g, h, pdist, params, nout)
 ExpectationProblem(g, pdist, params; nout = 1)
-ExpectationProblem(sm::SystemMap, g, h, d; nout = 1)

Defines ∫ g(S(h(x,u0,p)))*f(x)dx

Arguments

Let 𝕏 = uncertainty space, 𝕌 = Initial condition space, ℙ = model parameter space

  • S: 𝕌 × ℙ → 𝕌, also known as the system map.
  • g: 𝕌 × ℙ → ℝⁿᵒᵘᵗ, also known as the observables or output function.
  • h: 𝕏 × 𝕌 × ℙ → 𝕌 × ℙ, also known as the covariate function.
  • pdf(d,x): 𝕏 → ℝ, the uncertainty distribution of the initial states.
  • params
  • nout
source
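
As a rough sketch of how these arguments fit together (the toy ODE, observable, and distribution below are invented for illustration and are not part of this page):

using OrdinaryDiffEq, Distributions, SciMLExpectations
f(u, p, t) = p[1] * u                          # toy scalar ODE
prob = ODEProblem(f, [1.0], (0.0, 4.0), [-0.3])
sm = SystemMap(prob, Tsit5())                  # S: the system solution map
g(sol, p) = sol[1, end]                        # observable of the solution
h(x, u0, p) = x, p                             # covariate map: the uncertain sample replaces u0
d = GenericDistribution(Uniform(0.5, 1.5))     # uncertainty distribution over the initial state
exprob = ExpectationProblem(sm, g, h, d; nout = 1)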
SciMLExpectations.SystemMapType
SystemMap(prob, args...; kwargs...)

Representation of a system solution map for a given prob::DEProblem. args and kwargs are forwarded to the equation solver.

source
SciMLExpectations.ProcessNoiseSystemMapType
ProcessNoiseSystemMap(prob, n, args...; kwargs...)

Representation of a system solution map for a given prob::SDEProblem. args and kwargs are forwarded to the equation solver. n is the number of terms in the Kosambi–Karhunen–Loève representation of the process noise.

source
SciMLExpectations.GenericDistributionType

GenericDistribution(d, ds...)

Defines a generic distribution that simply wraps functions for the pdf, rand, and bounds. Users can use this to define any arbitrary joint pdf. It is included because the Distributions.jl Product method for mixed distributions is type-unstable.

source
+ExpectationProblem(sm::SystemMap, g, h, d; nout = 1)

Defines ∫ g(S(h(x,u0,p)))*f(x)dx

Arguments

Let 𝕏 = uncertainty space, 𝕌 = Initial condition space, ℙ = model parameter space

source
SciMLExpectations.SystemMapType
SystemMap(prob, args...; kwargs...)

Representation of a system solution map for a given prob::DEProblem. args and kwargs are forwarded to the equation solver.

source
SciMLExpectations.ProcessNoiseSystemMapType
ProcessNoiseSystemMap(prob, n, args...; kwargs...)

Representation of a system solution map for a given prob::SDEProblem. args and kwargs are forwarded to the equation solver. n is the number of terms in the Kosambi–Karhunen–Loève representation of the process noise.

source
SciMLExpectations.GenericDistributionType

GenericDistribution(d, ds...)

Defines a generic distribution that simply wraps functions for the pdf, rand, and bounds. Users can use this to define any arbitrary joint pdf. It is included because the Distributions.jl Product method for mixed distributions is type-unstable.

source
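
For example, a joint distribution over one uniform and one normal component could be wrapped as follows (a minimal sketch; the component distributions are arbitrary):

using Distributions, SciMLExpectations
# Wrap two independent univariate distributions into one joint uncertainty distribution
gd = GenericDistribution(Uniform(-1.0, 1.0), Normal(0.0, 0.1))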
diff --git a/dev/manual/solve/index.html b/dev/manual/solve/index.html index 3e7b18a..087e220 100644 --- a/dev/manual/solve/index.html +++ b/dev/manual/solve/index.html @@ -1,4 +1,4 @@ -Solving Expectation Problems · SciMLExpectations.jl

Solving Expectation Problems

CommonSolve.solveMethod
solve(exprob::ExpectationProblem, expalg::MonteCarlo)

Solve an ExpectationProblem using Monte Carlo integration.

source
CommonSolve.solveMethod
solve(exprob::ExpectationProblem, expalg::Koopman;
+Solving Expectation Problems · SciMLExpectations.jl

Solving Expectation Problems

CommonSolve.solveMethod
solve(exprob::ExpectationProblem, expalg::MonteCarlo)

Solve an ExpectationProblem using Monte Carlo integration.

source
CommonSolve.solveMethod
solve(exprob::ExpectationProblem, expalg::Koopman;
       maxiters = 1000000, batch = 0, quadalg = HCubatureJL(),
-      ireltol = 1e-2, iabstol = 1e-2, kwargs...)

Solve an ExpectationProblem using Koopman integration.

source
+ ireltol = 1e-2, iabstol = 1e-2, kwargs...)

Solve an ExpectationProblem using Koopman integration.

source
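
Assuming an ExpectationProblem exprob has already been built (see the previous manual page), the two solver calls might look like the following sketch; here the MonteCarlo argument is taken to be the number of trajectories, and the Koopman keywords are the ones listed above:

using SciMLExpectations, Integrals

# Monte Carlo estimate
sol_mc = solve(exprob, MonteCarlo(100_000))

# Koopman estimate with the default adaptive quadrature and tightened tolerances
sol_k = solve(exprob, Koopman(); quadalg = HCubatureJL(), ireltol = 1e-3, iabstol = 1e-3)
sol_k.u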
diff --git a/dev/tutorials/gpu_bayesian/96735fe1.svg b/dev/tutorials/gpu_bayesian/646b829a.svg similarity index 91% rename from dev/tutorials/gpu_bayesian/96735fe1.svg rename to dev/tutorials/gpu_bayesian/646b829a.svg index fe43208..66b684e 100644 (regenerated plot SVG; path data omitted)
diff --git a/dev/tutorials/gpu_bayesian/61c84ddd.svg b/dev/tutorials/gpu_bayesian/8747d35c.svg similarity index 71% rename from dev/tutorials/gpu_bayesian/61c84ddd.svg rename to dev/tutorials/gpu_bayesian/8747d35c.svg index 418fe06..9ba8a0d 100644 (regenerated plot SVG; path data omitted)
diff --git a/dev/tutorials/gpu_bayesian/dff386e6.svg b/dev/tutorials/gpu_bayesian/bea642b9.svg similarity index 92% rename from dev/tutorials/gpu_bayesian/dff386e6.svg rename to dev/tutorials/gpu_bayesian/bea642b9.svg index 3d1bad8..58e360e 100644 (regenerated plot SVG; path data omitted)
diff --git a/dev/tutorials/gpu_bayesian/index.html b/dev/tutorials/gpu_bayesian/index.html index 849439f..23c481d 100644 --- a/dev/tutorials/gpu_bayesian/index.html +++
b/dev/tutorials/gpu_bayesian/index.html @@ -22,7 +22,7 @@
u0 = [1.0, 1.0]
prob1 = ODEProblem(lotka_volterra, u0, (0.0, 10.0), p)
sol = solve(prob1, Tsit5())
-plot(sol)
Example block output

From the Lotka-Volterra equation, we will generate a dataset with known parameters:

sol1 = solve(prob1, Tsit5(), saveat = 0.1)
retcode: Success
+plot(sol)
Example block output

From the Lotka-Volterra equation, we will generate a dataset with known parameters:

sol1 = solve(prob1, Tsit5(), saveat = 0.1)
retcode: Success
 Interpolation: 1st order linear
 t: 101-element Vector{Float64}:
   0.0
@@ -67,7 +67,7 @@
  [0.98356090632007, 1.1062868199419809]
  [1.0337581256020674, 0.9063703842886032]

Now let's assume our dataset has noise. We can add this noise in and plot the noisy data against the generating set:

odedata = Array(sol1) + 0.8 * randn(size(Array(sol1)))
 plot(sol1, alpha = 0.3, legend = false);
-scatter!(sol1.t, odedata');
Example block output

Now let's assume that all we know is the data odedata and the model form. What we want to do is use the data to inform us of the parameters, while also getting a probabilistic sense of the uncertainty around our parameter estimate. This is done via Bayesian estimation. For a full treatment of Bayesian estimation of differential equations, see the Bayesian differential equation tutorial from Turing.jl.

Following that tutorial, we choose a set of priors and perform NUTS sampling to arrive at the MCMC chain:

Turing.setadbackend(:forwarddiff)
+scatter!(sol1.t, odedata');
Example block output

Now let's assume that all we know is the data odedata and the model form. What we want to do is use the data to inform us of the parameters, while also getting a probabilistic sense of the uncertainty around our parameter estimate. This is done via Bayesian estimation. For a full treatment of Bayesian estimation of differential equations, see the Bayesian differential equation tutorial from Turing.jl.

Following that tutorial, we choose a set of priors and perform NUTS sampling to arrive at the MCMC chain:

Turing.setadbackend(:forwarddiff)
 
 @model function fitlv(data, prob1)
     σ ~ InverseGamma(2, 3) # ~ is the tilde character
@@ -93,8 +93,8 @@
 Iterations        = 501:1:1500
 Number of chains  = 3
 Samples per chain = 1000
-Wall duration     = 57.62 seconds
-Compute duration  = 52.41 seconds
+Wall duration     = 68.11 seconds
+Compute duration  = 61.85 seconds
 parameters        = σ, α, β, γ, δ
 internals         = lp, n_steps, is_accept, acceptance_rate, log_density, hamiltonian_energy, hamiltonian_energy_error, max_hamiltonian_energy_error, tree_depth, numerical_error, step_size, nom_step_size
 
@@ -118,7 +118,7 @@
            β    0.9894    1.0369    1.0640    1.1084    1.1829
            γ    2.7360    2.9281    3.0304    3.1281    3.3134
            δ    0.8724    0.9384    0.9747    1.0111    1.0733
-

This chain gives a discrete approximation to the probability distribution of our desired quantities. We can plot the chains to see these distributions in action:

plot(chain)
Example block output

Great! From our data, we have arrived at a probability distribution for our parameter values.

Evaluating Model Hypotheses with the Koopman Expectation

Now, let's try to ask a question: what is the expected value of x (the first term in the differential equation) at time t=10, given the known uncertainties in our parameters? This is a good tutorial question because all other probabilistic statements can be phrased similarly. A question like “what is the probability that x(T) > 1 at the final time T?” can similarly be phrased as an expected value (probability statements are expected values of characteristic functions, which are 1 if true and 0 if false). So in general, the kinds of questions we want to ask and answer are expectations about the solutions of the differential equation.

The trivial way to solve this problem is to sample 100,000 sets of parameters from the parameter distribution given by the Bayesian estimation, solve the ODE 100,000 times, and then take the average. But are 100,000 ODE solves enough? It's hard to tell, and even then, the convergence of this approach is slow. This is the Monte Carlo approach, and its error only shrinks like 1/sqrt(N): each additional digit of accuracy costs roughly 100 times more simulations. Slow.

However, the Koopman expectation can converge with far fewer points, allowing the use of higher-order quadrature methods that converge exponentially faster in many cases. To use the Koopman expectation, we first need to define our observable function g. This function designates the quantity of the solution whose expectation we wish to calculate. Thus, for our question “what is the expected value of x at time t=10?” we would simply use:

function g(sol, p)
+

This chain gives a discrete approximation to the probability distribution of our desired quantities. We can plot the chains to see these distributions in action:

plot(chain)
Example block output

Great! From our data, we have arrived at a probability distribution for our parameter values.

Evaluating Model Hypotheses with the Koopman Expectation

Now, let's try to ask a question: what is the expected value of x (the first term in the differential equation) at time t=10, given the known uncertainties in our parameters? This is a good tutorial question because all other probabilistic statements can be phrased similarly. A question like “what is the probability that x(T) > 1 at the final time T?” can similarly be phrased as an expected value (probability statements are expected values of characteristic functions, which are 1 if true and 0 if false). So in general, the kinds of questions we want to ask and answer are expectations about the solutions of the differential equation.
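
For example, the probability statement above could itself be written as an observable (a hypothetical sketch, not part of the tutorial):

# Indicator (characteristic function) observable: E[g_prob] = P(x(T) > 1)
g_prob(sol, p) = sol[1, end] > 1 ? 1.0 : 0.0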

The trivial way to solve this problem is to sample 100,000 sets of parameters from the parameter distribution given by the Bayesian estimation, solve the ODE 100,000 times, and then take the average. But are 100,000 ODE solves enough? It's hard to tell, and even then, the convergence of this approach is slow. This is the Monte Carlo approach, and its error only shrinks like 1/sqrt(N): each additional digit of accuracy costs roughly 100 times more simulations. Slow.

However, the Koopman expectation can converge with far fewer points, allowing the use of higher-order quadrature methods that converge exponentially faster in many cases. To use the Koopman expectation, we first need to define our observable function g. This function designates the quantity of the solution whose expectation we wish to calculate. Thus, for our question “what is the expected value of x at time t=10?” we would simply use:

function g(sol, p)
     sol[1, end]
 end
g (generic function with 1 method)

Now we need to make the expectation call, providing our initial condition and parameters as probability distributions. For this case, we will use the same constant u0 as before. But let's turn our Bayesian MCMC chains into distributions through kernel density estimation (the plots of the distribution above are just KDE plots!).

p_kde = [kde(vec(Array(chain[:α]))), kde(vec(Array(chain[:β]))),
     kde(vec(Array(chain[:γ]))), kde(vec(Array(chain[:δ])))]
4-element Vector{KernelDensity.UnivariateKDE{StepRangeLen{Float64, Base.TwicePrecision{Float64}, Base.TwicePrecision{Float64}, Int64}}}:
@@ -153,4 +153,4 @@
 monteprob = EnsembleProblem(prob, prob_func = prob_func, safetycopy = false)
 @time sol = solve(monteprob, Tsit5(), EnsembleGPUArray(), trajectories = 10_000,
                   saveat = 1.0f0)

Let's now use this with the ensembling method. We need to specify a batch size, i.e. the number of ODEs solved at the same time, and pass in our ensembling method. The following is a GPU-accelerated, uncertainty-quantified estimate of the expectation of the solution:

# batchmode = EnsembleGPUArray() #where to pass this?
-sol = solve(exprob, Koopman(), batch = 100, quadalg = CubaSUAVE())
+sol = solve(exprob, Koopman(), batch = 100, quadalg = CubaSUAVE())
diff --git a/dev/tutorials/introduction/2457e3ca.svg b/dev/tutorials/introduction/5550ebaa.svg similarity index 77% rename from dev/tutorials/introduction/2457e3ca.svg rename to dev/tutorials/introduction/5550ebaa.svg index 5b0a6f8..3b69208 100644 (regenerated plot SVG; path data omitted)
diff --git a/dev/tutorials/introduction/e60eea05.svg b/dev/tutorials/introduction/609fa8a8.svg similarity index 82% rename from dev/tutorials/introduction/e60eea05.svg rename to dev/tutorials/introduction/609fa8a8.svg index d0af47b..ff39dfd 100644 (regenerated plot SVG; path data omitted)
diff --git a/dev/tutorials/introduction/dc9e9ec8.svg b/dev/tutorials/introduction/91ff811a.svg similarity index 90% rename from dev/tutorials/introduction/dc9e9ec8.svg rename to dev/tutorials/introduction/91ff811a.svg index 213c812..cb3f274 100644 (regenerated plot SVG; path data omitted)
diff --git a/dev/tutorials/introduction/7fdd94d1.svg b/dev/tutorials/introduction/e803f7b4.svg similarity index 81% rename from dev/tutorials/introduction/7fdd94d1.svg rename to dev/tutorials/introduction/e803f7b4.svg index 35c3603..aedc742 100644 (regenerated plot SVG; path data omitted)
diff --git a/dev/tutorials/introduction/index.html b/dev/tutorials/introduction/index.html index 443d2d6..41d77ad 100644 --- a/dev/tutorials/introduction/index.html +++ b/dev/tutorials/introduction/index.html @@ -5,14 +5,14 @@
tspan = (0.0, 10.0)
prob = ODEProblem(f, u0, tspan, p)
sol = solve(prob, Tsit5())
-plot(sol)
Example block output

However, what if we wish to consider a random initial condition? Assume u0 is distributed uniformly from -10.0 to 10.0, i.e.,

using Distributions
+plot(sol)
Example block output

However, what if we wish to consider a random initial condition? Assume u0 is distributed uniformly from -10.0 to 10.0, i.e.,

using Distributions
 u0_dist = [Uniform(-10.0, 10.0)]
1-element Vector{Distributions.Uniform{Float64}}:
  Distributions.Uniform{Float64}(a=-10.0, b=10.0)

We can then run a Monte Carlo simulation of 100,000 trajectories by solving an EnsembleProblem.

prob_func(prob, i, repeat) = remake(prob, u0 = rand.(u0_dist))
 ensemble_prob = EnsembleProblem(prob, prob_func = prob_func)
 
 ensemblesol = solve(ensemble_prob, Tsit5(), EnsembleThreads(), trajectories = 100000)
EnsembleSolution Solution of length 100000 with uType:
 ODESolution{Float64, 2, Vector{Vector{Float64}}, Nothing, Nothing, Vector{Float64}, Vector{Vector{Vector{Float64}}}, ODEProblem{Vector{Float64}, Tuple{Float64, Float64}, false, Vector{Float64}, ODEFunction{false, SciMLBase.AutoSpecialize, typeof(Main.f), LinearAlgebra.UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing, Nothing}, Base.Pairs{Symbol, Union{}, Tuple{}, NamedTuple{(), Tuple{}}}, SciMLBase.StandardODEProblem}, OrdinaryDiffEq.Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}, OrdinaryDiffEq.InterpolationData{ODEFunction{false, SciMLBase.AutoSpecialize, typeof(Main.f), LinearAlgebra.UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing, Nothing}, Vector{Vector{Float64}}, Vector{Float64}, Vector{Vector{Vector{Float64}}}, OrdinaryDiffEq.Tsit5ConstantCache}, DiffEqBase.Stats, Nothing}

Plotting a summary of the trajectories produces

summ = EnsembleSummary(ensemblesol)
-plot(summ)
Example block output

Given the ensemble solution, we can then compute the expectation of a function $g\left(\text{sol},p\right)$ of the system state u at any time in the timespan, e.g., the state itself at t=4.0 as

g(sol, p) = sol(4.0)
+plot(summ)
Example block output

Given the ensemble solution, we can then compute the expectation of a function $g\left(\text{sol},p\right)$ of the system state u at any time in the timespan, e.g., the state itself at t=4.0 as

g(sol, p) = sol(4.0)
 mean([g(sol, prob.p) for sol in ensemblesol])
1-element Vector{Float64}:
  0.006729399779693562

Alternatively, SciMLExpectations.jl offers a convenient interface for this type of calculation, using ExpectationProblem.

using SciMLExpectations
 gd = GenericDistribution(u0_dist...)
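
The remaining setup is elided in this diff; a plausible sketch of how the pieces are assembled, following the manual pages above, is:

# Sketch only: the uncertain sample x becomes the initial condition, p stays fixed
h(x, u0, p) = x, p
g(sol, p) = sol(4.0)          # as defined above
sm = SystemMap(prob, Tsit5())
exprob = ExpectationProblem(sm, g, h, gd; nout = 1)
sol = solve(exprob, Koopman())
sol.u  # expectation of u(4.0), comparable to the Monte Carlo mean above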
@@ -87,7 +87,7 @@
  0.2016186364835746
  0.17353590778751105
  0.1493637534733821

We can then plot these values along with the analytical solution

plot(t -> exp(p[1] * t) * mean(u0_dist[1]), tspan..., xlabel = "t", label = "analytical")
-scatter!(collect(saveat), sol.u, marker = :o, label = nothing)
Example block output

Benefits of Using Vector-Valued Functions

In the above examples, we used vector-valued expectation calculations to compute the various expectations required. Alternatively, one could simply compute multiple scalar-valued expectations. However, in most cases it is more efficient to use the vector-valued form. This is especially true when the ODE to be solved is computationally expensive.

To demonstrate this, let's compute the expectation of $x$, $x^2$, and $x^3$ using both approaches while counting the number of times g() is evaluated. This is the same as the number of simulation runs required to arrive at the solution. First, consider the scalar-valued approach. Here, we follow the same method as before, but we add a counter to our function evaluation that stores the number of function calls for each expectation calculation to an array.

function g(sol, p, power, counter)
+scatter!(collect(saveat), sol.u, marker = :o, label = nothing)
Example block output

Benefits of Using Vector-Valued Functions

In the above examples, we used vector-valued expectation calculations to compute the various expectations required. Alternatively, one could simply compute multiple scalar-valued expectations. However, in most cases it is more efficient to use the vector-valued form. This is especially true when the ODE to be solved is computationally expensive.

To demonstrate this, let's compute the expectation of $x$, $x^2$, and $x^3$ using both approaches while counting the number of times g() is evaluated. This is the same as the number of simulation runs required to arrive at the solution. First, consider the scalar-valued approach. Here, we follow the same method as before, but we add a counter to our function evaluation that stores the number of function calls for each expectation calculation to an array.

function g(sol, p, power, counter)
     counter[power] += 1
     sol(4.0)[1]^power
 end
@@ -141,7 +141,7 @@
      ribbon = t -> -sqrt(exp(2 * p[1] * t) * var(u0_dist[1])),
      label = "Analytical Mean, 1 std bounds")
 scatter!(collect(saveat), μ, marker = :x, yerror = σ, c = :black,
-         label = "Koopman Mean, 1 std bounds")
Example block output

Skewness

A similar approach can be used to compute skewness

function g(sol, p)
+         label = "Koopman Mean, 1 std bounds")
Example block output

Skewness

A similar approach can be used to compute skewness

function g(sol, p)
     x = sol(4.0)[1]
     [x, x^2, x^3]
 end
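
Given the three raw moments returned by this observable, the skewness follows from the standard identities (a small helper sketched here, not code from the tutorial):

# m = [E[x], E[x²], E[x³]] from the vector-valued expectation
function skewness_from_raw_moments(m)
    μ = m[1]
    var = m[2] - μ^2                        # central second moment
    third_central = m[3] - 3μ * var - μ^3   # E[(x - μ)³]
    return third_central / var^(3 / 2)
end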
@@ -184,4 +184,4 @@
 sm = SystemMap(prob, Tsit5(), EnsembleCPUArray())
 exprob = ExpectationProblem(sm, g, h, gd; nout = 1)
 sol = solve(exprob, Koopman(), batch = 10, quadalg = CubaSUAVE())
-sol.u

The performance gains realized by leveraging batch GPU processing are problem-dependent. In this case, the number of batch evaluations required to overcome the overhead of using the GPU exceeds the number of simulations required to converge to the quadrature solution.

+sol.u

The performance gains realized by leveraging batch GPU processing are problem-dependent. In this case, the number of batch evaluations required to overcome the overhead of using the GPU exceeds the number of simulations required to converge to the quadrature solution.

diff --git a/dev/tutorials/optimization_under_uncertainty/55a7fa59.svg b/dev/tutorials/optimization_under_uncertainty/25943d84.svg similarity index 82% rename from dev/tutorials/optimization_under_uncertainty/55a7fa59.svg rename to dev/tutorials/optimization_under_uncertainty/25943d84.svg index f5f501c..0cf8684 100644 (regenerated plot SVG; path data omitted)
diff --git a/dev/tutorials/optimization_under_uncertainty/01b56c52.svg b/dev/tutorials/optimization_under_uncertainty/32d0d08d.svg similarity index 89% rename from dev/tutorials/optimization_under_uncertainty/01b56c52.svg rename to dev/tutorials/optimization_under_uncertainty/32d0d08d.svg index 70fe628..5e79024 100644 (regenerated plot SVG; path data omitted)
diff --git a/dev/tutorials/optimization_under_uncertainty/1be450da.svg b/dev/tutorials/optimization_under_uncertainty/3a8cbffd.svg similarity index 99% rename from dev/tutorials/optimization_under_uncertainty/1be450da.svg rename to dev/tutorials/optimization_under_uncertainty/3a8cbffd.svg index ae5a60c..f08441c 100644 (regenerated plot SVG; path data omitted)
diff --git a/dev/tutorials/optimization_under_uncertainty/48522f88.svg b/dev/tutorials/optimization_under_uncertainty/41ad3b6a.svg similarity index 90% rename from dev/tutorials/optimization_under_uncertainty/48522f88.svg rename to dev/tutorials/optimization_under_uncertainty/41ad3b6a.svg index 7bf42c8..7c6e790 100644 (regenerated plot SVG; path data omitted)
diff --git a/dev/tutorials/optimization_under_uncertainty/4289f109.svg b/dev/tutorials/optimization_under_uncertainty/a7450916.svg similarity index 99% rename from dev/tutorials/optimization_under_uncertainty/4289f109.svg rename to dev/tutorials/optimization_under_uncertainty/a7450916.svg index 133d356..6fd65bb 100644 (regenerated plot SVG; path data omitted)
diff --git a/dev/tutorials/optimization_under_uncertainty/6dc3b2cc.svg b/dev/tutorials/optimization_under_uncertainty/cb1ce5d5.svg similarity index 90% rename from dev/tutorials/optimization_under_uncertainty/6dc3b2cc.svg rename to dev/tutorials/optimization_under_uncertainty/cb1ce5d5.svg index 9b2b1ce..1505d49 100644 (regenerated plot SVG; path data omitted)
diff --git a/dev/tutorials/optimization_under_uncertainty/2e756b62.svg b/dev/tutorials/optimization_under_uncertainty/d1af0674.svg similarity index 99% rename from dev/tutorials/optimization_under_uncertainty/2e756b62.svg rename to dev/tutorials/optimization_under_uncertainty/d1af0674.svg index 8f191dc..cd10284 100644 (regenerated plot SVG; path data omitted)
diff --git a/dev/tutorials/optimization_under_uncertainty/c627738d.svg b/dev/tutorials/optimization_under_uncertainty/f4886ad1.svg similarity index 99% rename from dev/tutorials/optimization_under_uncertainty/c627738d.svg rename to dev/tutorials/optimization_under_uncertainty/f4886ad1.svg index 5115680..43884ef 100644 (regenerated plot SVG; path data omitted)
diff --git a/dev/tutorials/optimization_under_uncertainty/index.html b/dev/tutorials/optimization_under_uncertainty/index.html index 3574602..d25cc6f 100644 --- a/dev/tutorials/optimization_under_uncertainty/index.html +++ b/dev/tutorials/optimization_under_uncertainty/index.html @@ -18,14 +18,14 @@
prob = ODEProblem(ball!, u0, tspan, p)
sol = solve(prob, Tsit5(), callback = ground_cb)
-plot(sol, vars = (1, 3), label = nothing, xlabel = "x", ylabel = "y")
Example block output

For this particular problem, we wish to measure the impact distance from a point $y=25$ on a wall at $x=25$. So, we introduce an additional callback that terminates the simulation on wall impact.

stop_condition(u, t, integrator) = u[1] - 25.0
+plot(sol, vars = (1, 3), label = nothing, xlabel = "x", ylabel = "y")
Example block output

For this particular problem, we wish to measure the impact distance from a point $y=25$ on a wall at $x=25$. So, we introduce an additional callback that terminates the simulation on wall impact.

stop_condition(u, t, integrator) = u[1] - 25.0
 stop_cb = ContinuousCallback(stop_condition, terminate!)
 cbs = CallbackSet(ground_cb, stop_cb)
 
 tspan = (0.0, 1500.0)
 prob = ODEProblem(ball!, u0, tspan, p)
 sol = solve(prob, Tsit5(), callback = cbs)
-plot(sol, vars = (1, 3), label = nothing, xlabel = "x", ylabel = "y")
Example block output

To help visualize this problem, we plot the solution as follows, where the star indicates the desired impact location.

rectangle(xc, yc, w, h) = Shape(xc .+ [-w, w, w, -w] ./ 2.0, yc .+ [-h, -h, h, h] ./ 2.0)
+plot(sol, vars = (1, 3), label = nothing, xlabel = "x", ylabel = "y")
Example block output

To help visualize this problem, we plot the solution as follows, where the star indicates the desired impact location.

rectangle(xc, yc, w, h) = Shape(xc .+ [-w, w, w, -w] ./ 2.0, yc .+ [-h, -h, h, h] ./ 2.0)
 
 begin
     plot(sol, vars = (1, 3), label = nothing, lw = 3, c = :black)
@@ -34,7 +34,7 @@
     plot!(rectangle(27.5, 25, 5, 50), c = :red, label = nothing)
     scatter!([25], [25], marker = :star, ms = 10, label = nothing, c = :green)
     ylims!(0.0, 50.0)
-end
Example block output

Considering Uncertainty

We now wish to introduce uncertainty in p[2], the coefficient of restitution. This is defined via a continuous univariate distribution from Distributions.jl. We can then run a Monte Carlo simulation of 100 trajectories via the EnsembleProblem interface.

using Distributions
+end
Example block output

Considering Uncertainty

We now wish to introduce uncertainty in p[2], the coefficient of restitution. This is defined via a continuous univariate distribution from Distributions.jl. We can then run a Monte Carlo simulation of 100 trajectories via the EnsembleProblem interface.

using Distributions
 
 cor_dist = truncated(Normal(0.9, 0.02), 0.9 - 3 * 0.02, 1.0)
 trajectories = 100
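The EnsembleProblem call referred to above falls outside the hunk shown in this diff. As a rough sketch of that step, assuming a helper that redraws p[2] from cor_dist for each trajectory (the prob_func name and its body are illustrative, not taken from this page; only ensemblesol is referenced later):

prob_func(prob, i, repeat) = remake(prob, p = [prob.p[1], rand(cor_dist)])  # hypothetical helper: resample the restitution coefficient
ensembleprob = EnsembleProblem(prob, prob_func = prob_func)
ensemblesol = solve(ensembleprob, Tsit5(); trajectories = trajectories, callback = cbs)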
@@ -52,7 +52,7 @@
     scatter!([25], [25], marker = :star, ms = 10, label = nothing, c = :green)
     plot!(sol, vars = (1, 3), label = nothing, lw = 3, c = :black, ls = :dash)
     xlims!(0.0, 27.5)
-end
Example block output

Here, we plot the Monte Carlo trajectories along with the trajectory corresponding to the mean of the distribution (dashed line).

We now wish to compute the expected squared impact distance from the star. This is called an “observation” of our system or an “observable” of interest.

We define this observable as

obs(sol, p) = abs2(sol[3, end] - 25)
obs (generic function with 1 method)

With the observable defined, we can compute the expected squared miss distance from our Monte Carlo simulation results as

mean_ensemble = mean([obs(sol, p) for sol in ensemblesol])
35.90267990627214

Alternatively, we can use the Koopman() algorithm in SciMLExpectations.jl to compute this expectation much more efficiently as

using SciMLExpectations
+end
Example block output

Here, we plot the Monte Carlo trajectories along with the trajectory corresponding to the mean of the distribution (dashed line).

We now wish to compute the expected squared impact distance from the star. This is called an “observation” of our system or an “observable” of interest.

We define this observable as

obs(sol, p) = abs2(sol[3, end] - 25)
obs (generic function with 1 method)

With the observable defined, we can compute the expected squared miss distance from our Monte Carlo simulation results as

mean_ensemble = mean([obs(sol, p) for sol in ensemblesol])
35.90267990627214

Alternatively, we can use the Koopman() algorithm in SciMLExpectations.jl to compute this expectation much more efficiently as

using SciMLExpectations
 gd = GenericDistribution(cor_dist)
 h(x, u, p) = u, [p[1]; x[1]]
 sm = SystemMap(prob, Tsit5(), callback = cbs)
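The expectation computation itself lies outside this hunk. A minimal sketch of how the pieces above are typically combined, assuming the ExpectationProblem(sm, obs, h, gd) constructor and an ireltol keyword on solve (hedged; check the SciMLExpectations docs for the exact signature):

exprob = ExpectationProblem(sm, obs, h, gd)   # system map, observable, parameter map, distribution
exp_estimate = solve(exprob, Koopman(); ireltol = 1e-5)
exp_estimate.u   # expected squared miss distance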
@@ -92,7 +92,7 @@
     scatter!([25], [25], marker = :star, ms = 10, label = nothing, c = :green)
     ylims!(0.0, 50.0)
     xlims!(minx[1], 27.5)
-end
Example block output

Looks pretty good! But how long did it take? Let's benchmark.

@time solve(opt_prob, optimizer)
u: 3-element Vector{Float64}:
+end
Example block output

Looks pretty good! But how long did it take? Let's benchmark.

@time solve(opt_prob, optimizer)
u: 3-element Vector{Float64}:
   0.0
   2.4428947026478425
  49.20927899180528

Not bad for bound-constrained optimization under uncertainty of a hybrid system!

Probabilistic Constraints

With this approach, we can also consider probabilistic constraints. Let us now consider a wall at $x=20$ with height 25.

constraint = [20.0, 25.0]
@@ -105,7 +105,7 @@
     scatter!([25], [25], marker = :star, ms = 10, label = nothing, c = :green)
     ylims!(0.0, 50.0)
     xlims!(minx[1], 27.5)
-end
Example block output

We now wish to minimize the same loss function as before, but introduce an inequality constraint such that the solution must have less than a 1% chance of colliding with the wall at $x=20$. This class of probabilistic constraints is called a chance constraint.

To do this, we first introduce a new callback and solve the system using the previous optimal solution

constraint_condition(u, t, integrator) = u[1] - constraint[1]
+end
Example block output

We now wish to minimize the same loss function as before, but introduce an inequality constraint such that the solution must have less than a 1% chance of colliding with the wall at $x=20$. This class of probabilistic constraints is called a chance constraint.

To do this, we first introduce a new callback and solve the system using the previous optimal solution

constraint_condition(u, t, integrator) = u[1] - constraint[1]
 function constraint_affect!(integrator)
     integrator.u[3] < constraint[2] ? terminate!(integrator) : nothing
 end
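The remainder of this step sits outside the hunk shown here. A minimal sketch, assuming the new callback is combined with the existing ground and stop callbacks and the system is re-solved from the previously found optimum (constraint_cb and constraint_cbs are illustrative names):

constraint_cb = ContinuousCallback(constraint_condition, constraint_affect!)
constraint_cbs = CallbackSet(ground_cb, stop_cb, constraint_cb)
sol = solve(remake(prob, u0 = make_u0(minx)), Tsit5(), callback = constraint_cbs)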
@@ -128,7 +128,7 @@
     scatter!([25], [25], marker = :star, ms = 10, label = nothing, c = :green)
     ylims!(0.0, 50.0)
     xlims!(minx[1], 27.5)
-end
Example block output

That doesn't look good!

We now need a second observable for the system. To compute the probability of impact, we use an indicator function for whether a trajectory impacts the wall. In other words, this function returns 1 if the trajectory hits the wall and 0 otherwise.

function constraint_obs(sol, p)
+end
Example block output

That doesn't look good!

We now need a second observable for the system. To compute the probability of impact, we use an indicator function for whether a trajectory impacts the wall. In other words, this function returns 1 if the trajectory hits the wall and 0 otherwise.

function constraint_obs(sol, p)
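     # Explanatory note: (constraint[1] - sol[1, 1]) / sol[2, 1] is the time at which the ball's
     # x position reaches the wall, assuming a constant horizontal velocity; sol(t)[3] is the
     # height y at that time, compared against the wall height constraint[2].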
     sol((constraint[1] - sol[1, 1]) / sol[2, 1])[3] <= constraint[2] ? one(sol[1, end]) :
     zero(sol[1, end])
 end
constraint_obs (generic function with 1 method)

Using the previously computed optimal initial conditions, let's compute the probability of hitting this wall

sm = SystemMap(remake(prob, u0 = make_u0(minx)), Tsit5(), callback = cbs)
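The probability computation itself falls outside this hunk. A minimal sketch, reusing the parameter map h and distribution gd from above together with the indicator observable (the exprob and pimpact names, and the ireltol keyword, are assumptions):

exprob = ExpectationProblem(sm, constraint_obs, h, gd)
pimpact = solve(exprob, Koopman(); ireltol = 1e-5).u   # estimated probability of hitting the wall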
@@ -170,4 +170,4 @@
     scatter!([25], [25], marker = :star, ms = 10, label = nothing, c = :green)
     ylims!(0.0, 50.0)
     xlims!(minx[1], 27.5)
-end
Example block output
+end
Example block output
diff --git a/dev/tutorials/process_noise/index.html b/dev/tutorials/process_noise/index.html
index e649e26..fcf247e 100644
--- a/dev/tutorials/process_noise/index.html
+++ b/dev/tutorials/process_noise/index.html
@@ -26,4 +26,4 @@
 4.375566158316454
 8.751132316632908
 13.126698474949364
- 17.502264633265817

In theory, any numerical integration method from Integrals.jl is supported, but in practice many of them struggle to compute the expected value accurately. We got the best results with the Divonne algorithm from Cuba.jl.

+ 17.502264633265817

In theory, any numerical integration method from Integrals.jl is supported, but in practice many of them struggle to compute the expected value accurately. We got the best results with the Divonne algorithm from Cuba.jl.
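As an illustration of how a particular quadrature routine could be selected, assuming the Koopman solve accepts a quadalg keyword and that exprob is an ExpectationProblem built as in the tutorial (the packaging of the Divonne wrapper is also an assumption: recent Integrals.jl versions expose CubaDivonne through the Cuba extension, while older versions used the IntegralsCuba package):

using Integrals, Cuba   # assumed to provide CubaDivonne(); see note above
exp_estimate = solve(exprob, Koopman(); quadalg = CubaDivonne())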