diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 9088f49..0e9ecac 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -12,14 +12,18 @@ on: - 'docs/**' jobs: test: - runs-on: ubuntu-latest + runs-on: ${{ matrix.os }} strategy: + fail-fast: false matrix: group: - Core version: - '1' - - '1.6' + os: + - ubuntu-latest + - macos-latest + - windows-latest steps: - uses: actions/checkout@v4 - uses: julia-actions/setup-julia@v1 diff --git a/Project.toml b/Project.toml index 528576d..7ce4fe3 100644 --- a/Project.toml +++ b/Project.toml @@ -23,14 +23,14 @@ Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" Aqua = "0.8" BenchmarkTools = "1" ComponentArrays = "0.15" +Cuba = "2" +Cubature = "1.5" DiffEqBase = "6.100" DiffEqNoiseProcess = "5" Distributions = "0.23, 0.24, 0.25" FiniteDiff = "2" ForwardDiff = "0.10" -Integrals = "3.1" -IntegralsCuba = "0.2" -IntegralsCubature = "0.2" +Integrals = "4.4" KernelDensity = "0.6" LinearAlgebra = "1" OrdinaryDiffEq = "6" @@ -42,7 +42,7 @@ RecursiveArrayTools = "2, 3" Reexport = "0.2, 1.0" ReferenceTests = "0.10" SafeTestsets = "0.1" -SciMLBase = "1, 2" +SciMLBase = "2" StaticArrays = "1" Statistics = "1" StochasticDiffEq = "6" @@ -56,11 +56,11 @@ julia = "1.6" Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595" BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf" ComponentArrays = "b0b7db55-cfe3-40fc-9ded-d10e2dbeff66" +Cuba = "8a292aeb-7a57-582c-b821-06e4c11590b1" +Cubature = "667455a9-e2ce-5579-9412-b964f529a492" FiniteDiff = "6a86dc24-6348-571c-b903-95158fe2bd41" ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" Integrals = "de52edbc-65ea-441a-8357-d3a637375a31" -IntegralsCuba = "e00cd5f1-6337-4131-8b37-28b2fe4cd6cb" -IntegralsCubature = "c31f79ba-6e32-46d4-a52f-182a8ac42a54" OrdinaryDiffEq = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed" Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" PkgBenchmark = "32113eaa-f34f-5b0d-bd6c-c81e245fc73d" @@ -74,4 +74,4 @@ Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" TestExtras = "5ed8adda-3752-4e41-b88a-e8b09835ee3a" [targets] -test = ["Test", "Aqua", "SafeTestsets", "TestExtras", "Pkg", "PkgBenchmark", "BenchmarkTools", "ReferenceTests", "SymPy", "Random", "OrdinaryDiffEq", "ComponentArrays", "ForwardDiff", "Integrals", "IntegralsCubature", "IntegralsCuba", "FiniteDiff", "RecursiveArrayTools", "StochasticDiffEq"] +test = ["Test", "Aqua", "SafeTestsets", "TestExtras", "Pkg", "PkgBenchmark", "BenchmarkTools", "ReferenceTests", "SymPy", "Random", "OrdinaryDiffEq", "ComponentArrays", "ForwardDiff", "Integrals", "Cubature", "Cuba", "FiniteDiff", "RecursiveArrayTools", "StochasticDiffEq"] diff --git a/README.md b/README.md index 815bc26..2b0c558 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,7 @@ the documentation, which contains the unreleased features. 
### Example ```julia -using SciMLExpectations, OrdinaryDiffEq, Distributions, IntegralsCubature +using SciMLExpectations, OrdinaryDiffEq, Distributions, Cubature function eom!(du, u, p, t, A) du .= A * u @@ -56,7 +56,7 @@ julia> analytical ```julia g(sol, p) = sol[:, end] -exprob = ExpectationProblem(sm, g, cov, gd; nout = length(u0)) +exprob = ExpectationProblem(sm, g, cov, gd) sol = solve(exprob, Koopman(); quadalg = CubatureJLh(), ireltol = 1e-3, iabstol = 1e-3) sol.u # Expectation of the states 1 and 2 at the final time point diff --git a/docs/Project.toml b/docs/Project.toml index 9fb8f7c..2783027 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -1,11 +1,11 @@ [deps] +Cuba = "8a292aeb-7a57-582c-b821-06e4c11590b1" DiffEqGPU = "071ae1c0-96b5-11e9-1965-c90190d839ea" DiffEqNoiseProcess = "77a26b50-5914-5dd7-bc55-306e6241c503" DifferentialEquations = "0c46a032-eb83-5123-abaf-570d42b7fbaa" Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f" Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4" ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" -IntegralsCuba = "e00cd5f1-6337-4131-8b37-28b2fe4cd6cb" KernelDensity = "5ab0869b-81aa-558d-bb23-cbf5423bbe9b" MCMCChains = "c7f686f2-ff18-58e9-bc7b-31028e88f75d" Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" @@ -26,7 +26,6 @@ DifferentialEquations = "7" Distributions = "0.25" Documenter = "1" ForwardDiff = "0.10" -IntegralsCuba = "0.3" KernelDensity = "0.6" MCMCChains = "6" Optimization = "3" diff --git a/docs/src/tutorials/gpu_bayesian.md b/docs/src/tutorials/gpu_bayesian.md index 3b5af9f..a3efce9 100644 --- a/docs/src/tutorials/gpu_bayesian.md +++ b/docs/src/tutorials/gpu_bayesian.md @@ -22,7 +22,7 @@ Let's start by importing all the necessary libraries: using OrdinaryDiffEq using Turing, MCMCChains, Distributions using KernelDensity -using SciMLExpectations, IntegralsCuba +using SciMLExpectations, Cuba using DiffEqGPU using Plots, StatsPlots using Random; @@ -166,7 +166,7 @@ ub = [p_kde_i.x[end] for p_kde_i in p_kde] #upper bound for integration gd = GenericDistribution(pdf_func, rand_func, lb, ub) h(x, u, p) = u, x sm = SystemMap(prob1, Tsit5()) -exprob = ExpectationProblem(sm, g, h, gd; nout = 1) +exprob = ExpectationProblem(sm, g, h, gd) sol = solve(exprob, Koopman(); reltol = 1e-2, abstol = 1e-2) sol.u ``` diff --git a/docs/src/tutorials/introduction.md b/docs/src/tutorials/introduction.md index 80be7ad..92d79c6 100644 --- a/docs/src/tutorials/introduction.md +++ b/docs/src/tutorials/introduction.md @@ -62,7 +62,7 @@ using SciMLExpectations gd = GenericDistribution(u0_dist...) h(x, u, p) = x, p sm = SystemMap(prob, Tsit5()) -exprob = ExpectationProblem(sm, g, h, gd; nout = 1) +exprob = ExpectationProblem(sm, g, h, gd) sol = solve(exprob, MonteCarlo(100000)) sol.u ``` @@ -115,7 +115,7 @@ Changing the distribution, we arrive at ```@example introduction u0_dist = [Uniform(0.0, 10.0)] gd = GenericDistribution(u0_dist...) -exprob = ExpectationProblem(sm, g, h, gd; nout = length(u0)) +exprob = ExpectationProblem(sm, g, h, gd) sol = solve(exprob, MonteCarlo(100000)) sol.u ``` @@ -125,7 +125,7 @@ and ```@example introduction u0_dist = [Uniform(0.0, 10.0)] gd = GenericDistribution(u0_dist...) -exprob = ExpectationProblem(sm, g, h, gd; nout = length(u0)) +exprob = ExpectationProblem(sm, g, h, gd) sol = solve(exprob, Koopman()) sol.u ``` @@ -141,7 +141,7 @@ Note that the `Koopman()` algorithm doesn't currently support infinite or semi-i ```@example introduction u0_dist = [Normal(3.0, 2.0)] gd = GenericDistribution(u0_dist...) 
-exprob = ExpectationProblem(sm, g, h, gd; nout = length(u0)) +exprob = ExpectationProblem(sm, g, h, gd) sol = solve(exprob, Koopman()) sol.u ``` @@ -157,7 +157,7 @@ Using a truncated distribution will alleviate this problem. However, there is an ```@example introduction u0_dist = [truncated(Normal(3.0, 2.0), -1000, 1000)] gd = GenericDistribution(u0_dist...) -exprob = ExpectationProblem(sm, g, h, gd; nout = length(u0)) +exprob = ExpectationProblem(sm, g, h, gd) sol = solve(exprob, Koopman()) sol.u ``` @@ -167,7 +167,7 @@ whereas truncating at $\pm 4\sigma$ produces the correct result ```@example introduction u0_dist = [truncated(Normal(3.0, 2.0), -5, 11)] gd = GenericDistribution(u0_dist...) -exprob = ExpectationProblem(sm, g, h, gd; nout = length(u0)) +exprob = ExpectationProblem(sm, g, h, gd) sol = solve(exprob, Koopman()) sol.u ``` @@ -177,20 +177,20 @@ If a large truncation is required, it is best practice to center the distributio ```@example introduction u0_dist = [truncated(Normal(3.0, 2.0), 3 - 1000, 3 + 1000)] gd = GenericDistribution(u0_dist...) -exprob = ExpectationProblem(sm, g, h, gd; nout = length(u0)) +exprob = ExpectationProblem(sm, g, h, gd) sol = solve(exprob, Koopman()) sol.u ``` ## Vector-Valued Functions -`ExpectationProblem` can also handle vector-valued functions. Simply pass the vector-valued function and set the `nout` kwarg to the length of the vector the function returns. +`ExpectationProblem` can also handle vector-valued functions. Here, we demonstrate this by computing the expectation of `u` at `t=4.0s` and `t=6.0s` ```@example introduction g(sol, p) = [sol(4.0)[1], sol(6.0)[1]] -exprob = ExpectationProblem(sm, g, h, gd; nout = 2) +exprob = ExpectationProblem(sm, g, h, gd) sol = solve(exprob, Koopman()) sol.u ``` @@ -208,7 +208,7 @@ saveat = tspan[1]:0.5:tspan[2] g(sol, p) = sol[1, :] prob = ODEProblem(f, u0, tspan, p, saveat = saveat) sm = SystemMap(prob, Tsit5()) -exprob = ExpectationProblem(sm, g, h, gd; nout = length(saveat)) +exprob = ExpectationProblem(sm, g, h, gd) sol = solve(exprob, Koopman()) sol.u ``` @@ -234,21 +234,21 @@ end counter = [0, 0, 0] g(sol, p, power) = g(sol, p, power, counter) g(sol, p) = g(sol, p, 1) -exprob = ExpectationProblem(sm, g, h, gd; nout = 1) +exprob = ExpectationProblem(sm, g, h, gd) sol = solve(exprob, Koopman()) sol.u ``` ```@example introduction g(sol, p) = g(sol, p, 2) -exprob = ExpectationProblem(sm, g, h, gd; nout = 1) +exprob = ExpectationProblem(sm, g, h, gd) sol = solve(exprob, Koopman()) sol.u ``` ```@example introduction g(sol, p) = g(sol, p, 3) -exprob = ExpectationProblem(sm, g, h, gd; nout = 1) +exprob = ExpectationProblem(sm, g, h, gd) sol = solve(exprob, Koopman()) sol.u ``` @@ -269,7 +269,7 @@ function g(sol, p, counter) end counter = [0] g(sol, p) = g(sol, p, counter) -exprob = ExpectationProblem(sm, g, h, gd; nout = 3) +exprob = ExpectationProblem(sm, g, h, gd) sol = solve(exprob, Koopman()) sol.u ``` @@ -305,7 +305,7 @@ function g(sol, p) x = sol(4.0)[1] [x, x^2] end -exprob = ExpectationProblem(sm, g, h, gd; nout = 2) +exprob = ExpectationProblem(sm, g, h, gd) sol = solve(exprob, Koopman()) sol.u mean_g = sol.u[1] @@ -327,7 +327,7 @@ saveat = tspan[1]:0.5:tspan[2] g(sol, p) = [sol[1, :]; sol[1, :] .^ 2] prob = ODEProblem(f, u0, tspan, p, saveat = saveat) sm = SystemMap(prob, Tsit5()) -exprob = ExpectationProblem(sm, g, h, gd; nout = length(saveat) * 2) +exprob = ExpectationProblem(sm, g, h, gd) sol = solve(exprob, Koopman()) sol.u @@ -350,7 +350,7 @@ function g(sol, p) x = sol(4.0)[1] [x, x^2, x^3] end 
-exprob = ExpectationProblem(sm, g, h, gd; nout = 3) +exprob = ExpectationProblem(sm, g, h, gd) sol = solve(exprob, Koopman()) sol.u mean_g = sol.u[1] @@ -367,7 +367,7 @@ It is also possible to solve the various simulations in parallel by using the `b The default quadrature algorithm to solve `ExpectationProblem` does not support batch-mode evaluation. So, we first load dependencies for additional quadrature algorithms ```@example introduction -using IntegralsCuba +using Cuba ``` We then solve our expectation as before, using a `batch=10` multi-thread parallelization via `EnsembleThreads()` of Cuba's SUAVE algorithm. We also introduce additional uncertainty in the model parameter. @@ -379,7 +379,7 @@ gd = GenericDistribution(u0_dist, p_dist) g(sol, p) = sol(6.0)[1] h(x, u, p) = [x[1]], [x[2]] sm = SystemMap(prob, Tsit5(), EnsembleThreads()) -exprob = ExpectationProblem(sm, g, h, gd; nout = 1) +exprob = ExpectationProblem(sm, g, h, gd) # batchmode = EnsembleThreads() #where to pass this? sol = solve(exprob, Koopman(), batch = 10, quadalg = CubaSUAVE()) sol.u @@ -392,8 +392,8 @@ Now, let's compare the performance of the batch and non-batch modes ``` ```@example introduction -solve(exprob, Koopman(), batch = 0, quadalg = CubaSUAVE()) -@time solve(exprob, Koopman(), batch = 0, quadalg = CubaSUAVE()) +solve(exprob, Koopman(), quadalg = CubaSUAVE()) +@time solve(exprob, Koopman(), quadalg = CubaSUAVE()) ``` It is also possible to parallelize across the GPU. However, one must be careful of the limitations of ensemble solutions with the GPU. Please refer to [DiffEqGPU.jl](https://github.com/SciML/DiffEqGPU.jl) for details. @@ -428,7 +428,7 @@ g(sol, p) = sol(6.0f0)[1] h(x, u, p) = [x[1]], [x[2]] prob = ODEProblem(f, u0, tspan, p) sm = SystemMap(prob, Tsit5(), EnsembleCPUArray()) -exprob = ExpectationProblem(sm, g, h, gd; nout = 1) +exprob = ExpectationProblem(sm, g, h, gd) sol = solve(exprob, Koopman(), batch = 10, quadalg = CubaSUAVE()) sol.u ``` diff --git a/docs/src/tutorials/optimization_under_uncertainty.md b/docs/src/tutorials/optimization_under_uncertainty.md index 116a290..c9e99e5 100644 --- a/docs/src/tutorials/optimization_under_uncertainty.md +++ b/docs/src/tutorials/optimization_under_uncertainty.md @@ -106,7 +106,7 @@ using SciMLExpectations gd = GenericDistribution(cor_dist) h(x, u, p) = u, [p[1]; x[1]] sm = SystemMap(prob, Tsit5(), callback = cbs) -exprob = ExpectationProblem(sm, obs, h, gd; nout = 1) +exprob = ExpectationProblem(sm, obs, h, gd) sol = solve(exprob, Koopman(), ireltol = 1e-5) sol.u ``` @@ -121,7 +121,7 @@ make_u0(θ) = [θ[1], θ[2], θ[3], 0.0] function 𝔼_loss(θ, pars) prob = ODEProblem(ball!, make_u0(θ), tspan, p) sm = SystemMap(prob, Tsit5(), callback = cbs) - exprob = ExpectationProblem(sm, obs, h, gd; nout = 1) + exprob = ExpectationProblem(sm, obs, h, gd) sol = solve(exprob, Koopman(), ireltol = 1e-5) sol.u end @@ -228,7 +228,7 @@ Using the previously computed optimal initial conditions, let's compute the prob ```@example control sm = SystemMap(remake(prob, u0 = make_u0(minx)), Tsit5(), callback = cbs) -exprob = ExpectationProblem(sm, constraint_obs, h, gd; nout = 1) +exprob = ExpectationProblem(sm, constraint_obs, h, gd) sol = solve(exprob, Koopman(), ireltol = 1e-5) sol.u ``` @@ -239,7 +239,7 @@ We then set up the constraint function for NLopt just as before. 
function 𝔼_constraint(res, θ, pars) prob = ODEProblem(ball!, make_u0(θ), tspan, p) sm = SystemMap(prob, Tsit5(), callback = cbs) - exprob = ExpectationProblem(sm, constraint_obs, h, gd; nout = 1) + exprob = ExpectationProblem(sm, constraint_obs, h, gd) sol = solve(exprob, Koopman(), ireltol = 1e-5) res .= sol.u end diff --git a/docs/src/tutorials/process_noise.md b/docs/src/tutorials/process_noise.md index 714a215..7d0a77e 100644 --- a/docs/src/tutorials/process_noise.md +++ b/docs/src/tutorials/process_noise.md @@ -5,7 +5,7 @@ This done by representing the Wiener process using the [Kosambi–Karhunen–Lo ```@example process_noise using SciMLExpectations -using IntegralsCuba +using Cuba using StochasticDiffEq using DiffEqNoiseProcess using Distributions @@ -19,7 +19,7 @@ prob = SDEProblem(f, g, u0, (0.0, 1.0), noise = W) sm = ProcessNoiseSystemMap(prob, 8, LambaEM(), abstol = 1e-3, reltol = 1e-3) cov(x, u, p) = x, p observed(sol, p) = sol[:, end] -exprob = ExpectationProblem(sm, observed, cov; nout = length(u0)) +exprob = ExpectationProblem(sm, observed, cov) sol1 = solve(exprob, Koopman(), ireltol = 1e-3, iabstol = 1e-3, batch = 64, quadalg = CubaDivonne()) sol1.u diff --git a/src/expectation.jl b/src/expectation.jl index b21d5ba..f3a3864 100644 --- a/src/expectation.jl +++ b/src/expectation.jl @@ -32,21 +32,23 @@ struct MonteCarlo <: AbstractExpectationAlgorithm end # Builds integrand for arbitrary functions -function build_integrand(prob::ExpectationProblem, ::Koopman, ::Val{false}) +function build_integrand(prob::ExpectationProblem, ::Koopman, mid, p, ::Nothing) @unpack g, d = prob - function (x, p) + function integrand_koopman(x, p) g(x, p) * pdf(d, x) end + return IntegralFunction{false}(integrand_koopman, nothing) end # Builds integrand for DEProblems -function build_integrand(prob::ExpectationProblem{F}, ::Koopman, - ::Val{false}) where {F <: SystemMap} +function build_integrand(prob::ExpectationProblem{F}, ::Koopman, mid, p, + ::Nothing) where {F <: SystemMap} @unpack S, g, h, d = prob - function (x, p) + function integrand_koopman_systemmap(x, p) ū, p̄ = h(x, p.x[1], p.x[2]) g(S(ū, p̄), p̄) * pdf(d, x) end + return IntegralFunction{false}(integrand_koopman_systemmap, nothing) end function _make_view(x::Union{Vector{T}, Adjoint{T, Vector{T}}}, i) where {T} @@ -57,58 +59,43 @@ function _make_view(x, i) @view x[:, i] end -function build_integrand(prob::ExpectationProblem{F}, ::Koopman, - ::Val{true}) where {F <: SystemMap} +function build_integrand(prob::ExpectationProblem{F}, ::Koopman, mid, p, + batch::Integer) where {F <: SystemMap} @unpack S, g, h, d = prob - if prob.nout == 1 # TODO fix upstream in quadrature, expected sizes depend on quadrature method is requires different copying based on nout > 1 - set_result! = @inline function (dx, sol) - dx[:] .= sol[:] - end - else - set_result! = @inline function (dx, sol) - dx .= reshape(sol[:, :], size(dx)) - end - end - - prob_func = function (prob, i, repeat, x) # TODO is it better to make prob/output funcs outside of integrand, then call w/ closure? - u0, p = h((_make_view(x, i)), prob.u0, prob.p) - remake(prob, u0 = u0, p = p) + function prob_func(prob, i, repeat, x) # TODO is it better to make prob/output funcs outside of integrand, then call w/ closure? 
+ u0, _p = h((_make_view(x, i)), prob.u0, prob.p) + remake(prob, u0 = u0, p = _p) end output_func(sol, i, x) = (g(sol, sol.prob.p) * pdf(d, (_make_view(x, i))), false) - function (dx, x, p) - trajectories = size(x, 2) + function integrand_koopman_systemmap_batch(x, _) + trajectories = size(x)[end] # TODO How to inject ensemble method in solve? currently in SystemMap, but does that make sense? ensprob = EnsembleProblem(S.prob; output_func = (sol, i) -> output_func(sol, i, x), prob_func = (prob, i, repeat) -> prob_func(prob, i, repeat, x)) - sol = solve(ensprob, S.alg, S.ensemblealg; trajectories = trajectories, S.kwargs...) - set_result!(dx, sol) + solve(ensprob, S.alg, S.ensemblealg; trajectories = trajectories, S.kwargs...) + end + function integrand_koopman_systemmap_batch!(dx, x, p) + dx .= integrand_koopman_systemmap_batch(x, p) nothing end + proto_sol = integrand_koopman_systemmap_batch(reshape(collect(mid), size(mid)..., 1), p) + prototype = Array(proto_sol) + return BatchIntegralFunction{true}(integrand_koopman_systemmap_batch!, prototype; max_batch=batch) end -function build_integrand(prob::ExpectationProblem{F}, ::Koopman, - ::Val{false}) where {F <: ProcessNoiseSystemMap} - error("Out of position problems currently not supported") +function build_integrand(prob::ExpectationProblem{F}, ::Koopman, mid, + ::Nothing) where {F <: ProcessNoiseSystemMap} + error("Batching is required for Koopman ProcessNoiseSystemMap") end -function build_integrand(prob::ExpectationProblem{F}, ::Koopman, - ::Val{true}) where {F <: ProcessNoiseSystemMap} +function build_integrand(prob::ExpectationProblem{F}, ::Koopman, mid, p, + batch::Integer) where {F <: ProcessNoiseSystemMap} @unpack S, g, h, d = prob - if prob.nout == 1 # TODO fix upstream in quadrature, expected sizes depend on quadrature method is requires different copying based on nout > 1 - set_result! = @inline function (dx, sol) - dx[:] .= sol[:] - end - else - set_result! = @inline function (dx, sol) - dx .= reshape(sol[:, :], size(dx)) - end - end - prob_func = function (prob, i, repeat, x) Z, p = h((_make_view(x, i)), prob.u0, prob.p) function W(u, p, t) @@ -121,16 +108,21 @@ function build_integrand(prob::ExpectationProblem{F}, ::Koopman, output_func(sol, i, x) = (g(sol, sol.prob.p) * pdf(d, (_make_view(x, i))), false) - function (dx, x, p) - trajectories = size(x, 2) + function integrand_koopman_processnoisesystemmap_batch(x, p) + trajectories = size(x)[end] # TODO How to inject ensemble method in solve? currently in SystemMap, but does that make sense? ensprob = EnsembleProblem(S.prob; output_func = (sol, i) -> output_func(sol, i, x), prob_func = (prob, i, repeat) -> prob_func(prob, i, repeat, x)) - sol = solve(ensprob, S.args...; trajectories = trajectories, S.kwargs...) - set_result!(dx, sol) + solve(ensprob, S.args...; trajectories = trajectories, S.kwargs...) + end + function integrand_koopman_processnoisesystemmap_batch!(dx, x, p) + dx .= integrand_koopman_processnoisesystemmap_batch(x, p) nothing end + proto_sol = integrand_koopman_processnoisesystemmap_batch(reshape(collect(mid), size(mid)..., 1), p) + prototype = Array(proto_sol) + return BatchIntegralFunction{true}(integrand_koopman_processnoisesystemmap_batch!, prototype; max_batch=batch) end """ @@ -198,7 +190,7 @@ end """ ```julia solve(exprob::ExpectationProblem, expalg::Koopman; - maxiters = 1000000, batch = 0, quadalg = HCubatureJL(), + maxiters = 1000000, batch = nothing, quadalg = HCubatureJL(), ireltol = 1e-2, iabstol = 1e-2, kwargs...) 
``` @@ -206,37 +198,34 @@ Solve an `ExpectationProblem` using Koopman integration. """ function DiffEqBase.solve(prob::ExpectationProblem, expalg::Koopman, args...; maxiters = 1000000, - batch = 0, + batch = nothing, quadalg = HCubatureJL(), ireltol = 1e-2, iabstol = 1e-2, kwargs...) - integrand = build_integrand(prob, expalg, Val(batch > 0)) - lb, ub = extrema(prob.d) + domain = extrema(prob.d) + integrand = build_integrand(prob, expalg, sum(domain)/2, prob.params, batch) - sol = integrate(quadalg, expalg.sensealg, integrand, lb, ub, prob.params; + sol = integrate(quadalg, expalg.sensealg, integrand, domain, prob.params; reltol = ireltol, abstol = iabstol, maxiters = maxiters, - nout = prob.nout, batch = batch, kwargs...) return ExpectationSolution(sol.u, sol.resid, sol) end # Integrate function to test new Adjoints, will need to roll up to Integrals.jl -function integrate(quadalg, adalg::AbstractExpectationADAlgorithm, f, lb::TB, ub::TB, p; - nout = 1, batch = 0, - kwargs...) where {TB} - #TODO check batch iip type stability w/ IntegralProblem{XXXX} - prob = IntegralProblem{batch > 0}(f, lb, ub, p; nout = nout, batch = batch) +function integrate(quadalg, adalg::AbstractExpectationADAlgorithm, f, domain, p; + kwargs...) + prob = IntegralProblem(f, domain, p) solve(prob, quadalg; kwargs...) end # defines adjoint via ∫∂/∂p f(x,p) dx -Zygote.@adjoint function integrate(quadalg, adalg::NonfusedAD, f::F, lb::T, ub::T, +Zygote.@adjoint function integrate(quadalg, adalg::NonfusedAD, f::F, domain, params::P; - nout = 1, batch = 0, norm = norm, - kwargs...) where {F, T, P} - primal = integrate(quadalg, adalg, f, lb, ub, params; - norm = norm, nout = nout, batch = batch, + # norm = norm, + kwargs...) where {F, P} + primal = integrate(quadalg, adalg, f, domain, params; + # norm = norm, kwargs...) function integrate_pullbacks(Δ) @@ -244,8 +233,8 @@ Zygote.@adjoint function integrate(quadalg, adalg::NonfusedAD, f::F, lb::T, ub:: _, back = Zygote.pullback(p -> f(x, p), params) back(Δ)[1] end - ∂p = integrate(quadalg, adalg, dfdp, lb, ub, params; - norm = norm, nout = nout * length(params), batch = batch, + ∂p = integrate(quadalg, adalg, dfdp, domain, params; + # norm = norm, kwargs...) # ∂lb = -f(lb,params) #needs correct for dim > 1 # ∂ub = f(ub,params) @@ -256,23 +245,25 @@ end # defines adjoint via ∫[f(x,p; ∂/∂p f(x,p)] dx, ie it fuses the primal, post the primal calculation # has flag to only compute quad norm with respect to only the primal in the pull-back. Gives same quadrature points as doing forwarddiff -Zygote.@adjoint function integrate(quadalg, adalg::PostfusedAD, f::F, lb::T, ub::T, +Zygote.@adjoint function integrate(quadalg, adalg::PostfusedAD, f::F, domain, params::P; - nout = 1, batch = 0, norm = norm, - kwargs...) where {F, T, P} - primal = integrate(quadalg, adalg, f, lb, ub, params; - norm = norm, nout = nout, batch = batch, + # norm = norm, + kwargs...) where {F, P} + @assert f isa IntegralFunction{false} "adjoint doesn't support in-place or batching" + primal = integrate(quadalg, adalg, f, domain, params; + norm = norm, kwargs...) - _norm = adalg.norm_partials ? norm : primalnorm(nout, norm) + nout = length(primal) + # _norm = adalg.norm_partials ? norm : primalnorm(nout, norm) function integrate_pullbacks(Δ) function dfdp(x, params) y, back = Zygote.pullback(p -> f(x, p), params) [y; back(Δ)[1]] #TODO need to match proper array type? promote_type??? 
end - ∂p = integrate(quadalg, adalg, dfdp, lb, ub, params; - norm = _norm, nout = nout + nout * length(params), batch = batch, + ∂p = integrate(quadalg, adalg, dfdp, domain, params; + # norm = _norm, kwargs...) return nothing, nothing, nothing, nothing, nothing, @view ∂p[(nout + 1):end] end @@ -280,20 +271,22 @@ Zygote.@adjoint function integrate(quadalg, adalg::PostfusedAD, f::F, lb::T, ub: end # Fuses primal and partials prior to pullback, I doubt this will stick around based on required system evals. -Zygote.@adjoint function integrate(quadalg, adalg::PrefusedAD, f::F, lb::T, ub::T, +Zygote.@adjoint function integrate(quadalg, adalg::PrefusedAD, f::F, domain, params::P; - nout = 1, batch = 0, norm = norm, - kwargs...) where {F, T, P} + # norm = norm, + kwargs...) where {F, P} + @assert f isa IntegralFunction{false} "adjoint doesn't support in-place or batching" # from Seth Axen via Slack # Does not work w/ ArrayPartition unless with following hack # Base.similar(A::ArrayPartition, ::Type{T}, dims::NTuple{N,Int}) where {T,N} = similar(Array(A), T, dims) # TODO add ArrayPartition similar fix upstream, see https://github.com/SciML/RecursiveArrayTools.jl/issues/135 ∂f_∂params(x, params) = only(Zygote.jacobian(p -> f(x, p), params)) f_augmented(x, params) = [f(x, params); ∂f_∂params(x, params)...] #TODO need to match proper array type? promote_type??? - _norm = adalg.norm_partials ? norm : primalnorm(nout, norm) + nout = length(f(sum(domain)/2, params)) + # _norm = adalg.norm_partials ? norm : primalnorm(nout, norm) - res = integrate(quadalg, adalg, f_augmented, lb, ub, params; - norm = _norm, nout = nout + nout * length(params), batch = batch, + res = integrate(quadalg, adalg, f_augmented, domain, params; + # norm = _norm, kwargs...) primal = first(res) function integrate_pullback(Δy) diff --git a/src/problem_types.jl b/src/problem_types.jl index 14ebc38..95f5b6f 100644 --- a/src/problem_types.jl +++ b/src/problem_types.jl @@ -2,9 +2,9 @@ abstract type AbstractUncertaintyProblem end """ ```julia -ExpectationProblem(S, g, h, pdist, params, nout) -ExpectationProblem(g, pdist, params; nout = 1) -ExpectationProblem(sm::SystemMap, g, h, d; nout = 1) +ExpectationProblem(S, g, h, pdist, params) +ExpectationProblem(g, pdist, params) +ExpectationProblem(sm::SystemMap, g, h, d) ``` Defines ∫ g(S(h(x,u0,p)))*f(x)dx @@ -18,7 +18,6 @@ Let 𝕏 = uncertainty space, 𝕌 = Initial condition space, ℙ = model parame - h: 𝕏 × 𝕌 × ℙ → 𝕌 × ℙ also known as covariate function. - pdf(d,x): 𝕏 → ℝ the uncertainty distribution of the initial states. 
- params - - nout """ struct ExpectationProblem{TS, TG, TH, TF, TP} <: AbstractUncertaintyProblem # defines ∫ g(S(h(x,u0,p)))*f(x)dx @@ -28,21 +27,21 @@ struct ExpectationProblem{TS, TG, TH, TF, TP} <: AbstractUncertaintyProblem h::TH # cov(input_func), h: 𝕏 × 𝕌 × ℙ → 𝕌 × ℙ d::TF # distribution, pdf(d,x): 𝕏 → ℝ params::TP - nout::Int end # Constructor for general maps/functions -function ExpectationProblem(g, pdist, params; nout = 1) +function ExpectationProblem(g, pdist, params; nout = nothing) + !isnothing(nout) && @warn "nout is deprecated and unused" h(x, u, p) = x, p S(x, p) = x - ExpectationProblem(S, g, h, pdist, params, nout) + ExpectationProblem(S, g, h, pdist, params) end # Constructor for DEProblems -function ExpectationProblem(sm::SystemMap, g, h, d; nout = 1) +function ExpectationProblem(sm::SystemMap, g, h, d; nout = nothing) + !isnothing(nout) && @warn "nout is deprecated and unused" ExpectationProblem(sm, g, h, d, - ArrayPartition(deepcopy(sm.prob.u0), deepcopy(sm.prob.p)), - nout) + ArrayPartition(deepcopy(sm.prob.u0), deepcopy(sm.prob.p))) end distribution(prob::ExpectationProblem) = prob.d @@ -58,7 +57,8 @@ parameters(prob::ExpectationProblem) = prob.params # exp_prob::ExpectationProblem # end -function ExpectationProblem(sm::ProcessNoiseSystemMap, g, h; nout = 1) +function ExpectationProblem(sm::ProcessNoiseSystemMap, g, h; nout = nothing) + !isnothing(nout) && @warn "nout is deprecated and unused" d = GenericDistribution((Truncated(Normal(), -4.0, 4.0) for i in 1:(sm.n))...) - ExpectationProblem(sm, g, h, d, deepcopy(sm.prob.p), nout) + ExpectationProblem(sm, g, h, d, deepcopy(sm.prob.p)) end diff --git a/test/differentiation.jl b/test/differentiation.jl index c54d3c0..9d54379 100644 --- a/test/differentiation.jl +++ b/test/differentiation.jl @@ -22,7 +22,7 @@ include("setup.jl") end @testset "Correctness" begin fd = FiniteDiff.finite_difference_derivative(loss, 0.0) - @test VectorOfArray(ForwardDiff.derivative(loss, 0.0))≈fd rtol=1e-4 + @test ForwardDiff.derivative(loss, 0.0)≈fd rtol=1e-3 end @testset "Type Stability" begin pt = 0.0 #@test_broken @constinferred ForwardDiff.derivative(loss, pt) diff --git a/test/interface.jl b/test/interface.jl index c0d6881..2ef5f42 100644 --- a/test/interface.jl +++ b/test/interface.jl @@ -5,13 +5,6 @@ using Test, TestExtras, const DEU = SciMLExpectations include("setup.jl") -function (sm::SystemMap{DT})(u0, p) where {DT} - prob::DT = remake(sm.prob, - u0 = convert(typeof(sm.prob.u0), u0), - p = convert(typeof(sm.prob.p), p)) - solve(prob, sm.alg; sm.kwargs...) -end - @testset "GenericDistribution" begin dists = (Uniform(1, 2), Uniform(3, 4), Normal(0, 1), truncated(Normal(0, 1), -3, 3)) x = [mean(d) for d in dists] @@ -61,8 +54,8 @@ end end DEU.parameters) dists = (Uniform(1, 2), Uniform(3, 4), truncated(Normal(0, 1), -5, 5)) gd = GenericDistribution(dists...) 
+ x = [mean(d) for d in dists] @testset "DiffEq" begin - x = [mean(d) for d in dists] h(x, u, p) = x, p prob = ODEProblem(eoms[1], u0s[1], tspan, ps[1]) sm = SystemMap(prob, Tsit5(); saveat = 1.0) @@ -73,21 +66,21 @@ end end for foo in getters @constinferred foo(ep) end - f = @constinferred build_integrand(ep, Koopman(), Val(false)) + f = @constinferred build_integrand(ep, Koopman(), x, DEU.parameters(ep), nothing) @constinferred f(x, DEU.parameters(ep)) - fbatch = @constinferred build_integrand(ep, Koopman(), Val(true)) + fbatch = #= @constinferred =# build_integrand(ep, Koopman(), x, DEU.parameters(ep), 10) y = reshape(repeat(x, outer = 5), :, 5) dy = similar(y[1, :]) @constinferred fbatch(dy, y, DEU.parameters(ep)) # nout > 1 g2(soln, p) = [soln[1, end], soln[2, end]] - ep = @constinferred ExpectationProblem(sm, g2, h, gd; nout = 2) - f = @constinferred build_integrand(ep, Koopman(), Val(false)) + ep = @constinferred ExpectationProblem(sm, g2, h, gd) + f = @constinferred build_integrand(ep, Koopman(), x, DEU.parameters(ep), nothing) @constinferred f(x, DEU.parameters(ep)) - fbatch = @constinferred build_integrand(ep, Koopman(), Val(true)) + fbatch = #= @constinferred =# build_integrand(ep, Koopman(), x, DEU.parameters(ep), 10) y = reshape(repeat(x, outer = 5), :, 5) dy = similar(y[1:2, :]) @constinferred fbatch(dy, y, DEU.parameters(ep)) @@ -98,7 +91,7 @@ end end for foo in getters @constinferred foo(ep) end - f = @constinferred build_integrand(ep, Koopman(), Val(false)) + f = @constinferred build_integrand(ep, Koopman(), x, DEU.parameters(ep), nothing) @constinferred f([0.0, 1.0, 2.0], DEU.parameters(ep)) end end diff --git a/test/processnoise.jl b/test/processnoise.jl index a3375b2..d256708 100644 --- a/test/processnoise.jl +++ b/test/processnoise.jl @@ -1,7 +1,7 @@ using Test, TestExtras using SciMLExpectations -using IntegralsCuba +using Cuba using StochasticDiffEq using DiffEqNoiseProcess using Distributions @@ -15,7 +15,7 @@ prob = SDEProblem(f, g, u0, (0.0, 1.0), noise = W) sm = ProcessNoiseSystemMap(prob, 8, LambaEM(), abstol = 1e-3, reltol = 1e-3) cov(x, u, p) = x, p observed(sol, p) = sol[:, end] -exprob = ExpectationProblem(sm, observed, cov; nout = length(u0)) +exprob = ExpectationProblem(sm, observed, cov) sol1 = solve(exprob, Koopman(), ireltol = 1e-3, iabstol = 1e-3, batch = 64, quadalg = CubaDivonne()) sol2 = solve(exprob, MonteCarlo(1_000_000)) diff --git a/test/runtests.jl b/test/runtests.jl index 91b3a0a..4c66355 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -1,7 +1,9 @@ -using SafeTestsets +using SafeTestsets, Test -@safetestset "Quality Assurance" begin include("qa.jl") end -@safetestset "Expectation Process Noise Tests" begin include("processnoise.jl") end -@safetestset "Expectation Interface Tests" begin include("interface.jl") end -@safetestset "Expectation Solve Tests" begin include("solve.jl") end -@safetestset "Expectation Differentiation Tests" begin include("differentiation.jl") end +@testset "Integrals" begin + @safetestset "Quality Assurance" include("qa.jl") + @safetestset "Expectation Process Noise Tests" include("processnoise.jl") + @safetestset "Expectation Interface Tests" include("interface.jl") + @safetestset "Expectation Solve Tests" include("solve.jl") + @safetestset "Expectation Differentiation Tests" include("differentiation.jl") +end diff --git a/test/solve.jl b/test/solve.jl index 10bc5f6..9bdd708 100644 --- a/test/solve.jl +++ b/test/solve.jl @@ -1,6 +1,6 @@ using Test, TestExtras, SciMLExpectations, OrdinaryDiffEq, Distributions, - 
Integrals, IntegralsCubature, IntegralsCuba, + Integrals, Cubature, Cuba, StaticArrays, ComponentArrays, Random quadalgs = [ @@ -50,7 +50,7 @@ quadalgs_batch = [CubatureJLh(), CubatureJLp(), CubaSUAVE(), CubaDivonne(), Cuba end @testset "Vector-Valued Observable (nout > 1)" begin g(sol, p) = sol[:, end] - exprob = ExpectationProblem(sm, g, cov, gd; nout = length(u0)) + exprob = ExpectationProblem(sm, g, cov, gd) for alg in quadalgs @test solve(exprob, Koopman(); quadalg = alg, ireltol = 1e-3, iabstol = 1e-3).u≈analytical rtol=1e-2 @@ -82,7 +82,7 @@ end @testset "Vector-Valued Observable (nout > 1)" begin g(u, p) = [sum(p .* sin.(u[1])) + cos(u[2]), cos(u[2])] analytical = [2 * sin(1 / 2)^2 * sum(p) + 1 / sqrt(exp(1)), 1 / sqrt(exp(1))] - exprob = ExpectationProblem(g, gd, p; nout = 2) + exprob = ExpectationProblem(g, gd, p) for alg in quadalgs @test solve(exprob, Koopman(); quadalg = alg).u≈analytical rtol=1e-2 end
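
For reference, the Integrals.jl v4 calling convention that this patch adopts (a single tuple `domain` instead of separate `lb`/`ub`, integrands wrapped in `IntegralFunction`, and no `nout`/`batch` problem keywords) can be exercised standalone. The snippet below is a minimal illustrative sketch, not part of SciMLExpectations; the observable `g`, the distribution, and the integration bounds are placeholders chosen only to make the example self-contained.

```julia
# Minimal sketch of the Integrals.jl v4 API used by build_integrand/integrate above.
# IntegralFunction, IntegralProblem, HCubatureJL, and solve are Integrals.jl names;
# the integrand itself is a stand-in example.
using Integrals, Distributions

d = Normal(0.0, 1.0)
g(x, p) = p * x^2                       # hypothetical observable

# Out-of-place integrand: return value sizing is inferred, so no `nout` is needed.
f = IntegralFunction((x, p) -> g(x, p) * pdf(d, x))

domain = (-5.0, 5.0)                    # lower/upper bounds passed as one tuple
prob = IntegralProblem(f, domain, 2.0)  # third argument is the parameter `p`
sol = solve(prob, HCubatureJL(); reltol = 1e-3, abstol = 1e-3)
sol.u                                   # ≈ p * Var(d) = 2
```

For batched evaluation, the patch instead wraps an in-place integrand in `BatchIntegralFunction{true}(f!, prototype; max_batch = batch)`, with `prototype` obtained by evaluating the ensemble once at the domain midpoint, as done in `build_integrand` for `SystemMap` and `ProcessNoiseSystemMap`.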