From a486682616f3fb357ac1b91deb6fc4346d920017 Mon Sep 17 00:00:00 2001 From: lxvm Date: Sun, 18 Feb 2024 17:15:09 -0500 Subject: [PATCH 1/9] fix nout bug --- src/expectation.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/expectation.jl b/src/expectation.jl index edb6fde..d1223cf 100644 --- a/src/expectation.jl +++ b/src/expectation.jl @@ -63,7 +63,7 @@ function build_integrand(prob::ExpectationProblem{F}, ::Koopman, if prob.nout == 1 # TODO fix upstream in quadrature, expected sizes depend on quadrature method is requires different copying based on nout > 1 set_result! = @inline function (dx, sol) - dx[:] .= sol[:] + dx[:] .= sol end else set_result! = @inline function (dx, sol) From f32b4e7098ae388cb84f858f7f02a29afebea081 Mon Sep 17 00:00:00 2001 From: lxvm Date: Sun, 18 Feb 2024 17:15:21 -0500 Subject: [PATCH 2/9] fix diff test --- test/differentiation.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/differentiation.jl b/test/differentiation.jl index c54d3c0..9d54379 100644 --- a/test/differentiation.jl +++ b/test/differentiation.jl @@ -22,7 +22,7 @@ include("setup.jl") end @testset "Correctness" begin fd = FiniteDiff.finite_difference_derivative(loss, 0.0) - @test VectorOfArray(ForwardDiff.derivative(loss, 0.0))≈fd rtol=1e-4 + @test ForwardDiff.derivative(loss, 0.0)≈fd rtol=1e-3 end @testset "Type Stability" begin pt = 0.0 #@test_broken @constinferred ForwardDiff.derivative(loss, pt) From 2de1f269ee703ff9b189f1d8a0ea2eb10733f89c Mon Sep 17 00:00:00 2001 From: lxvm Date: Sun, 18 Feb 2024 18:18:07 -0500 Subject: [PATCH 3/9] remove a deprecation warning --- src/expectation.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/expectation.jl b/src/expectation.jl index d1223cf..87e1fb7 100644 --- a/src/expectation.jl +++ b/src/expectation.jl @@ -227,7 +227,7 @@ function integrate(quadalg, adalg::AbstractExpectationADAlgorithm, f, lb::TB, ub kwargs...) where {TB} #TODO check batch iip type stability w/ IntegralProblem{XXXX} batch = batch==0 ? nothing : batch - prob = IntegralProblem(f, lb, ub, p; nout = nout, batch = batch) + prob = IntegralProblem(f, (lb, ub), p; nout = nout, batch = batch) solve(prob, quadalg; kwargs...) end From 748a13627e3e214a6876d7d6f76b3fd5dd531b5f Mon Sep 17 00:00:00 2001 From: lxvm Date: Mon, 19 Feb 2024 14:17:13 -0500 Subject: [PATCH 4/9] deprecate nout --- src/problem_types.jl | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/src/problem_types.jl b/src/problem_types.jl index 14ebc38..95f5b6f 100644 --- a/src/problem_types.jl +++ b/src/problem_types.jl @@ -2,9 +2,9 @@ abstract type AbstractUncertaintyProblem end """ ```julia -ExpectationProblem(S, g, h, pdist, params, nout) -ExpectationProblem(g, pdist, params; nout = 1) -ExpectationProblem(sm::SystemMap, g, h, d; nout = 1) +ExpectationProblem(S, g, h, pdist, params) +ExpectationProblem(g, pdist, params) +ExpectationProblem(sm::SystemMap, g, h, d) ``` Defines ∫ g(S(h(x,u0,p)))*f(x)dx @@ -18,7 +18,6 @@ Let 𝕏 = uncertainty space, 𝕌 = Initial condition space, ℙ = model parame - h: 𝕏 × 𝕌 × ℙ → 𝕌 × ℙ also known as covariate function. - pdf(d,x): 𝕏 → ℝ the uncertainty distribution of the initial states. 
- params - - nout """ struct ExpectationProblem{TS, TG, TH, TF, TP} <: AbstractUncertaintyProblem # defines ∫ g(S(h(x,u0,p)))*f(x)dx @@ -28,21 +27,21 @@ struct ExpectationProblem{TS, TG, TH, TF, TP} <: AbstractUncertaintyProblem h::TH # cov(input_func), h: 𝕏 × 𝕌 × ℙ → 𝕌 × ℙ d::TF # distribution, pdf(d,x): 𝕏 → ℝ params::TP - nout::Int end # Constructor for general maps/functions -function ExpectationProblem(g, pdist, params; nout = 1) +function ExpectationProblem(g, pdist, params; nout = nothing) + !isnothing(nout) && @warn "nout is deprecated and unused" h(x, u, p) = x, p S(x, p) = x - ExpectationProblem(S, g, h, pdist, params, nout) + ExpectationProblem(S, g, h, pdist, params) end # Constructor for DEProblems -function ExpectationProblem(sm::SystemMap, g, h, d; nout = 1) +function ExpectationProblem(sm::SystemMap, g, h, d; nout = nothing) + !isnothing(nout) && @warn "nout is deprecated and unused" ExpectationProblem(sm, g, h, d, - ArrayPartition(deepcopy(sm.prob.u0), deepcopy(sm.prob.p)), - nout) + ArrayPartition(deepcopy(sm.prob.u0), deepcopy(sm.prob.p))) end distribution(prob::ExpectationProblem) = prob.d @@ -58,7 +57,8 @@ parameters(prob::ExpectationProblem) = prob.params # exp_prob::ExpectationProblem # end -function ExpectationProblem(sm::ProcessNoiseSystemMap, g, h; nout = 1) +function ExpectationProblem(sm::ProcessNoiseSystemMap, g, h; nout = nothing) + !isnothing(nout) && @warn "nout is deprecated and unused" d = GenericDistribution((Truncated(Normal(), -4.0, 4.0) for i in 1:(sm.n))...) - ExpectationProblem(sm, g, h, d, deepcopy(sm.prob.p), nout) + ExpectationProblem(sm, g, h, d, deepcopy(sm.prob.p)) end From c4560ea6675d8a906b8b4fc37a6b0c69b8cd3ba2 Mon Sep 17 00:00:00 2001 From: lxvm Date: Mon, 19 Feb 2024 14:17:27 -0500 Subject: [PATCH 5/9] remove nout from tests --- test/solve.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/solve.jl b/test/solve.jl index a80b4b0..9bdd708 100644 --- a/test/solve.jl +++ b/test/solve.jl @@ -50,7 +50,7 @@ quadalgs_batch = [CubatureJLh(), CubatureJLp(), CubaSUAVE(), CubaDivonne(), Cuba end @testset "Vector-Valued Observable (nout > 1)" begin g(sol, p) = sol[:, end] - exprob = ExpectationProblem(sm, g, cov, gd; nout = length(u0)) + exprob = ExpectationProblem(sm, g, cov, gd) for alg in quadalgs @test solve(exprob, Koopman(); quadalg = alg, ireltol = 1e-3, iabstol = 1e-3).u≈analytical rtol=1e-2 @@ -82,7 +82,7 @@ end @testset "Vector-Valued Observable (nout > 1)" begin g(u, p) = [sum(p .* sin.(u[1])) + cos(u[2]), cos(u[2])] analytical = [2 * sin(1 / 2)^2 * sum(p) + 1 / sqrt(exp(1)), 1 / sqrt(exp(1))] - exprob = ExpectationProblem(g, gd, p; nout = 2) + exprob = ExpectationProblem(g, gd, p) for alg in quadalgs @test solve(exprob, Koopman(); quadalg = alg).u≈analytical rtol=1e-2 end From f4e16a636e43a395b4b5d9c55dc5fc04908fd644 Mon Sep 17 00:00:00 2001 From: lxvm Date: Mon, 19 Feb 2024 14:17:41 -0500 Subject: [PATCH 6/9] Integrals 4 updates --- src/expectation.jl | 138 +++++++++++++++++++++------------------------ 1 file changed, 65 insertions(+), 73 deletions(-) diff --git a/src/expectation.jl b/src/expectation.jl index 87e1fb7..4c8a2bf 100644 --- a/src/expectation.jl +++ b/src/expectation.jl @@ -32,21 +32,23 @@ struct MonteCarlo <: AbstractExpectationAlgorithm end # Builds integrand for arbitrary functions -function build_integrand(prob::ExpectationProblem, ::Koopman, ::Val{false}) +function build_integrand(prob::ExpectationProblem, ::Koopman, mid, p, ::Nothing) @unpack g, d = prob - function (x, p) + function 
integrand_koopman(x, p) g(x, p) * pdf(d, x) end + return IntegralFunction{false}(integrand_koopman, nothing) end # Builds integrand for DEProblems -function build_integrand(prob::ExpectationProblem{F}, ::Koopman, - ::Val{false}) where {F <: SystemMap} +function build_integrand(prob::ExpectationProblem{F}, ::Koopman, mid, p, + ::Nothing) where {F <: SystemMap} @unpack S, g, h, d = prob - function (x, p) + function integrand_koopman_systemmap(x, p) ū, p̄ = h(x, p.x[1], p.x[2]) g(S(ū, p̄), p̄) * pdf(d, x) end + return IntegralFunction{false}(integrand_koopman_systemmap, nothing) end function _make_view(x::Union{Vector{T}, Adjoint{T, Vector{T}}}, i) where {T} @@ -57,20 +59,10 @@ function _make_view(x, i) @view x[:, i] end -function build_integrand(prob::ExpectationProblem{F}, ::Koopman, - ::Val{true}) where {F <: SystemMap} +function build_integrand(prob::ExpectationProblem{F}, ::Koopman, mid, p, + batch::Integer) where {F <: SystemMap} @unpack S, g, h, d = prob - if prob.nout == 1 # TODO fix upstream in quadrature, expected sizes depend on quadrature method is requires different copying based on nout > 1 - set_result! = @inline function (dx, sol) - dx[:] .= sol - end - else - set_result! = @inline function (dx, sol) - dx .= reshape(sol[:, :], size(dx)) - end - end - prob_func = function (prob, i, repeat, x) # TODO is it better to make prob/output funcs outside of integrand, then call w/ closure? u0, p = h((_make_view(x, i)), prob.u0, prob.p) remake(prob, u0 = u0, p = p) @@ -78,37 +70,32 @@ function build_integrand(prob::ExpectationProblem{F}, ::Koopman, output_func(sol, i, x) = (g(sol, sol.prob.p) * pdf(d, (_make_view(x, i))), false) - function (dx, x, p) - trajectories = size(x, 2) + function integrand_koopman_systemmap_batch(x, p) + trajectories = size(x)[end] # TODO How to inject ensemble method in solve? currently in SystemMap, but does that make sense? ensprob = EnsembleProblem(S.prob; output_func = (sol, i) -> output_func(sol, i, x), prob_func = (prob, i, repeat) -> prob_func(prob, i, repeat, x)) - sol = solve(ensprob, S.alg, S.ensemblealg; trajectories = trajectories, S.kwargs...) - set_result!(dx, sol) + solve(ensprob, S.alg, S.ensemblealg; trajectories = trajectories, S.kwargs...) + end + function integrand_koopman_systemmap_batch!(dx, x, p) + dx .= integrand_koopman_systemmap_batch(x, p) nothing end + proto_sol = integrand_koopman_systemmap_batch(reshape(collect(mid), size(mid)..., 1), p) + prototype = Array(proto_sol) + return BatchIntegralFunction{true}(integrand_koopman_systemmap_batch!, prototype; max_batch=batch) end -function build_integrand(prob::ExpectationProblem{F}, ::Koopman, - ::Val{false}) where {F <: ProcessNoiseSystemMap} - error("Out of position problems currently not supported") +function build_integrand(prob::ExpectationProblem{F}, ::Koopman, mid, + ::Nothing) where {F <: ProcessNoiseSystemMap} + error("Batching is required for Koopman ProcessNoiseSystemMap") end -function build_integrand(prob::ExpectationProblem{F}, ::Koopman, - ::Val{true}) where {F <: ProcessNoiseSystemMap} +function build_integrand(prob::ExpectationProblem{F}, ::Koopman, mid, p, + batch::Integer) where {F <: ProcessNoiseSystemMap} @unpack S, g, h, d = prob - if prob.nout == 1 # TODO fix upstream in quadrature, expected sizes depend on quadrature method is requires different copying based on nout > 1 - set_result! = @inline function (dx, sol) - dx[:] .= sol[:] - end - else - set_result! 
= @inline function (dx, sol) - dx .= reshape(sol[:, :], size(dx)) - end - end - prob_func = function (prob, i, repeat, x) Z, p = h((_make_view(x, i)), prob.u0, prob.p) function W(u, p, t) @@ -121,16 +108,21 @@ function build_integrand(prob::ExpectationProblem{F}, ::Koopman, output_func(sol, i, x) = (g(sol, sol.prob.p) * pdf(d, (_make_view(x, i))), false) - function (dx, x, p) - trajectories = size(x, 2) + function integrand_koopman_processnoisesystemmap_batch(x, p) + trajectories = size(x)[end] # TODO How to inject ensemble method in solve? currently in SystemMap, but does that make sense? ensprob = EnsembleProblem(S.prob; output_func = (sol, i) -> output_func(sol, i, x), prob_func = (prob, i, repeat) -> prob_func(prob, i, repeat, x)) - sol = solve(ensprob, S.args...; trajectories = trajectories, S.kwargs...) - set_result!(dx, sol) + solve(ensprob, S.args...; trajectories = trajectories, S.kwargs...) + end + function integrand_koopman_processnoisesystemmap_batch!(dx, x, p) + dx .= integrand_koopman_processnoisesystemmap_batch(x, p) nothing end + proto_sol = integrand_koopman_processnoisesystemmap_batch(reshape(collect(mid), size(mid)..., 1), p) + prototype = Array(proto_sol) + return BatchIntegralFunction{true}(integrand_koopman_processnoisesystemmap_batch!, prototype; max_batch=batch) end """ @@ -198,7 +190,7 @@ end """ ```julia solve(exprob::ExpectationProblem, expalg::Koopman; - maxiters = 1000000, batch = 0, quadalg = HCubatureJL(), + maxiters = 1000000, batch = nothing, quadalg = HCubatureJL(), ireltol = 1e-2, iabstol = 1e-2, kwargs...) ``` @@ -206,38 +198,34 @@ Solve an `ExpectationProblem` using Koopman integration. """ function DiffEqBase.solve(prob::ExpectationProblem, expalg::Koopman, args...; maxiters = 1000000, - batch = 0, + batch = nothing, quadalg = HCubatureJL(), ireltol = 1e-2, iabstol = 1e-2, kwargs...) - integrand = build_integrand(prob, expalg, Val(batch > 0)) - lb, ub = extrema(prob.d) + domain = extrema(prob.d) + integrand = build_integrand(prob, expalg, sum(domain)/2, prob.params, batch) - sol = integrate(quadalg, expalg.sensealg, integrand, lb, ub, prob.params; + sol = integrate(quadalg, expalg.sensealg, integrand, domain, prob.params; reltol = ireltol, abstol = iabstol, maxiters = maxiters, - nout = prob.nout, batch = batch, kwargs...) return ExpectationSolution(sol.u, sol.resid, sol) end # Integrate function to test new Adjoints, will need to roll up to Integrals.jl -function integrate(quadalg, adalg::AbstractExpectationADAlgorithm, f, lb::TB, ub::TB, p; - nout = 1, batch = 0, - kwargs...) where {TB} - #TODO check batch iip type stability w/ IntegralProblem{XXXX} - batch = batch==0 ? nothing : batch - prob = IntegralProblem(f, (lb, ub), p; nout = nout, batch = batch) +function integrate(quadalg, adalg::AbstractExpectationADAlgorithm, f, domain, p; + kwargs...) + prob = IntegralProblem(f, domain, p) solve(prob, quadalg; kwargs...) end # defines adjoint via ∫∂/∂p f(x,p) dx -Zygote.@adjoint function integrate(quadalg, adalg::NonfusedAD, f::F, lb::T, ub::T, +Zygote.@adjoint function integrate(quadalg, adalg::NonfusedAD, f::F, domain, params::P; - nout = 1, batch = 0, norm = norm, - kwargs...) where {F, T, P} - primal = integrate(quadalg, adalg, f, lb, ub, params; - norm = norm, nout = nout, batch = batch, + # norm = norm, + kwargs...) where {F, P} + primal = integrate(quadalg, adalg, f, domain, params; + # norm = norm, kwargs...) 
function integrate_pullbacks(Δ) @@ -245,8 +233,8 @@ Zygote.@adjoint function integrate(quadalg, adalg::NonfusedAD, f::F, lb::T, ub:: _, back = Zygote.pullback(p -> f(x, p), params) back(Δ)[1] end - ∂p = integrate(quadalg, adalg, dfdp, lb, ub, params; - norm = norm, nout = nout * length(params), batch = batch, + ∂p = integrate(quadalg, adalg, dfdp, domain, params; + # norm = norm, kwargs...) # ∂lb = -f(lb,params) #needs correct for dim > 1 # ∂ub = f(ub,params) @@ -257,23 +245,25 @@ end # defines adjoint via ∫[f(x,p; ∂/∂p f(x,p)] dx, ie it fuses the primal, post the primal calculation # has flag to only compute quad norm with respect to only the primal in the pull-back. Gives same quadrature points as doing forwarddiff -Zygote.@adjoint function integrate(quadalg, adalg::PostfusedAD, f::F, lb::T, ub::T, +Zygote.@adjoint function integrate(quadalg, adalg::PostfusedAD, f::F, domain, params::P; - nout = 1, batch = 0, norm = norm, - kwargs...) where {F, T, P} - primal = integrate(quadalg, adalg, f, lb, ub, params; - norm = norm, nout = nout, batch = batch, + # norm = norm, + kwargs...) where {F, P} + @assert f isa IntegralFunction{false} "adjoint doesn't support in-place or batching" + primal = integrate(quadalg, adalg, f, domain, params; + norm = norm, kwargs...) - _norm = adalg.norm_partials ? norm : primalnorm(nout, norm) + nout = length(primal) + # _norm = adalg.norm_partials ? norm : primalnorm(nout, norm) function integrate_pullbacks(Δ) function dfdp(x, params) y, back = Zygote.pullback(p -> f(x, p), params) [y; back(Δ)[1]] #TODO need to match proper array type? promote_type??? end - ∂p = integrate(quadalg, adalg, dfdp, lb, ub, params; - norm = _norm, nout = nout + nout * length(params), batch = batch, + ∂p = integrate(quadalg, adalg, dfdp, domain, params; + # norm = _norm, kwargs...) return nothing, nothing, nothing, nothing, nothing, @view ∂p[(nout + 1):end] end @@ -281,20 +271,22 @@ Zygote.@adjoint function integrate(quadalg, adalg::PostfusedAD, f::F, lb::T, ub: end # Fuses primal and partials prior to pullback, I doubt this will stick around based on required system evals. -Zygote.@adjoint function integrate(quadalg, adalg::PrefusedAD, f::F, lb::T, ub::T, +Zygote.@adjoint function integrate(quadalg, adalg::PrefusedAD, f::F, domain, params::P; - nout = 1, batch = 0, norm = norm, - kwargs...) where {F, T, P} + # norm = norm, + kwargs...) where {F, P} + @assert f isa IntegralFunction{false} "adjoint doesn't support in-place or batching" # from Seth Axen via Slack # Does not work w/ ArrayPartition unless with following hack # Base.similar(A::ArrayPartition, ::Type{T}, dims::NTuple{N,Int}) where {T,N} = similar(Array(A), T, dims) # TODO add ArrayPartition similar fix upstream, see https://github.com/SciML/RecursiveArrayTools.jl/issues/135 ∂f_∂params(x, params) = only(Zygote.jacobian(p -> f(x, p), params)) f_augmented(x, params) = [f(x, params); ∂f_∂params(x, params)...] #TODO need to match proper array type? promote_type??? - _norm = adalg.norm_partials ? norm : primalnorm(nout, norm) + nout = length(f(sum(domain)/2, params)) + # _norm = adalg.norm_partials ? norm : primalnorm(nout, norm) - res = integrate(quadalg, adalg, f_augmented, lb, ub, params; - norm = _norm, nout = nout + nout * length(params), batch = batch, + res = integrate(quadalg, adalg, f_augmented, domain, params; + # norm = _norm, kwargs...) 
primal = first(res) function integrate_pullback(Δy) From 04dccc72b59ebdba809a1bf5e5a1fa4103ca6e2a Mon Sep 17 00:00:00 2001 From: lxvm Date: Mon, 19 Feb 2024 14:24:51 -0500 Subject: [PATCH 7/9] update docs --- README.md | 4 +- docs/src/tutorials/gpu_bayesian.md | 2 +- docs/src/tutorials/introduction.md | 38 +++++++++---------- .../optimization_under_uncertainty.md | 8 ++-- docs/src/tutorials/process_noise.md | 2 +- 5 files changed, 27 insertions(+), 27 deletions(-) diff --git a/README.md b/README.md index 815bc26..2b0c558 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,7 @@ the documentation, which contains the unreleased features. ### Example ```julia -using SciMLExpectations, OrdinaryDiffEq, Distributions, IntegralsCubature +using SciMLExpectations, OrdinaryDiffEq, Distributions, Cubature function eom!(du, u, p, t, A) du .= A * u @@ -56,7 +56,7 @@ julia> analytical ```julia g(sol, p) = sol[:, end] -exprob = ExpectationProblem(sm, g, cov, gd; nout = length(u0)) +exprob = ExpectationProblem(sm, g, cov, gd) sol = solve(exprob, Koopman(); quadalg = CubatureJLh(), ireltol = 1e-3, iabstol = 1e-3) sol.u # Expectation of the states 1 and 2 at the final time point diff --git a/docs/src/tutorials/gpu_bayesian.md b/docs/src/tutorials/gpu_bayesian.md index 39c3c11..a3efce9 100644 --- a/docs/src/tutorials/gpu_bayesian.md +++ b/docs/src/tutorials/gpu_bayesian.md @@ -166,7 +166,7 @@ ub = [p_kde_i.x[end] for p_kde_i in p_kde] #upper bound for integration gd = GenericDistribution(pdf_func, rand_func, lb, ub) h(x, u, p) = u, x sm = SystemMap(prob1, Tsit5()) -exprob = ExpectationProblem(sm, g, h, gd; nout = 1) +exprob = ExpectationProblem(sm, g, h, gd) sol = solve(exprob, Koopman(); reltol = 1e-2, abstol = 1e-2) sol.u ``` diff --git a/docs/src/tutorials/introduction.md b/docs/src/tutorials/introduction.md index 27f9291..e06728f 100644 --- a/docs/src/tutorials/introduction.md +++ b/docs/src/tutorials/introduction.md @@ -62,7 +62,7 @@ using SciMLExpectations gd = GenericDistribution(u0_dist...) h(x, u, p) = x, p sm = SystemMap(prob, Tsit5()) -exprob = ExpectationProblem(sm, g, h, gd; nout = 1) +exprob = ExpectationProblem(sm, g, h, gd) sol = solve(exprob, MonteCarlo(100000)) sol.u ``` @@ -115,7 +115,7 @@ Changing the distribution, we arrive at ```@example introduction u0_dist = [Uniform(0.0, 10.0)] gd = GenericDistribution(u0_dist...) -exprob = ExpectationProblem(sm, g, h, gd; nout = length(u0)) +exprob = ExpectationProblem(sm, g, h, gd) sol = solve(exprob, MonteCarlo(100000)) sol.u ``` @@ -125,7 +125,7 @@ and ```@example introduction u0_dist = [Uniform(0.0, 10.0)] gd = GenericDistribution(u0_dist...) -exprob = ExpectationProblem(sm, g, h, gd; nout = length(u0)) +exprob = ExpectationProblem(sm, g, h, gd) sol = solve(exprob, Koopman()) sol.u ``` @@ -141,7 +141,7 @@ Note that the `Koopman()` algorithm doesn't currently support infinite or semi-i ```@example introduction u0_dist = [Normal(3.0, 2.0)] gd = GenericDistribution(u0_dist...) -exprob = ExpectationProblem(sm, g, h, gd; nout = length(u0)) +exprob = ExpectationProblem(sm, g, h, gd) sol = solve(exprob, Koopman()) sol.u ``` @@ -157,7 +157,7 @@ Using a truncated distribution will alleviate this problem. However, there is an ```@example introduction u0_dist = [truncated(Normal(3.0, 2.0), -1000, 1000)] gd = GenericDistribution(u0_dist...) 
-exprob = ExpectationProblem(sm, g, h, gd; nout = length(u0)) +exprob = ExpectationProblem(sm, g, h, gd) sol = solve(exprob, Koopman()) sol.u ``` @@ -167,7 +167,7 @@ whereas truncating at $\pm 4\sigma$ produces the correct result ```@example introduction u0_dist = [truncated(Normal(3.0, 2.0), -5, 11)] gd = GenericDistribution(u0_dist...) -exprob = ExpectationProblem(sm, g, h, gd; nout = length(u0)) +exprob = ExpectationProblem(sm, g, h, gd) sol = solve(exprob, Koopman()) sol.u ``` @@ -177,20 +177,20 @@ If a large truncation is required, it is best practice to center the distributio ```@example introduction u0_dist = [truncated(Normal(3.0, 2.0), 3 - 1000, 3 + 1000)] gd = GenericDistribution(u0_dist...) -exprob = ExpectationProblem(sm, g, h, gd; nout = length(u0)) +exprob = ExpectationProblem(sm, g, h, gd) sol = solve(exprob, Koopman()) sol.u ``` ## Vector-Valued Functions -`ExpectationProblem` can also handle vector-valued functions. Simply pass the vector-valued function and set the `nout` kwarg to the length of the vector the function returns. +`ExpectationProblem` can also handle vector-valued functions. Here, we demonstrate this by computing the expectation of `u` at `t=4.0s` and `t=6.0s` ```@example introduction g(sol, p) = [sol(4.0)[1], sol(6.0)[1]] -exprob = ExpectationProblem(sm, g, h, gd; nout = 2) +exprob = ExpectationProblem(sm, g, h, gd) sol = solve(exprob, Koopman()) sol.u ``` @@ -208,7 +208,7 @@ saveat = tspan[1]:0.5:tspan[2] g(sol, p) = sol[1, :] prob = ODEProblem(f, u0, tspan, p, saveat = saveat) sm = SystemMap(prob, Tsit5()) -exprob = ExpectationProblem(sm, g, h, gd; nout = length(saveat)) +exprob = ExpectationProblem(sm, g, h, gd) sol = solve(exprob, Koopman()) sol.u ``` @@ -234,21 +234,21 @@ end counter = [0, 0, 0] g(sol, p, power) = g(sol, p, power, counter) g(sol, p) = g(sol, p, 1) -exprob = ExpectationProblem(sm, g, h, gd; nout = 1) +exprob = ExpectationProblem(sm, g, h, gd) sol = solve(exprob, Koopman()) sol.u ``` ```@example introduction g(sol, p) = g(sol, p, 2) -exprob = ExpectationProblem(sm, g, h, gd; nout = 1) +exprob = ExpectationProblem(sm, g, h, gd) sol = solve(exprob, Koopman()) sol.u ``` ```@example introduction g(sol, p) = g(sol, p, 3) -exprob = ExpectationProblem(sm, g, h, gd; nout = 1) +exprob = ExpectationProblem(sm, g, h, gd) sol = solve(exprob, Koopman()) sol.u ``` @@ -269,7 +269,7 @@ function g(sol, p, counter) end counter = [0] g(sol, p) = g(sol, p, counter) -exprob = ExpectationProblem(sm, g, h, gd; nout = 3) +exprob = ExpectationProblem(sm, g, h, gd) sol = solve(exprob, Koopman()) sol.u ``` @@ -305,7 +305,7 @@ function g(sol, p) x = sol(4.0)[1] [x, x^2] end -exprob = ExpectationProblem(sm, g, h, gd; nout = 2) +exprob = ExpectationProblem(sm, g, h, gd) sol = solve(exprob, Koopman()) sol.u mean_g = sol.u[1] @@ -327,7 +327,7 @@ saveat = tspan[1]:0.5:tspan[2] g(sol, p) = [sol[1, :]; sol[1, :] .^ 2] prob = ODEProblem(f, u0, tspan, p, saveat = saveat) sm = SystemMap(prob, Tsit5()) -exprob = ExpectationProblem(sm, g, h, gd; nout = length(saveat) * 2) +exprob = ExpectationProblem(sm, g, h, gd) sol = solve(exprob, Koopman()) sol.u @@ -350,7 +350,7 @@ function g(sol, p) x = sol(4.0)[1] [x, x^2, x^3] end -exprob = ExpectationProblem(sm, g, h, gd; nout = 3) +exprob = ExpectationProblem(sm, g, h, gd) sol = solve(exprob, Koopman()) sol.u mean_g = sol.u[1] @@ -379,7 +379,7 @@ gd = GenericDistribution(u0_dist, p_dist) g(sol, p) = sol(6.0)[1] h(x, u, p) = [x[1]], [x[2]] sm = SystemMap(prob, Tsit5(), EnsembleThreads()) -exprob = ExpectationProblem(sm, g, h, gd; nout 
= 1) +exprob = ExpectationProblem(sm, g, h, gd) # batchmode = EnsembleThreads() #where to pass this? sol = solve(exprob, Koopman(), batch = 10, quadalg = CubaSUAVE()) sol.u @@ -428,7 +428,7 @@ g(sol, p) = sol(6.0f0)[1] h(x, u, p) = [x[1]], [x[2]] prob = ODEProblem(f, u0, tspan, p) sm = SystemMap(prob, Tsit5(), EnsembleCPUArray()) -exprob = ExpectationProblem(sm, g, h, gd; nout = 1) +exprob = ExpectationProblem(sm, g, h, gd) sol = solve(exprob, Koopman(), batch = 10, quadalg = CubaSUAVE()) sol.u ``` diff --git a/docs/src/tutorials/optimization_under_uncertainty.md b/docs/src/tutorials/optimization_under_uncertainty.md index 116a290..c9e99e5 100644 --- a/docs/src/tutorials/optimization_under_uncertainty.md +++ b/docs/src/tutorials/optimization_under_uncertainty.md @@ -106,7 +106,7 @@ using SciMLExpectations gd = GenericDistribution(cor_dist) h(x, u, p) = u, [p[1]; x[1]] sm = SystemMap(prob, Tsit5(), callback = cbs) -exprob = ExpectationProblem(sm, obs, h, gd; nout = 1) +exprob = ExpectationProblem(sm, obs, h, gd) sol = solve(exprob, Koopman(), ireltol = 1e-5) sol.u ``` @@ -121,7 +121,7 @@ make_u0(θ) = [θ[1], θ[2], θ[3], 0.0] function 𝔼_loss(θ, pars) prob = ODEProblem(ball!, make_u0(θ), tspan, p) sm = SystemMap(prob, Tsit5(), callback = cbs) - exprob = ExpectationProblem(sm, obs, h, gd; nout = 1) + exprob = ExpectationProblem(sm, obs, h, gd) sol = solve(exprob, Koopman(), ireltol = 1e-5) sol.u end @@ -228,7 +228,7 @@ Using the previously computed optimal initial conditions, let's compute the prob ```@example control sm = SystemMap(remake(prob, u0 = make_u0(minx)), Tsit5(), callback = cbs) -exprob = ExpectationProblem(sm, constraint_obs, h, gd; nout = 1) +exprob = ExpectationProblem(sm, constraint_obs, h, gd) sol = solve(exprob, Koopman(), ireltol = 1e-5) sol.u ``` @@ -239,7 +239,7 @@ We then set up the constraint function for NLopt just as before. 
function 𝔼_constraint(res, θ, pars) prob = ODEProblem(ball!, make_u0(θ), tspan, p) sm = SystemMap(prob, Tsit5(), callback = cbs) - exprob = ExpectationProblem(sm, constraint_obs, h, gd; nout = 1) + exprob = ExpectationProblem(sm, constraint_obs, h, gd) sol = solve(exprob, Koopman(), ireltol = 1e-5) res .= sol.u end diff --git a/docs/src/tutorials/process_noise.md b/docs/src/tutorials/process_noise.md index b7c3696..7d0a77e 100644 --- a/docs/src/tutorials/process_noise.md +++ b/docs/src/tutorials/process_noise.md @@ -19,7 +19,7 @@ prob = SDEProblem(f, g, u0, (0.0, 1.0), noise = W) sm = ProcessNoiseSystemMap(prob, 8, LambaEM(), abstol = 1e-3, reltol = 1e-3) cov(x, u, p) = x, p observed(sol, p) = sol[:, end] -exprob = ExpectationProblem(sm, observed, cov; nout = length(u0)) +exprob = ExpectationProblem(sm, observed, cov) sol1 = solve(exprob, Koopman(), ireltol = 1e-3, iabstol = 1e-3, batch = 64, quadalg = CubaDivonne()) sol1.u From e345319c3890f5bf1f5f35bb4d846ef06d389d57 Mon Sep 17 00:00:00 2001 From: lxvm Date: Thu, 22 Feb 2024 09:16:30 -0500 Subject: [PATCH 8/9] bump deps --- Project.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Project.toml b/Project.toml index c41ed44..7ce4fe3 100644 --- a/Project.toml +++ b/Project.toml @@ -30,7 +30,7 @@ DiffEqNoiseProcess = "5" Distributions = "0.23, 0.24, 0.25" FiniteDiff = "2" ForwardDiff = "0.10" -Integrals = "4" +Integrals = "4.4" KernelDensity = "0.6" LinearAlgebra = "1" OrdinaryDiffEq = "6" @@ -42,7 +42,7 @@ RecursiveArrayTools = "2, 3" Reexport = "0.2, 1.0" ReferenceTests = "0.10" SafeTestsets = "0.1" -SciMLBase = "1, 2" +SciMLBase = "2" StaticArrays = "1" Statistics = "1" StochasticDiffEq = "6" From 44d5742aa97814cacd500931f7d4f7b8d2dfe6fa Mon Sep 17 00:00:00 2001 From: Lorenzo Van Munoz <66997677+lxvm@users.noreply.github.com> Date: Thu, 22 Feb 2024 11:21:58 -0500 Subject: [PATCH 9/9] Update introduction.md no trivial batching --- docs/src/tutorials/introduction.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/src/tutorials/introduction.md b/docs/src/tutorials/introduction.md index e06728f..92d79c6 100644 --- a/docs/src/tutorials/introduction.md +++ b/docs/src/tutorials/introduction.md @@ -392,8 +392,8 @@ Now, let's compare the performance of the batch and non-batch modes ``` ```@example introduction -solve(exprob, Koopman(), batch = 0, quadalg = CubaSUAVE()) -@time solve(exprob, Koopman(), batch = 0, quadalg = CubaSUAVE()) +solve(exprob, Koopman(), quadalg = CubaSUAVE()) +@time solve(exprob, Koopman(), quadalg = CubaSUAVE()) ``` It is also possible to parallelize across the GPU. However, one must be careful of the limitations of ensemble solutions with the GPU. Please refer to [DiffEqGPU.jl](https://github.com/SciML/DiffEqGPU.jl) for details.
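Below is a minimal end-to-end sketch of the call pattern after this series: `ExpectationProblem` is constructed without the deprecated `nout` keyword, and `solve` no longer uses `batch = 0` as a "no batching" sentinel (omit `batch`, or pass a positive integer together with a batch-capable `quadalg`). The distribution, covariate map, observable, and solver keywords are mirrored from the `docs/src/tutorials/introduction.md` hunks above; the scalar decay dynamics and parameter values are illustrative placeholders, not part of the patch.

```julia
using SciMLExpectations, OrdinaryDiffEq, Distributions

# Illustrative placeholder dynamics (the tutorial's full model is not shown in this diff).
f(u, p, t) = p .* u
u0 = [3.0]
p = [-0.3]
prob = ODEProblem(f, u0, (0.0, 10.0), p)

# Pieces mirrored from the updated introduction tutorial:
u0_dist = [Uniform(0.0, 10.0)]          # uncertain initial condition
gd = GenericDistribution(u0_dist...)
h(x, u, p) = x, p                       # covariate map: replace u0 with the uncertain x
g(sol, p) = [sol(4.0)[1], sol(6.0)[1]]  # vector-valued observable; no `nout` needed
sm = SystemMap(prob, Tsit5())

exprob = ExpectationProblem(sm, g, h, gd)  # was: ExpectationProblem(sm, g, h, gd; nout = 2)
sol = solve(exprob, Koopman(); ireltol = 1e-3, iabstol = 1e-3)  # default quadalg, unbatched
sol.u

# To batch ensemble evaluations, pass a positive `batch` with a batching quadrature,
# e.g. `solve(exprob, Koopman(); batch = 10, quadalg = CubaSUAVE())` with Cuba.jl loaded.
```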