diff --git a/Project.toml b/Project.toml index b6cee24ab9..bb9281019e 100644 --- a/Project.toml +++ b/Project.toml @@ -71,6 +71,7 @@ Preferences = "1.3" Printf = "1.9" PyCall = "1.96" PythonCall = "0.9" +QuasiMonteCarlo = "0.3" RCall = "0.13.18" RecipesBase = "0.7.0, 0.8, 1.0" RecursiveArrayTools = "2.33" @@ -83,7 +84,6 @@ Statistics = "1" SymbolicIndexingInterface = "0.2" Tables = "1" TruncatedStacktraces = "1" -QuasiMonteCarlo = "0.3" Zygote = "0.6" julia = "1.9" diff --git a/ext/SciMLBaseChainRulesCoreExt.jl b/ext/SciMLBaseChainRulesCoreExt.jl index 51334f9378..8512fd1bf8 100644 --- a/ext/SciMLBaseChainRulesCoreExt.jl +++ b/ext/SciMLBaseChainRulesCoreExt.jl @@ -5,12 +5,12 @@ import ChainRulesCore import ChainRulesCore: NoTangent, @non_differentiable function ChainRulesCore.rrule(config::ChainRulesCore.RuleConfig{ - >:ChainRulesCore.HasReverseMode, - }, - ::typeof(getindex), - VA::ODESolution, - sym, - j::Integer) + >:ChainRulesCore.HasReverseMode, + }, + ::typeof(getindex), + VA::ODESolution, + sym, + j::Integer) function ODESolution_getindex_pullback(Δ) i = issymbollike(sym) ? sym_to_index(sym, VA) : sym if i === nothing @@ -94,11 +94,11 @@ function ChainRulesCore.rrule(::Type{SDEProblem}, args...; kwargs...) end function ChainRulesCore.rrule(::Type{ - <:ODESolution{T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, - T11, T12, - }}, u, - args...) where {T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, - T12} + <:ODESolution{T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, + T11, T12, + }}, u, + args...) where {T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, + T12} function ODESolutionAdjoint(ȳ) (NoTangent(), ȳ, ntuple(_ -> NoTangent(), length(args))...) end @@ -108,10 +108,10 @@ function ChainRulesCore.rrule(::Type{ end function ChainRulesCore.rrule(::Type{ - <:ODESolution{uType, tType, isinplace, P, NP, F, G, K, - ND, - }}, u, - args...) where {uType, tType, isinplace, P, NP, F, G, K, ND} + <:ODESolution{uType, tType, isinplace, P, NP, F, G, K, + ND, + }}, u, + args...) where {uType, tType, isinplace, P, NP, F, G, K, ND} function SDESolutionAdjoint(ȳ) (NoTangent(), ȳ, ntuple(_ -> NoTangent(), length(args))...) end @@ -132,4 +132,4 @@ function ChainRulesCore.rrule(::SciMLBase.EnsembleSolution, sim, time, converged out, EnsembleSolution_adjoint end -end \ No newline at end of file +end diff --git a/ext/SciMLBasePythonCallExt.jl b/ext/SciMLBasePythonCallExt.jl index 7426e4037f..6b5d920a77 100644 --- a/ext/SciMLBasePythonCallExt.jl +++ b/ext/SciMLBasePythonCallExt.jl @@ -13,7 +13,10 @@ function SciMLBase.numargs(f::Py) pyconvert(Int, length(first(inspect.getfullargspec(f2))) - inspect.ismethod(f2)) end -_pyconvert(x::Py) = pyisinstance(x, pybuiltins.list) ? _promoting_collect(_pyconvert(x) for x in x) : pyconvert(Any, x) +function _pyconvert(x::Py) + pyisinstance(x, pybuiltins.list) ? _promoting_collect(_pyconvert(x) for x in x) : + pyconvert(Any, x) +end _pyconvert(x::PyList) = _promoting_collect(_pyconvert(x) for x in x) _pyconvert(x) = x diff --git a/ext/SciMLBaseRCallExt.jl b/ext/SciMLBaseRCallExt.jl index 38a61a7f2c..1eda58cbee 100644 --- a/ext/SciMLBaseRCallExt.jl +++ b/ext/SciMLBaseRCallExt.jl @@ -8,4 +8,4 @@ function SciMLBase.isinplace(f::RFunction, args...; kwargs...) 
false end -end \ No newline at end of file +end diff --git a/ext/SciMLBaseZygoteExt.jl b/ext/SciMLBaseZygoteExt.jl index c0171f4d4a..307a86d77c 100644 --- a/ext/SciMLBaseZygoteExt.jl +++ b/ext/SciMLBaseZygoteExt.jl @@ -4,9 +4,9 @@ using Zygote using Zygote: @adjoint, pullback import Zygote: literal_getproperty using SciMLBase -using SciMLBase: ODESolution, issymbollike, sym_to_index, remake, - getobserved, build_solution, EnsembleSolution, - NonlinearSolution, AbstractTimeseriesSolution +using SciMLBase: ODESolution, issymbollike, sym_to_index, remake, + getobserved, build_solution, EnsembleSolution, + NonlinearSolution, AbstractTimeseriesSolution # This method resolves the ambiguity with the pullback defined in # RecursiveArrayToolsZygoteExt @@ -85,7 +85,7 @@ end end @adjoint function Zygote.literal_getproperty(sim::EnsembleSolution, - ::Val{:u}) + ::Val{:u}) sim.u, p̄ -> (EnsembleSolution(p̄, 0.0, true, sim.stats),) end @@ -107,17 +107,17 @@ end }(u, args...) where {T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12} - function ODESolutionAdjoint(ȳ) - (ȳ, ntuple(_ -> nothing, length(args))...) - end - - ODESolution{T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12}(u, args...), - ODESolutionAdjoint + function ODESolutionAdjoint(ȳ) + (ȳ, ntuple(_ -> nothing, length(args))...) + end + + ODESolution{T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12}(u, args...), + ODESolutionAdjoint end @adjoint function SDEProblem{uType, tType, isinplace, P, NP, F, G, K, ND}(u, - args...) where - {uType, tType, isinplace, P, NP, F, G, K, ND} + args...) where + {uType, tType, isinplace, P, NP, F, G, K, ND} function SDESolutionAdjoint(ȳ) (ȳ, ntuple(_ -> nothing, length(args))...) end @@ -126,16 +126,16 @@ end end @adjoint function NonlinearSolution{T, N, uType, R, P, A, O, uType2}(u, - args...) where { - T, - N, - uType, - R, - P, - A, - O, - uType2, -} + args...) where { + T, + N, + uType, + R, + P, + A, + O, + uType2, + } function NonlinearSolutionAdjoint(ȳ) (ȳ, ntuple(_ -> nothing, length(args))...) end @@ -143,7 +143,7 @@ end end @adjoint function literal_getproperty(sol::AbstractTimeseriesSolution, - ::Val{:u}) + ::Val{:u}) function solu_adjoint(Δ) zerou = zero(sol.prob.u0) _Δ = @. ifelse(Δ === nothing, (zerou,), Δ) @@ -153,7 +153,7 @@ end end @adjoint function literal_getproperty(sol::SciMLBase.AbstractNoTimeSolution, - ::Val{:u}) + ::Val{:u}) function solu_adjoint(Δ) zerou = zero(sol.prob.u0) _Δ = @. ifelse(Δ === nothing, zerou, Δ) @@ -163,7 +163,7 @@ end end @adjoint function literal_getproperty(sol::SciMLBase.OptimizationSolution, - ::Val{:u}) + ::Val{:u}) function solu_adjoint(Δ) zerou = zero(sol.u) _Δ = @. ifelse(Δ === nothing, zerou, Δ) @@ -214,8 +214,8 @@ end end @adjoint function SciMLBase.responsible_map(f, - args::Union{AbstractArray, Tuple - }...) + args::Union{AbstractArray, Tuple + }...) ∇responsible_map(__context__, f, args...) 
end diff --git a/src/SciMLBase.jl b/src/SciMLBase.jl index 28a832772c..a32b3d2f59 100644 --- a/src/SciMLBase.jl +++ b/src/SciMLBase.jl @@ -653,23 +653,23 @@ function unwrapped_f(f::FunctionWrappersWrappers.FunctionWrappersWrapper) end function specialization(::Union{ODEFunction{iip, specialize}, - SDEFunction{iip, specialize}, DDEFunction{iip, specialize}, - SDDEFunction{iip, specialize}, - DAEFunction{iip, specialize}, - DynamicalODEFunction{iip, specialize}, - SplitFunction{iip, specialize}, - DynamicalSDEFunction{iip, specialize}, - SplitSDEFunction{iip, specialize}, - DynamicalDDEFunction{iip, specialize}, - DiscreteFunction{iip, specialize}, - ImplicitDiscreteFunction{iip, specialize}, - RODEFunction{iip, specialize}, - NonlinearFunction{iip, specialize}, - OptimizationFunction{iip, specialize}, - BVPFunction{iip, specialize}, - IntegralFunction{iip, specialize}, - BatchIntegralFunction{iip, specialize}}) where {iip, - specialize} + SDEFunction{iip, specialize}, DDEFunction{iip, specialize}, + SDDEFunction{iip, specialize}, + DAEFunction{iip, specialize}, + DynamicalODEFunction{iip, specialize}, + SplitFunction{iip, specialize}, + DynamicalSDEFunction{iip, specialize}, + SplitSDEFunction{iip, specialize}, + DynamicalDDEFunction{iip, specialize}, + DiscreteFunction{iip, specialize}, + ImplicitDiscreteFunction{iip, specialize}, + RODEFunction{iip, specialize}, + NonlinearFunction{iip, specialize}, + OptimizationFunction{iip, specialize}, + BVPFunction{iip, specialize}, + IntegralFunction{iip, specialize}, + BatchIntegralFunction{iip, specialize}}) where {iip, + specialize} specialize end diff --git a/src/callbacks.jl b/src/callbacks.jl index edab070f65..b5faaa2d37 100644 --- a/src/callbacks.jl +++ b/src/callbacks.jl @@ -106,11 +106,11 @@ struct ContinuousCallback{F1, F2, F3, F4, F5, T, T2, T3, I, R} <: AbstractContin reltol::T2 repeat_nudge::T3 function ContinuousCallback(condition::F1, affect!::F2, affect_neg!::F3, - initialize::F4, finalize::F5, idxs::I, rootfind, - interp_points, save_positions, dtrelax::R, abstol::T, - reltol::T2, - repeat_nudge::T3) where {F1, F2, F3, F4, F5, T, T2, T3, I, R - } + initialize::F4, finalize::F5, idxs::I, rootfind, + interp_points, save_positions, dtrelax::R, abstol::T, + reltol::T2, + repeat_nudge::T3) where {F1, F2, F3, F4, F5, T, T2, T3, I, R + } new{F1, F2, F3, F4, F5, T, T2, T3, I, R}(condition, affect!, affect_neg!, initialize, finalize, idxs, rootfind, @@ -121,15 +121,15 @@ struct ContinuousCallback{F1, F2, F3, F4, F5, T, T2, T3, I, R} <: AbstractContin end function ContinuousCallback(condition, affect!, affect_neg!; - initialize = INITIALIZE_DEFAULT, - finalize = FINALIZE_DEFAULT, - idxs = nothing, - rootfind = LeftRootFind, - save_positions = (true, true), - interp_points = 10, - dtrelax = 1, - abstol = 10eps(), reltol = 0, - repeat_nudge = 1 // 100) + initialize = INITIALIZE_DEFAULT, + finalize = FINALIZE_DEFAULT, + idxs = nothing, + rootfind = LeftRootFind, + save_positions = (true, true), + interp_points = 10, + dtrelax = 1, + abstol = 10eps(), reltol = 0, + repeat_nudge = 1 // 100) ContinuousCallback(condition, affect!, affect_neg!, initialize, finalize, idxs, rootfind, interp_points, @@ -138,15 +138,15 @@ function ContinuousCallback(condition, affect!, affect_neg!; end function ContinuousCallback(condition, affect!; - initialize = INITIALIZE_DEFAULT, - finalize = FINALIZE_DEFAULT, - idxs = nothing, - rootfind = LeftRootFind, - save_positions = (true, true), - affect_neg! 
= affect!, - interp_points = 10, - dtrelax = 1, - abstol = 10eps(), reltol = 0, repeat_nudge = 1 // 100) + initialize = INITIALIZE_DEFAULT, + finalize = FINALIZE_DEFAULT, + idxs = nothing, + rootfind = LeftRootFind, + save_positions = (true, true), + affect_neg! = affect!, + interp_points = 10, + dtrelax = 1, + abstol = 10eps(), reltol = 0, repeat_nudge = 1 // 100) ContinuousCallback(condition, affect!, affect_neg!, initialize, finalize, idxs, rootfind, interp_points, collect(save_positions), @@ -211,11 +211,11 @@ struct VectorContinuousCallback{F1, F2, F3, F4, F5, T, T2, T3, I, R} <: reltol::T2 repeat_nudge::T3 function VectorContinuousCallback(condition::F1, affect!::F2, affect_neg!::F3, len::Int, - initialize::F4, finalize::F5, idxs::I, rootfind, - interp_points, save_positions, dtrelax::R, - abstol::T, reltol::T2, - repeat_nudge::T3) where {F1, F2, F3, F4, F5, T, T2, - T3, I, R} + initialize::F4, finalize::F5, idxs::I, rootfind, + interp_points, save_positions, dtrelax::R, + abstol::T, reltol::T2, + repeat_nudge::T3) where {F1, F2, F3, F4, F5, T, T2, + T3, I, R} new{F1, F2, F3, F4, F5, T, T2, T3, I, R}(condition, affect!, affect_neg!, len, initialize, finalize, idxs, rootfind, @@ -226,14 +226,14 @@ struct VectorContinuousCallback{F1, F2, F3, F4, F5, T, T2, T3, I, R} <: end function VectorContinuousCallback(condition, affect!, affect_neg!, len; - initialize = INITIALIZE_DEFAULT, - finalize = FINALIZE_DEFAULT, - idxs = nothing, - rootfind = LeftRootFind, - save_positions = (true, true), - interp_points = 10, - dtrelax = 1, - abstol = 10eps(), reltol = 0, repeat_nudge = 1 // 100) + initialize = INITIALIZE_DEFAULT, + finalize = FINALIZE_DEFAULT, + idxs = nothing, + rootfind = LeftRootFind, + save_positions = (true, true), + interp_points = 10, + dtrelax = 1, + abstol = 10eps(), reltol = 0, repeat_nudge = 1 // 100) VectorContinuousCallback(condition, affect!, affect_neg!, len, initialize, finalize, idxs, @@ -243,15 +243,15 @@ function VectorContinuousCallback(condition, affect!, affect_neg!, len; end function VectorContinuousCallback(condition, affect!, len; - initialize = INITIALIZE_DEFAULT, - finalize = FINALIZE_DEFAULT, - idxs = nothing, - rootfind = LeftRootFind, - save_positions = (true, true), - affect_neg! = affect!, - interp_points = 10, - dtrelax = 1, - abstol = 10eps(), reltol = 0, repeat_nudge = 1 // 100) + initialize = INITIALIZE_DEFAULT, + finalize = FINALIZE_DEFAULT, + idxs = nothing, + rootfind = LeftRootFind, + save_positions = (true, true), + affect_neg! 
= affect!, + interp_points = 10, + dtrelax = 1, + abstol = 10eps(), reltol = 0, repeat_nudge = 1 // 100) VectorContinuousCallback(condition, affect!, affect_neg!, len, initialize, finalize, idxs, rootfind, interp_points, @@ -297,16 +297,16 @@ struct DiscreteCallback{F1, F2, F3, F4} <: AbstractDiscreteCallback finalize::F4 save_positions::BitArray{1} function DiscreteCallback(condition::F1, affect!::F2, - initialize::F3, finalize::F4, - save_positions) where {F1, F2, F3, F4} + initialize::F3, finalize::F4, + save_positions) where {F1, F2, F3, F4} new{F1, F2, F3, F4}(condition, affect!, initialize, finalize, BitArray(collect(save_positions))) end end function DiscreteCallback(condition, affect!; - initialize = INITIALIZE_DEFAULT, finalize = FINALIZE_DEFAULT, - save_positions = (true, true)) + initialize = INITIALIZE_DEFAULT, finalize = FINALIZE_DEFAULT, + save_positions = (true, true)) DiscreteCallback(condition, affect!, initialize, finalize, save_positions) end diff --git a/src/ensemble/basic_ensemble_solve.jl b/src/ensemble/basic_ensemble_solve.jl index ece01eb687..a7da3173c4 100644 --- a/src/ensemble/basic_ensemble_solve.jl +++ b/src/ensemble/basic_ensemble_solve.jl @@ -24,12 +24,13 @@ $(TYPEDEF) struct EnsembleSerial <: BasicEnsembleAlgorithm end function merge_stats(us) - st = Iterators.filter(!isnothing, (hasproperty(x, :stats) ? x.stats : nothing for x in us)) + st = Iterators.filter(!isnothing, + (hasproperty(x, :stats) ? x.stats : nothing for x in us)) isempty(st) && return nothing reduce(merge, st) end -mutable struct AggregateLogger{T<:Logging.AbstractLogger} <: Logging.AbstractLogger +mutable struct AggregateLogger{T <: Logging.AbstractLogger} <: Logging.AbstractLogger progress::Dict{Symbol, Float64} done_counter::Int total::Float64 @@ -37,12 +38,22 @@ mutable struct AggregateLogger{T<:Logging.AbstractLogger} <: Logging.AbstractLog lock::ReentrantLock logger::T end -AggregateLogger(logger::Logging.AbstractLogger) = AggregateLogger(Dict{Symbol, Float64}(),0 , 0.0, 0.0, ReentrantLock(), logger) +function AggregateLogger(logger::Logging.AbstractLogger) + AggregateLogger(Dict{Symbol, Float64}(), 0, 0.0, 0.0, ReentrantLock(), logger) +end -function Logging.handle_message(l::AggregateLogger, level, message, _module, group, id, file, line; kwargs...) +function Logging.handle_message(l::AggregateLogger, + level, + message, + _module, + group, + id, + file, + line; + kwargs...) 
if convert(Logging.LogLevel, level) == Logging.LogLevel(-1) && haskey(kwargs, :progress) pr = kwargs[:progress] - if trylock(l.lock) || (pr == "done" && lock(l.lock)===nothing) + if trylock(l.lock) || (pr == "done" && lock(l.lock) === nothing) try if pr == "done" pr = 1.0 @@ -50,9 +61,9 @@ function Logging.handle_message(l::AggregateLogger, level, message, _module, gro end len = length(l.progress) if haskey(l.progress, id) - l.total += (pr-l.progress[id])/len + l.total += (pr - l.progress[id]) / len else - l.total = l.total*(len/(len+1)) + pr/(len+1) + l.total = l.total * (len / (len + 1)) + pr / (len + 1) len += 1 end l.progress[id] = pr @@ -61,19 +72,19 @@ function Logging.handle_message(l::AggregateLogger, level, message, _module, gro # @show tot l.total l.total ≈ tot curr_time = time() if l.done_counter >= len - tot="done" + tot = "done" empty!(l.progress) l.done_counter = 0 l.print_time = 0.0 - elseif curr_time-l.print_time > 0.1 + elseif curr_time - l.print_time > 0.1 tot = l.total l.print_time = curr_time else return end - id=:total - message="Total" - kwargs=merge(values(kwargs), (progress=tot,)) + id = :total + message = "Total" + kwargs = merge(values(kwargs), (progress = tot,)) finally unlock(l.lock) end @@ -81,15 +92,23 @@ function Logging.handle_message(l::AggregateLogger, level, message, _module, gro return end end - Logging.handle_message(l.logger, level, message, _module, group, id, file, line; kwargs...) + Logging.handle_message(l.logger, + level, + message, + _module, + group, + id, + file, + line; + kwargs...) end Logging.shouldlog(l::AggregateLogger, args...) = Logging.shouldlog(l.logger, args...) Logging.min_enabled_level(l::AggregateLogger) = Logging.min_enabled_level(l.logger) Logging.catch_exceptions(l::AggregateLogger) = Logging.catch_exceptions(l.logger) function __solve(prob::AbstractEnsembleProblem, - alg::Union{AbstractDEAlgorithm, Nothing}; - kwargs...) + alg::Union{AbstractDEAlgorithm, Nothing}; + kwargs...) if alg isa EnsembleAlgorithm # Assume DifferentialEquations.jl is being used, so default alg ensemblealg = alg @@ -107,20 +126,21 @@ tighten_container_eltype(u::Vector{Any}) = map(identity, u) tighten_container_eltype(u) = u function __solve(prob::EnsembleProblem{<:AbstractVector{<:AbstractSciMLProblem}}, - alg::Union{AbstractDEAlgorithm, Nothing}, - ensemblealg::BasicEnsembleAlgorithm; kwargs...) + alg::Union{AbstractDEAlgorithm, Nothing}, + ensemblealg::BasicEnsembleAlgorithm; kwargs...) # TODO: @invoke invoke(__solve, Tuple{AbstractEnsembleProblem, typeof(alg), typeof(ensemblealg)}, prob, alg, ensemblealg; trajectories = length(prob.prob), kwargs...) end function __solve(prob::AbstractEnsembleProblem, - alg::A, - ensemblealg::BasicEnsembleAlgorithm; - trajectories, batch_size = trajectories, progress_aggregate=true, - pmap_batch_size = batch_size ÷ 100 > 0 ? batch_size ÷ 100 : 1, kwargs...) where {A} - logger = progress_aggregate ? AggregateLogger(Logging.current_logger()) : Logging.current_logger() - + alg::A, + ensemblealg::BasicEnsembleAlgorithm; + trajectories, batch_size = trajectories, progress_aggregate = true, + pmap_batch_size = batch_size ÷ 100 > 0 ? batch_size ÷ 100 : 1, kwargs...) where {A} + logger = progress_aggregate ? 
AggregateLogger(Logging.current_logger()) : + Logging.current_logger() + Logging.with_logger(logger) do num_batches = trajectories ÷ batch_size num_batches < 1 && @@ -131,10 +151,12 @@ function __solve(prob::AbstractEnsembleProblem, name = get(kwargs, :progress_name, "Ensemble") for i in 1:trajectories msg = "$name #$i" - Logging.@logmsg(Logging.LogLevel(-1), msg, _id=Symbol("SciMLBase_$i"), progress=0) + Logging.@logmsg(Logging.LogLevel(-1), + msg, + _id=Symbol("SciMLBase_$i"), + progress=0) end end - if num_batches == 1 && prob.reduction === DEFAULT_REDUCTION elapsed_time = @elapsed u = solve_batch(prob, alg, ensemblealg, 1:trajectories, @@ -160,7 +182,12 @@ function __solve(prob::AbstractEnsembleProblem, else II = (batch_size * (i - 1) + 1):(batch_size * i) end - batch_data = solve_batch(prob, alg, ensemblealg, II, pmap_batch_size; kwargs...) + batch_data = solve_batch(prob, + alg, + ensemblealg, + II, + pmap_batch_size; + kwargs...) u, converged = prob.reduction(u, batch_data, II) end end @@ -181,7 +208,7 @@ function batch_func(i, prob, alg; kwargs...) name = get(kwargs, :progress_name, "Ensemble") progress_name = "$name #$i" progress_id = Symbol("SciMLBase_$i") - kwargs = (kwargs..., progress_name=progress_name, progress_id=progress_id) + kwargs = (kwargs..., progress_name = progress_name, progress_id = progress_id) end x = prob.output_func(solve(new_prob, alg; kwargs...), i) if !(x isa Tuple) @@ -208,7 +235,7 @@ function batch_func(i, prob, alg; kwargs...) end function solve_batch(prob, alg, ensemblealg::EnsembleDistributed, II, pmap_batch_size; - kwargs...) + kwargs...) wp = CachingPool(workers()) # Fix the return type of pmap @@ -243,7 +270,7 @@ function SciMLBase.solve_batch(prob, alg, ::EnsembleSerial, II, pmap_batch_size; end function solve_batch(prob, alg, ensemblealg::EnsembleThreads, II, pmap_batch_size; - kwargs...) + kwargs...) nthreads = min(Threads.nthreads(), length(II)) if length(II) == 1 || nthreads == 1 return solve_batch(prob, alg, EnsembleSerial(), II, pmap_batch_size; kwargs...) diff --git a/src/ensemble/ensemble_analysis.jl b/src/ensemble/ensemble_analysis.jl index c68fd5dade..f2ae4c9c14 100644 --- a/src/ensemble/ensemble_analysis.jl +++ b/src/ensemble/ensemble_analysis.jl @@ -122,7 +122,7 @@ function timepoint_weighted_meancov(sim, W, t1, t2) end function SciMLBase.EnsembleSummary(sim::SciMLBase.AbstractEnsembleSolution{T, N}, - t = sim[1].t; quantiles = [0.05, 0.95]) where {T, N} + t = sim[1].t; quantiles = [0.05, 0.95]) where {T, N} if sim[1] isa SciMLSolution m, v = timeseries_point_meanvar(sim, t) med = timeseries_point_median(sim, t) diff --git a/src/ensemble/ensemble_problems.jl b/src/ensemble/ensemble_problems.jl index 4ad4934954..a3839407f0 100644 --- a/src/ensemble/ensemble_problems.jl +++ b/src/ensemble/ensemble_problems.jl @@ -23,11 +23,11 @@ function EnsembleProblem(prob::AbstractVector{<:AbstractSciMLProblem}; kwargs... kwargs...) 
end function EnsembleProblem(prob; - prob_func = DEFAULT_PROB_FUNC, - output_func = DEFAULT_OUTPUT_FUNC, - reduction = DEFAULT_REDUCTION, - u_init = nothing, - safetycopy = prob_func !== DEFAULT_PROB_FUNC) + prob_func = DEFAULT_PROB_FUNC, + output_func = DEFAULT_OUTPUT_FUNC, + reduction = DEFAULT_REDUCTION, + u_init = nothing, + safetycopy = prob_func !== DEFAULT_PROB_FUNC) _prob_func = prepare_function(prob_func) _output_func = prepare_function(output_func) _reduction = prepare_function(reduction) @@ -36,16 +36,18 @@ function EnsembleProblem(prob; end function EnsembleProblem(; prob, - prob_func = DEFAULT_PROB_FUNC, - output_func = DEFAULT_OUTPUT_FUNC, - reduction = DEFAULT_REDUCTION, - u_init = nothing, p = nothing, - safetycopy = prob_func !== DEFAULT_PROB_FUNC) + prob_func = DEFAULT_PROB_FUNC, + output_func = DEFAULT_OUTPUT_FUNC, + reduction = DEFAULT_REDUCTION, + u_init = nothing, p = nothing, + safetycopy = prob_func !== DEFAULT_PROB_FUNC) EnsembleProblem(prob; prob_func, output_func, reduction, u_init, safetycopy) end #since NonlinearProblem might want to use this dispatch as well -function SciMLBase.EnsembleProblem(prob::AbstractSciMLProblem, u0s::Vector{Vector{T}}; kwargs...) where {T} +function SciMLBase.EnsembleProblem(prob::AbstractSciMLProblem, + u0s::Vector{Vector{T}}; + kwargs...) where {T} prob_func = (prob, i, repeat = nothing) -> remake(prob, u0 = u0s[i]) return SciMLBase.EnsembleProblem(prob; prob_func, kwargs...) end @@ -53,7 +55,10 @@ end #only makes sense for OptimizationProblem, might make sense for IntervalNonlinearProblem function SciMLBase.EnsembleProblem(prob::OptimizationProblem, trajectories::Int; kwargs...) if prob.lb !== nothing && prob.ub !== nothing - u0s = QuasiMonteCarlo.sample(trajectories, prob.lb, prob.ub, QuasiMonteCarlo.LatinHypercubeSample()) + u0s = QuasiMonteCarlo.sample(trajectories, + prob.lb, + prob.ub, + QuasiMonteCarlo.LatinHypercubeSample()) prob_func = (prob, i, repeat = nothing) -> remake(prob, u0 = u0s[:, i]) else error("EnsembleProblem with `trajectories` as second argument requires lower and upper bounds to be defined in the `OptimizationProblem`.") diff --git a/src/ensemble/ensemble_solutions.jl b/src/ensemble/ensemble_solutions.jl index 5bb198512c..8ad9ceb2c5 100644 --- a/src/ensemble/ensemble_solutions.jl +++ b/src/ensemble/ensemble_solutions.jl @@ -11,13 +11,13 @@ struct EnsembleTestSolution{T, N, S} <: AbstractEnsembleSolution{T, N, S} converged::Bool end function EnsembleTestSolution(sim::AbstractEnsembleSolution{T, N}, errors, weak_errors, - error_means, error_medians, elapsedTime, - converged) where {T, N} + error_means, error_medians, elapsedTime, + converged) where {T, N} EnsembleTestSolution{T, N, typeof(sim.u)}(sim.u, errors, weak_errors, error_means, error_medians, sim.elapsedTime, sim.converged) end function EnsembleTestSolution(u, errors, weak_errors, error_means, error_medians, - elapsedTime, converged) + elapsedTime, converged) EnsembleTestSolution(EnsembleSolution(u, elapsedTime, converged), errors, weak_errors, error_means, error_medians, elapsedTime, converged) end @@ -29,20 +29,23 @@ struct EnsembleSolution{T, N, S} <: AbstractEnsembleSolution{T, N, S} u::S elapsedTime::Float64 converged::Bool - stats + stats::Any end function EnsembleSolution(sim, dims::NTuple{N}, elapsedTime, converged, stats) where {N} - EnsembleSolution{eltype(eltype(sim)), N, typeof(sim)}(sim, elapsedTime, converged, stats) + EnsembleSolution{eltype(eltype(sim)), N, typeof(sim)}(sim, + elapsedTime, + converged, + stats) end -function 
EnsembleSolution(sim, elapsedTime, converged, stats=nothing) +function EnsembleSolution(sim, elapsedTime, converged, stats = nothing) EnsembleSolution(sim, (length(sim),), elapsedTime, converged, stats) end # Vector of some type which is not an array function EnsembleSolution(sim::T, elapsedTime, - converged, stats=nothing) where {T <: AbstractVector{T2} -} where {T2 <: - AbstractArray} - EnsembleSolution{eltype(eltype(sim)), ndims(sim[1]) + 1, typeof(sim)}( - sim, + converged, + stats = nothing) where {T <: AbstractVector{T2} + } where {T2 <: + AbstractArray} + EnsembleSolution{eltype(eltype(sim)), ndims(sim[1]) + 1, typeof(sim)}(sim, elapsedTime, converged, stats) @@ -82,8 +85,8 @@ function calculate_ensemble_errors(sim::AbstractEnsembleSolution; kwargs...) end function calculate_ensemble_errors(u; elapsedTime = 0.0, converged = false, - weak_timeseries_errors = false, - weak_dense_errors = false) + weak_timeseries_errors = false, + weak_dense_errors = false) errors = Dict{Symbol, Vector{eltype(u[1].u[1])}}() #Should add type information error_means = Dict{Symbol, eltype(u[1].u[1])}() error_medians = Dict{Symbol, eltype(u[1].u[1])}() @@ -139,9 +142,9 @@ end ### Plot Recipes @recipe function f(sim::AbstractEnsembleSolution; - zcolors = sim.u isa AbstractArray ? fill(nothing, length(sim.u)) : - nothing, - trajectories = eachindex(sim)) + zcolors = sim.u isa AbstractArray ? fill(nothing, length(sim.u)) : + nothing, + trajectories = eachindex(sim)) for i in trajectories size(sim[i].u, 1) == 0 && continue @series begin @@ -156,9 +159,9 @@ end end @recipe function f(sim::EnsembleSummary; - trajectories = sim.u[1] isa AbstractArray ? eachindex(sim.u[1]) : - 1, - error_style = :ribbon, ci_type = :quantile) + trajectories = sim.u[1] isa AbstractArray ? eachindex(sim.u[1]) : + 1, + error_style = :ribbon, ci_type = :quantile) if ci_type == :SEM if sim.u[1] isa AbstractArray u = vecarr_to_vectors(sim.u) @@ -214,8 +217,8 @@ Base.@propagate_inbounds function Base.getindex(x::AbstractEnsembleSolution, s, end Base.@propagate_inbounds function Base.getindex(x::AbstractEnsembleSolution, - ::Colon, - args::Colon...) + ::Colon, + args::Colon...) 
return invoke(getindex, Tuple{RecursiveArrayTools.AbstractVectorOfArray, Colon, typeof.(args)...}, x, diff --git a/src/function_wrappers.jl b/src/function_wrappers.jl index 93f5394c43..1e72621803 100644 --- a/src/function_wrappers.jl +++ b/src/function_wrappers.jl @@ -11,7 +11,9 @@ function TimeGradientWrapper(f::F, uprev, p) where {F} return TimeGradientWrapper{isinplace(f, 4)}(f, uprev, p) end -(ff::TimeGradientWrapper{true})(t) = (du2 = similar(ff.uprev); ff.f(du2, ff.uprev, ff.p, t); du2) +function (ff::TimeGradientWrapper{true})(t) + (du2 = similar(ff.uprev); ff.f(du2, ff.uprev, ff.p, t); du2) +end (ff::TimeGradientWrapper{true})(du2, t) = ff.f(du2, ff.uprev, ff.p, t) (ff::TimeGradientWrapper{false})(t) = ff.f(ff.uprev, ff.p, t) @@ -28,9 +30,13 @@ end UJacobianWrapper(f::F, t, p) where {F} = UJacobianWrapper{isinplace(f, 4)}(f, t, p) (ff::UJacobianWrapper{true})(du1, uprev) = ff.f(du1, uprev, ff.p, ff.t) -(ff::UJacobianWrapper{true})(uprev) = (du1 = similar(uprev); ff.f(du1, uprev, ff.p, ff.t); du1) +function (ff::UJacobianWrapper{true})(uprev) + (du1 = similar(uprev); ff.f(du1, uprev, ff.p, ff.t); du1) +end (ff::UJacobianWrapper{true})(du1, uprev, p, t) = ff.f(du1, uprev, p, t) -(ff::UJacobianWrapper{true})(uprev, p, t) = (du1 = similar(uprev); ff.f(du1, uprev, p, t); du1) +function (ff::UJacobianWrapper{true})(uprev, p, t) + (du1 = similar(uprev); ff.f(du1, uprev, p, t); du1) +end (ff::UJacobianWrapper{false})(uprev) = ff.f(uprev, ff.p, ff.t) (ff::UJacobianWrapper{false})(uprev, p, t) = ff.f(uprev, p, t) diff --git a/src/integrator_interface.jl b/src/integrator_interface.jl index 7046398a7b..75dac44c31 100644 --- a/src/integrator_interface.jl +++ b/src/integrator_interface.jl @@ -425,9 +425,9 @@ function sym_to_index(sym, integrator::DEIntegrator) end Base.@propagate_inbounds function Base.getindex(A::DEIntegrator, - I::Union{Int, AbstractArray{Int}, - CartesianIndex, Colon, BitArray, - AbstractArray{Bool}}...) + I::Union{Int, AbstractArray{Int}, + CartesianIndex, Colon, BitArray, + AbstractArray{Bool}}...) RecursiveArrayTools.VectorOfArray(A.u)[I...] end @@ -675,10 +675,10 @@ function Base.iterate(tup::IntegratorTuples, state = 0) end function Base.eltype(::Type{ - IntegratorTuples{I}, -}) where {U, T, - I <: - DEIntegrator{<:Any, <:Any, U, T}} + IntegratorTuples{I}, + }) where {U, T, + I <: + DEIntegrator{<:Any, <:Any, U, T}} Tuple{U, T} end Base.IteratorSize(::Type{<:IntegratorTuples}) = Base.SizeUnknown() @@ -702,11 +702,11 @@ function Base.iterate(tup::IntegratorIntervals, state = 0) end function Base.eltype(::Type{ - IntegratorIntervals{I}, -}) where {U, T, - I <: - DEIntegrator{<:Any, <:Any, U, T - }} + IntegratorIntervals{I}, + }) where {U, T, + I <: + DEIntegrator{<:Any, <:Any, U, T + }} Tuple{U, T, U, T} end Base.IteratorSize(::Type{<:IntegratorIntervals}) = Base.SizeUnknown() @@ -749,11 +749,11 @@ end Base.length(iter::TimeChoiceIterator) = length(iter.ts) @recipe function f(integrator::DEIntegrator; - denseplot = (integrator.opts.calck || - integrator isa AbstractSDEIntegrator) && - integrator.iter > 0, - plotdensity = 10, - plot_analytic = false, vars = nothing, idxs = nothing) + denseplot = (integrator.opts.calck || + integrator isa AbstractSDEIntegrator) && + integrator.iter > 0, + plotdensity = 10, + plot_analytic = false, vars = nothing, idxs = nothing) if vars !== nothing Base.depwarn("To maintain consistency with solution indexing, keyword argument vars will be removed in a future version. 
Please use keyword argument idxs instead.", :f; force = true) diff --git a/src/interpolation.jl b/src/interpolation.jl index d212ac82f6..f3a9f89eca 100644 --- a/src/interpolation.jl +++ b/src/interpolation.jl @@ -73,12 +73,12 @@ function (id::SensitivityInterpolation)(tvals, idxs, deriv, p, continuity::Symbo interpolation(tvals, id, idxs, deriv, p, continuity) end function (id::SensitivityInterpolation)(val, tvals, idxs, deriv, p, - continuity::Symbol = :left) + continuity::Symbol = :left) interpolation!(val, tvals, id, idxs, deriv, p, continuity) end @inline function interpolation(tvals, id::I, idxs, deriv::D, p, - continuity::Symbol = :left) where {I, D} + continuity::Symbol = :left) where {I, D} t = id.t u = id.u id isa HermiteInterpolation && (du = id.du) @@ -140,7 +140,7 @@ Get the value at tvals where the solution is known at the times t (sorted), with values u and derivatives ks """ @inline function interpolation!(vals, tvals, id::I, idxs, deriv::D, p, - continuity::Symbol = :left) where {I, D} + continuity::Symbol = :left) where {I, D} t = id.t u = id.u id isa HermiteInterpolation && (du = id.du) @@ -203,7 +203,7 @@ Get the value at tval where the solution is known at the times t (sorted), with values u and derivatives ks """ @inline function interpolation(tval::Number, id::I, idxs, deriv::D, p, - continuity::Symbol = :left) where {I, D} + continuity::Symbol = :left) where {I, D} t = id.t u = id.u id isa HermiteInterpolation && (du = id.du) @@ -253,7 +253,7 @@ Get the value at tval where the solution is known at the times t (sorted), with values u and derivatives ks """ @inline function interpolation!(out, tval::Number, id::I, idxs, deriv::D, p, - continuity::Symbol = :left) where {I, D} + continuity::Symbol = :left) where {I, D} t = id.t u = id.u id isa HermiteInterpolation && (du = id.du) @@ -296,7 +296,7 @@ times t (sorted), with values u and derivatives ks end @inline function interpolant(Θ, id::AbstractDiffEqInterpolation, dt, y₀, y₁, dy₀, dy₁, idxs, - ::Type{Val{D}}) where {D} + ::Type{Val{D}}) where {D} error("$(string(typeof(id))) for $(D)th order not implemented") end ##################### Hermite Interpolants @@ -307,7 +307,7 @@ Hairer Norsett Wanner Solving Ordinary Differential Equations I - Nonstiff Probl Hermite Interpolation """ @inline function interpolant(Θ, id::HermiteInterpolation, dt, y₀, y₁, dy₀, dy₁, idxs, - T::Type{Val{0}}) + T::Type{Val{0}}) if idxs === nothing out = @. (1 - Θ) * y₀ + Θ * y₁ + Θ * (Θ - 1) * ((1 - 2Θ) * (y₁ - y₀) + (Θ - 1) * dt * dy₀ + Θ * dt * dy₁) @@ -330,7 +330,7 @@ end Hermite Interpolation """ @inline function interpolant(Θ, id::HermiteInterpolation, dt, y₀, y₁, dy₀, dy₁, idxs, - T::Type{Val{1}}) + T::Type{Val{1}}) if idxs === nothing out = @. dy₀ + Θ * (-4 * dt * dy₀ - 2 * dt * dy₁ - 6 * y₀ + @@ -356,7 +356,7 @@ end Hermite Interpolation """ @inline function interpolant(Θ, id::HermiteInterpolation, dt, y₀, y₁, dy₀, dy₁, idxs, - T::Type{Val{2}}) + T::Type{Val{2}}) if idxs === nothing out = @. (-4 * dt * dy₀ - 2 * dt * dy₁ - 6 * y₀ + Θ * (6 * dt * dy₀ + 6 * dt * dy₁ + 12 * y₀ - 12 * y₁) + 6 * y₁) / @@ -378,7 +378,7 @@ end Hermite Interpolation """ @inline function interpolant(Θ, id::HermiteInterpolation, dt, y₀, y₁, dy₀, dy₁, idxs, - T::Type{Val{3}}) + T::Type{Val{3}}) if idxs === nothing out = @. 
(6 * dt * dy₀ + 6 * dt * dy₁ + 12 * y₀ - 12 * y₁) / (dt * dt * dt) elseif idxs isa Number @@ -398,7 +398,7 @@ Hairer Norsett Wanner Solving Ordinary Differential Euations I - Nonstiff Proble Hermite Interpolation """ @inline function interpolant!(out, Θ, id::HermiteInterpolation, dt, y₀, y₁, dy₀, dy₁, idxs, - T::Type{Val{0}}) + T::Type{Val{0}}) if out === nothing return (1 - Θ) * y₀[idxs] + Θ * y₁[idxs] + Θ * (Θ - 1) * @@ -419,7 +419,7 @@ end Hermite Interpolation """ @inline function interpolant!(out, Θ, id::HermiteInterpolation, dt, y₀, y₁, dy₀, dy₁, idxs, - T::Type{Val{1}}) + T::Type{Val{1}}) if out === nothing return dy₀[idxs] + Θ * (-4 * dt * dy₀[idxs] - 2 * dt * dy₁[idxs] - 6 * y₀[idxs] + @@ -442,7 +442,7 @@ end Hermite Interpolation """ @inline function interpolant!(out, Θ, id::HermiteInterpolation, dt, y₀, y₁, dy₀, dy₁, idxs, - T::Type{Val{2}}) + T::Type{Val{2}}) if out === nothing return (-4 * dt * dy₀[idxs] - 2 * dt * dy₁[idxs] - 6 * y₀[idxs] + Θ * @@ -463,7 +463,7 @@ end Hermite Interpolation """ @inline function interpolant!(out, Θ, id::HermiteInterpolation, dt, y₀, y₁, dy₀, dy₁, idxs, - T::Type{Val{3}}) + T::Type{Val{3}}) if out === nothing return (6 * dt * dy₀[idxs] + 6 * dt * dy₁[idxs] + 12 * y₀[idxs] - 12 * y₁[idxs]) / (dt * dt * dt) @@ -509,7 +509,7 @@ end Linear Interpolation """ @inline function interpolant!(out, Θ, id::LinearInterpolation, dt, y₀, y₁, idxs, - T::Type{Val{0}}) + T::Type{Val{0}}) Θm1 = (1 - Θ) if out === nothing return Θm1 * y₀[idxs] + Θ * y₁[idxs] @@ -524,7 +524,7 @@ end Linear Interpolation """ @inline function interpolant!(out, Θ, id::LinearInterpolation, dt, y₀, y₁, idxs, - T::Type{Val{1}}) + T::Type{Val{1}}) if out === nothing return (y₁[idxs] - y₀[idxs]) / dt elseif idxs === nothing @@ -540,7 +540,7 @@ end Constant Interpolation """ @inline function interpolant(Θ, id::ConstantInterpolation, dt, y₀, y₁, idxs, - T::Type{Val{0}}) + T::Type{Val{0}}) if idxs === nothing out = @. y₀ elseif idxs isa Number @@ -553,7 +553,7 @@ Constant Interpolation end @inline function interpolant(Θ, id::ConstantInterpolation, dt, y₀, y₁, idxs, - T::Type{Val{1}}) + T::Type{Val{1}}) if idxs === nothing out = zeros(eltype(y₀), length(y₀)) elseif idxs isa Number @@ -569,7 +569,7 @@ end Constant Interpolation """ @inline function interpolant!(out, Θ, id::ConstantInterpolation, dt, y₀, y₁, idxs, - T::Type{Val{0}}) + T::Type{Val{0}}) if out === nothing return y₀[idxs] elseif idxs === nothing @@ -583,7 +583,7 @@ end Constant Interpolation """ @inline function interpolant!(out, Θ, id::ConstantInterpolation, dt, y₀, y₁, idxs, - T::Type{Val{1}}) + T::Type{Val{1}}) if out === nothing return zeros(eltype(y₀), length(idxs)) else diff --git a/src/operators/basic_operators.jl b/src/operators/basic_operators.jl index 3faab1451d..b96045f760 100644 --- a/src/operators/basic_operators.jl +++ b/src/operators/basic_operators.jl @@ -111,7 +111,7 @@ end Base.@propagate_inbounds Base.convert(::Type{AbstractMatrix}, L::DiffEqArrayOperator) = L.A Base.@propagate_inbounds Base.setindex!(L::DiffEqArrayOperator, v, i::Int) = (L.A[i] = v) Base.@propagate_inbounds function Base.setindex!(L::DiffEqArrayOperator, v, - I::Vararg{Int, N}) where {N} + I::Vararg{Int, N}) where {N} (L.A[I...] 
= v) end @@ -155,39 +155,39 @@ struct FactorizedDiffEqArrayOperator{T <: Number, end function Base.convert(::Type{AbstractMatrix}, - L::FactorizedDiffEqArrayOperator{<:Any, - <:Union{Factorization, AbstractMatrix - }}) + L::FactorizedDiffEqArrayOperator{<:Any, + <:Union{Factorization, AbstractMatrix + }}) convert(AbstractMatrix, L.F) end function Base.convert(::Type{AbstractMatrix}, - L::FactorizedDiffEqArrayOperator{<:Any, <:Union{Adjoint, AdjointFact} - }) + L::FactorizedDiffEqArrayOperator{<:Any, <:Union{Adjoint, AdjointFact} + }) adjoint(convert(AbstractMatrix, adjoint(L.F))) end function Base.convert(::Type{AbstractMatrix}, - L::FactorizedDiffEqArrayOperator{<:Any, - <:Union{Transpose, TransposeFact}}) + L::FactorizedDiffEqArrayOperator{<:Any, + <:Union{Transpose, TransposeFact}}) transpose(convert(AbstractMatrix, transpose(L.F))) end function Base.Matrix(L::FactorizedDiffEqArrayOperator{<:Any, - <:Union{Factorization, AbstractMatrix - }}) + <:Union{Factorization, AbstractMatrix + }}) Matrix(L.F) end function Base.Matrix(L::FactorizedDiffEqArrayOperator{<:Any, <:Union{Adjoint, AdjointFact}}) adjoint(Matrix(adjoint(L.F))) end function Base.Matrix(L::FactorizedDiffEqArrayOperator{<:Any, - <:Union{Transpose, TransposeFact}}) + <:Union{Transpose, TransposeFact}}) transpose(Matrix(transpose(L.F))) end Base.adjoint(L::FactorizedDiffEqArrayOperator) = FactorizedDiffEqArrayOperator(L.F') Base.size(L::FactorizedDiffEqArrayOperator, args...) = size(L.F, args...) function LinearAlgebra.ldiv!(Y::AbstractVecOrMat, L::FactorizedDiffEqArrayOperator, - B::AbstractVecOrMat) + B::AbstractVecOrMat) ldiv!(Y, L.F, B) end LinearAlgebra.ldiv!(L::FactorizedDiffEqArrayOperator, B::AbstractVecOrMat) = ldiv!(L.F, B) diff --git a/src/operators/common_defaults.jl b/src/operators/common_defaults.jl index d3cd298414..632b147828 100644 --- a/src/operators/common_defaults.jl +++ b/src/operators/common_defaults.jl @@ -14,7 +14,7 @@ function LinearAlgebra.opnorm(L::AbstractDiffEqLinearOperator, p::Real = 2) opnorm(convert(AbstractMatrix, L), p) end Base.@propagate_inbounds function Base.getindex(L::AbstractDiffEqLinearOperator, - I::Vararg{Any, N}) where {N} + I::Vararg{Any, N}) where {N} convert(AbstractMatrix, L)[I...] 
end function Base.getindex(L::AbstractDiffEqLinearOperator, I::Vararg{Int, N}) where {N} @@ -41,25 +41,25 @@ end ### added in https://github.com/SciML/SciMLBase.jl/pull/377 function LinearAlgebra.mul!(Y::AbstractVecOrMat, L::AbstractDiffEqLinearOperator, - B::AbstractVecOrMat) + B::AbstractVecOrMat) mul!(Y, convert(AbstractMatrix, L), B) end ### function LinearAlgebra.mul!(Y::AbstractArray, L::AbstractDiffEqLinearOperator, - B::AbstractArray) + B::AbstractArray) mul!(Y, convert(AbstractMatrix, L), B) end ### added in https://github.com/SciML/SciMLBase.jl/pull/377 function LinearAlgebra.mul!(Y::AbstractVecOrMat, L::AbstractDiffEqLinearOperator, - B::AbstractVecOrMat, α::Number, β::Number) + B::AbstractVecOrMat, α::Number, β::Number) mul!(Y, convert(AbstractMatrix, L), B, α, β) end ### function LinearAlgebra.mul!(Y::AbstractArray, L::AbstractDiffEqLinearOperator, - B::AbstractArray, α::Number, β::Number) + B::AbstractArray, α::Number, β::Number) mul!(Y, convert(AbstractMatrix, L), B, α, β) end diff --git a/src/operators/diffeq_operator.jl b/src/operators/diffeq_operator.jl index a97063b830..f2357778f1 100644 --- a/src/operators/diffeq_operator.jl +++ b/src/operators/diffeq_operator.jl @@ -120,7 +120,7 @@ Base.:*(x::AbstractVecOrMat, L::DiffEqScaledOperator) = (x * L.op) * L.coeff Base.:*(x::AbstractArray, L::DiffEqScaledOperator) = (x * L.op) * L.coeff function LinearAlgebra.mul!(r::AbstractVecOrMat, L::DiffEqScaledOperator, - x::AbstractVecOrMat) + x::AbstractVecOrMat) mul!(r, L.op, x) r .= r * L.coeff end @@ -130,7 +130,7 @@ function LinearAlgebra.mul!(r::AbstractArray, L::DiffEqScaledOperator, x::Abstra end function LinearAlgebra.mul!(r::AbstractVecOrMat, x::AbstractVecOrMat, - L::DiffEqScaledOperator) + L::DiffEqScaledOperator) mul!(r, x, L.op) r .= r * L.coeff end @@ -154,15 +154,15 @@ Base.:\(x::AbstractArray, L::DiffEqScaledOperator) = L.coeff * (x \ L) for N in (2, 3) @eval begin function LinearAlgebra.mul!(Y::AbstractArray{T, $N}, - L::DiffEqScaledOperator{T}, - B::AbstractArray{T, $N}) where {T} + L::DiffEqScaledOperator{T}, + B::AbstractArray{T, $N}) where {T} LinearAlgebra.lmul!(Y, L.coeff, mul!(Y, L.op, B)) end end end function LinearAlgebra.ldiv!(Y::AbstractVecOrMat, L::DiffEqScaledOperator, - B::AbstractVecOrMat) + B::AbstractVecOrMat) lmul!(1 / L.coeff, ldiv!(Y, L.op, B)) end function LinearAlgebra.ldiv!(Y::AbstractArray, L::DiffEqScaledOperator, B::AbstractArray) diff --git a/src/problems/analytical_problems.jl b/src/problems/analytical_problems.jl index d6b744885a..595e0acef4 100644 --- a/src/problems/analytical_problems.jl +++ b/src/problems/analytical_problems.jl @@ -9,7 +9,7 @@ struct AnalyticalProblem{uType, tType, isinplace, P, F, K} <: p::P kwargs::K @add_kwonly function AnalyticalProblem{iip}(f, u0, tspan, p = NullParameters(); - kwargs...) where {iip} + kwargs...) where {iip} _u0 = prepare_initial_state(u0) _tspan = promote_tspan(tspan) warn_paramtype(p) diff --git a/src/problems/dae_problems.jl b/src/problems/dae_problems.jl index e5e532aac3..4caf22549d 100644 --- a/src/problems/dae_problems.jl +++ b/src/problems/dae_problems.jl @@ -77,9 +77,9 @@ struct DAEProblem{uType, duType, tType, isinplace, P, F, K, D} <: kwargs::K differential_vars::D @add_kwonly function DAEProblem{iip}(f::AbstractDAEFunction{iip}, - du0, u0, tspan, p = NullParameters(); - differential_vars = nothing, - kwargs...) where {iip} + du0, u0, tspan, p = NullParameters(); + differential_vars = nothing, + kwargs...) 
where {iip} _u0 = prepare_initial_state(u0) _du0 = prepare_initial_state(du0) if !isnothing(_u0) diff --git a/src/problems/discrete_problems.jl b/src/problems/discrete_problems.jl index 4b06bdfa59..1f39e9d090 100644 --- a/src/problems/discrete_problems.jl +++ b/src/problems/discrete_problems.jl @@ -88,8 +88,8 @@ struct DiscreteProblem{uType, tType, isinplace, P, F, K} <: """ A callback to be applied to every solver which uses the problem.""" kwargs::K @add_kwonly function DiscreteProblem{iip}(f::AbstractDiscreteFunction{iip}, - u0, tspan::Tuple, p = NullParameters(); - kwargs...) where {iip} + u0, tspan::Tuple, p = NullParameters(); + kwargs...) where {iip} _u0 = prepare_initial_state(u0) _tspan = promote_tspan(tspan) warn_paramtype(p) @@ -103,7 +103,7 @@ struct DiscreteProblem{uType, tType, isinplace, P, F, K} <: end function DiscreteProblem{iip}(u0::Nothing, tspan::Nothing, p = NullParameters(); - callback = nothing) where {iip} + callback = nothing) where {iip} if iip f = DISCRETE_INPLACE_DEFAULT else @@ -130,12 +130,12 @@ TruncatedStacktraces.@truncate_stacktrace DiscreteProblem 3 1 2 Defines a discrete problem with the specified functions. """ function DiscreteProblem(f::AbstractDiscreteFunction, u0, tspan::Tuple, - p = NullParameters(); kwargs...) + p = NullParameters(); kwargs...) DiscreteProblem{isinplace(f)}(f, u0, tspan, p; kwargs...) end function DiscreteProblem(f::Base.Callable, u0, tspan::Tuple, p = NullParameters(); - kwargs...) + kwargs...) iip = isinplace(f, 4) DiscreteProblem(DiscreteFunction{iip}(f), u0, tspan, p; kwargs...) end @@ -146,7 +146,7 @@ $(SIGNATURES) Define a discrete problem with the identity map. """ function DiscreteProblem(u0::Union{AbstractArray, Number}, tspan::Tuple, - p = NullParameters(); kwargs...) + p = NullParameters(); kwargs...) iip = u0 isa AbstractArray if iip f = DISCRETE_INPLACE_DEFAULT diff --git a/src/problems/implicit_discrete_problems.jl b/src/problems/implicit_discrete_problems.jl index b6463fa6be..8a8d3eff71 100644 --- a/src/problems/implicit_discrete_problems.jl +++ b/src/problems/implicit_discrete_problems.jl @@ -81,11 +81,11 @@ struct ImplicitDiscreteProblem{uType, tType, isinplace, P, F, K} <: """ A callback to be applied to every solver which uses the problem.""" kwargs::K @add_kwonly function ImplicitDiscreteProblem{iip}(f::ImplicitDiscreteFunction{ - iip, - }, - u0, tspan::Tuple, - p = NullParameters(); - kwargs...) where {iip} + iip, + }, + u0, tspan::Tuple, + p = NullParameters(); + kwargs...) where {iip} _u0 = prepare_initial_state(u0) _tspan = promote_tspan(tspan) warn_paramtype(p) @@ -99,7 +99,7 @@ struct ImplicitDiscreteProblem{uType, tType, isinplace, P, F, K} <: end function ImplicitDiscreteProblem{iip}(f, u0, tspan, p = NullParameters(); - kwargs...) where {iip} + kwargs...) where {iip} ImplicitDiscreteProblem(ImplicitDiscreteFunction{iip}(f), u0, tspan, p; kwargs...) end end @@ -112,12 +112,12 @@ TruncatedStacktraces.@truncate_stacktrace ImplicitDiscreteProblem 3 1 2 Defines a discrete problem with the specified functions. """ function ImplicitDiscreteProblem(f::ImplicitDiscreteFunction, u0, tspan::Tuple, - p = NullParameters(); kwargs...) + p = NullParameters(); kwargs...) ImplicitDiscreteProblem{isinplace(f, 6)}(f, u0, tspan, p; kwargs...) end function ImplicitDiscreteProblem(f, u0, tspan, p = NullParameters(); - kwargs...) + kwargs...) iip = isinplace(f, 6) ImplicitDiscreteProblem(ImplicitDiscreteFunction{iip}(f), u0, tspan, p; kwargs...) 
end diff --git a/src/problems/problem_traits.jl b/src/problems/problem_traits.jl index dbafebd1f8..b2392bf798 100644 --- a/src/problems/problem_traits.jl +++ b/src/problems/problem_traits.jl @@ -3,29 +3,29 @@ """ is_diagonal_noise(prob::AbstractSciMLProblem) = false function is_diagonal_noise(prob::AbstractRODEProblem{ - uType, - tType, - iip, - Nothing, -}) where { - uType, - tType, - iip, -} + uType, + tType, + iip, + Nothing, + }) where { + uType, + tType, + iip, + } true end function is_diagonal_noise(prob::AbstractSDDEProblem{ - uType, - tType, - lType, - iip, - Nothing, -}) where { - uType, - tType, - lType, - iip, -} + uType, + tType, + lType, + iip, + Nothing, + }) where { + uType, + tType, + lType, + iip, + } true end @@ -40,44 +40,44 @@ isinplace(prob::AbstractNonlinearProblem{uType, iip}) where {uType, iip} = iip isinplace(prob::AbstractIntegralProblem{iip}) where {iip} = iip isinplace(prob::AbstractODEProblem{uType, tType, iip}) where {uType, tType, iip} = iip function isinplace(prob::AbstractRODEProblem{ - uType, - tType, - iip, - ND, -}) where {uType, tType, - iip, ND} + uType, + tType, + iip, + ND, + }) where {uType, tType, + iip, ND} iip end function isinplace(prob::AbstractDDEProblem{ - uType, - tType, - lType, - iip, -}) where {uType, tType, - lType, iip} + uType, + tType, + lType, + iip, + }) where {uType, tType, + lType, iip} iip end function isinplace(prob::AbstractDAEProblem{ - uType, - duType, - tType, - iip, -}) where {uType, - duType, - tType, iip} + uType, + duType, + tType, + iip, + }) where {uType, + duType, + tType, iip} iip end isinplace(prob::AbstractNoiseProblem) = isinplace(prob.noise) isinplace(::SplitFunction{iip}) where {iip} = iip function isinplace(prob::AbstractSDDEProblem{ - uType, - tType, - lType, - iip, - ND, -}) where {uType, - tType, - lType, - iip, ND} + uType, + tType, + lType, + iip, + ND, + }) where {uType, + tType, + lType, + iip, ND} iip end diff --git a/src/problems/rode_problems.jl b/src/problems/rode_problems.jl index 18c7a587eb..880b934e8e 100644 --- a/src/problems/rode_problems.jl +++ b/src/problems/rode_problems.jl @@ -65,10 +65,10 @@ mutable struct RODEProblem{uType, tType, isinplace, P, NP, F, K, ND} <: rand_prototype::ND seed::UInt64 @add_kwonly function RODEProblem{iip}(f::RODEFunction{iip}, u0, tspan, - p = NullParameters(); - rand_prototype = nothing, - noise = nothing, seed = UInt64(0), - kwargs...) where {iip} + p = NullParameters(); + rand_prototype = nothing, + noise = nothing, seed = UInt64(0), + kwargs...) where {iip} _u0 = prepare_initial_state(u0) _tspan = promote_tspan(tspan) warn_paramtype(p) diff --git a/src/problems/sdde_problems.jl b/src/problems/sdde_problems.jl index 76d4b8dc4a..c9033aaa94 100644 --- a/src/problems/sdde_problems.jl +++ b/src/problems/sdde_problems.jl @@ -118,14 +118,14 @@ struct SDDEProblem{uType, tType, lType, lType2, isinplace, P, NP, F, G, H, K, ND order_discontinuity_t0::Rational{Int} @add_kwonly function SDDEProblem{iip}(f::AbstractSDDEFunction{iip}, g, u0, h, tspan, - p = NullParameters(); - noise_rate_prototype = nothing, noise = nothing, - seed = UInt64(0), - constant_lags = (), dependent_lags = (), - neutral = f.mass_matrix !== I && - det(f.mass_matrix) != 1, - order_discontinuity_t0 = 0 // 1, - kwargs...) where {iip} + p = NullParameters(); + noise_rate_prototype = nothing, noise = nothing, + seed = UInt64(0), + constant_lags = (), dependent_lags = (), + neutral = f.mass_matrix !== I && + det(f.mass_matrix) != 1, + order_discontinuity_t0 = 0 // 1, + kwargs...) 
where {iip} _u0 = prepare_initial_state(u0) _tspan = promote_tspan(tspan) warn_paramtype(p) @@ -138,8 +138,8 @@ struct SDDEProblem{uType, tType, lType, lType2, isinplace, P, NP, F, G, H, K, ND end function SDDEProblem{iip}(f::AbstractSDDEFunction{iip}, g, h, tspan::Tuple, - p = NullParameters(); - order_discontinuity_t0 = 1 // 1, kwargs...) where {iip} + p = NullParameters(); + order_discontinuity_t0 = 1 // 1, kwargs...) where {iip} SDDEProblem{iip}(f, g, h(p, first(tspan)), h, tspan, p; order_discontinuity_t0 = max(1 // 1, order_discontinuity_t0), kwargs...) diff --git a/src/problems/steady_state_problems.jl b/src/problems/steady_state_problems.jl index f25f15e274..b2daec0e10 100644 --- a/src/problems/steady_state_problems.jl +++ b/src/problems/steady_state_problems.jl @@ -81,8 +81,8 @@ struct SteadyStateProblem{uType, isinplace, P, F, K} <: p::P kwargs::K @add_kwonly function SteadyStateProblem{iip}(f::AbstractODEFunction{iip}, - u0, p = NullParameters(); - kwargs...) where {iip} + u0, p = NullParameters(); + kwargs...) where {iip} _u0 = prepare_initial_state(u0) warn_paramtype(p) new{typeof(_u0), isinplace(f), typeof(p), typeof(f), typeof(kwargs)}(f, _u0, p, diff --git a/src/remake.jl b/src/remake.jl index a03c11ec52..f2e3d3342d 100644 --- a/src/remake.jl +++ b/src/remake.jl @@ -50,11 +50,11 @@ Remake the given `ODEProblem`. If `u0` or `p` are given as symbolic maps `ModelingToolkit.jl` has to be loaded. """ function remake(prob::ODEProblem; f = missing, - u0 = missing, - tspan = missing, - p = missing, - kwargs = missing, - _kwargs...) + u0 = missing, + tspan = missing, + p = missing, + kwargs = missing, + _kwargs...) if tspan === missing tspan = prob.tspan end @@ -127,7 +127,7 @@ end Remake the given `BVProblem`. """ function remake(prob::BVProblem; f = missing, bc = missing, u0 = missing, tspan = missing, - p = missing, kwargs = missing, problem_type = missing, _kwargs...) + p = missing, kwargs = missing, problem_type = missing, _kwargs...) if tspan === missing tspan = prob.tspan end @@ -164,10 +164,10 @@ function remake(prob::BVProblem; f = missing, bc = missing, u0 = missing, tspan ptspan = promote_tspan(tspan) if iip _f = BVPFunction{iip, FunctionWrapperSpecialize, twopoint}(wrapfun_iip(f, - (u0, u0, p, ptspan[1])), bc; prob.f.bcresid_prototype) + (u0, u0, p, ptspan[1])), bc; prob.f.bcresid_prototype) else _f = BVPFunction{iip, FunctionWrapperSpecialize, twopoint}(wrapfun_oop(f, - (u0, p, ptspan[1])), bc; prob.f.bcresid_prototype) + (u0, p, ptspan[1])), bc; prob.f.bcresid_prototype) end else _f = BVPFunction{isinplace(prob), specialization(prob.f), twopoint}(f, bc; @@ -189,15 +189,15 @@ end Remake the given `SDEProblem`. """ function remake(prob::SDEProblem; - f = missing, - u0 = missing, - tspan = missing, - p = missing, - noise = missing, - noise_rate_prototype = missing, - seed = missing, - kwargs = missing, - _kwargs...) + f = missing, + u0 = missing, + tspan = missing, + p = missing, + noise = missing, + noise_rate_prototype = missing, + seed = missing, + kwargs = missing, + _kwargs...) if tspan === missing tspan = prob.tspan end @@ -252,17 +252,17 @@ Remake the given `OptimizationProblem`. If `u0` or `p` are given as symbolic maps `ModelingToolkit.jl` has to be loaded. """ function remake(prob::OptimizationProblem; - f = missing, - u0 = missing, - p = missing, - lb = missing, - ub = missing, - int = missing, - lcons = missing, - ucons = missing, - sense = missing, - kwargs = missing, - _kwargs...) 
+ f = missing, + u0 = missing, + p = missing, + lb = missing, + ub = missing, + int = missing, + lcons = missing, + ucons = missing, + sense = missing, + kwargs = missing, + _kwargs...) if p === missing && u0 === missing p, u0 = prob.p, prob.u0 else # at least one of them has a value @@ -326,12 +326,12 @@ Remake the given `NonlinearProblem`. If `u0` or `p` are given as symbolic maps `ModelingToolkit.jl` has to be loaded. """ function remake(prob::NonlinearProblem; - f = missing, - u0 = missing, - p = missing, - problem_type = missing, - kwargs = missing, - _kwargs...) + f = missing, + u0 = missing, + p = missing, + problem_type = missing, + kwargs = missing, + _kwargs...) if p === missing && u0 === missing p, u0 = prob.p, prob.u0 else # at least one of them has a value @@ -369,7 +369,6 @@ function remake(prob::NonlinearProblem; end end - """ remake(prob::NonlinearLeastSquaresProblem; f = missing, u0 = missing, p = missing, kwargs = missing, _kwargs...) @@ -377,7 +376,7 @@ end Remake the given `NonlinearLeastSquaresProblem`. """ function remake(prob::NonlinearLeastSquaresProblem; f = missing, u0 = missing, p = missing, - kwargs = missing, _kwargs...) + kwargs = missing, _kwargs...) if p === missing && u0 === missing p, u0 = prob.p, prob.u0 else # at least one of them has a value diff --git a/src/scimlfunctions.jl b/src/scimlfunctions.jl index 55eae6b4b1..c48ddd2048 100644 --- a/src/scimlfunctions.jl +++ b/src/scimlfunctions.jl @@ -2125,7 +2125,7 @@ For more details on this argument, see the ODEFunction documentation. The fields of the OptimizationFunction type directly match the names of the inputs. """ struct OptimizationFunction{iip, AD, F, G, H, HV, C, CJ, CH, HP, CJP, CHP, S, S2, O, - EX, CEX, SYS, LH, LHP, HCV, CJCV, CHCV, LHCV} <: + EX, CEX, SYS, LH, LHP, HCV, CJCV, CHCV, LHCV} <: AbstractOptimizationFunction{iip} f::F adtype::AD @@ -2462,32 +2462,32 @@ end ######### Basic Constructor function ODEFunction{iip, specialize}(f; - mass_matrix = __has_mass_matrix(f) ? f.mass_matrix : - I, - analytic = __has_analytic(f) ? f.analytic : nothing, - tgrad = __has_tgrad(f) ? f.tgrad : nothing, - jac = __has_jac(f) ? f.jac : nothing, - jvp = __has_jvp(f) ? f.jvp : nothing, - vjp = __has_vjp(f) ? f.vjp : nothing, - jac_prototype = __has_jac_prototype(f) ? - f.jac_prototype : + mass_matrix = __has_mass_matrix(f) ? f.mass_matrix : + I, + analytic = __has_analytic(f) ? f.analytic : nothing, + tgrad = __has_tgrad(f) ? f.tgrad : nothing, + jac = __has_jac(f) ? f.jac : nothing, + jvp = __has_jvp(f) ? f.jvp : nothing, + vjp = __has_vjp(f) ? f.vjp : nothing, + jac_prototype = __has_jac_prototype(f) ? + f.jac_prototype : + nothing, + sparsity = __has_sparsity(f) ? f.sparsity : + jac_prototype, + Wfact = __has_Wfact(f) ? f.Wfact : nothing, + Wfact_t = __has_Wfact_t(f) ? f.Wfact_t : nothing, + W_prototype = __has_W_prototype(f) ? f.W_prototype : nothing, + paramjac = __has_paramjac(f) ? f.paramjac : nothing, + syms = __has_syms(f) ? f.syms : nothing, + indepsym = __has_indepsym(f) ? f.indepsym : nothing, + paramsyms = __has_paramsyms(f) ? f.paramsyms : nothing, - sparsity = __has_sparsity(f) ? f.sparsity : - jac_prototype, - Wfact = __has_Wfact(f) ? f.Wfact : nothing, - Wfact_t = __has_Wfact_t(f) ? f.Wfact_t : nothing, - W_prototype = __has_W_prototype(f) ? f.W_prototype : nothing, - paramjac = __has_paramjac(f) ? f.paramjac : nothing, - syms = __has_syms(f) ? f.syms : nothing, - indepsym = __has_indepsym(f) ? f.indepsym : nothing, - paramsyms = __has_paramsyms(f) ? 
f.paramsyms : - nothing, - observed = __has_observed(f) ? f.observed : - DEFAULT_OBSERVED, - colorvec = __has_colorvec(f) ? f.colorvec : nothing, - sys = __has_sys(f) ? f.sys : nothing) where {iip, - specialize, -} + observed = __has_observed(f) ? f.observed : + DEFAULT_OBSERVED, + colorvec = __has_colorvec(f) ? f.colorvec : nothing, + sys = __has_sys(f) ? f.sys : nothing) where {iip, + specialize, + } if mass_matrix === I && f isa Tuple mass_matrix = ((I for i in 1:length(f))...,) end @@ -2700,8 +2700,8 @@ function NonlinearFunction{iip}(f::ODEFunction) where {iip} end @add_kwonly function SplitFunction(f1, f2, mass_matrix, cache, analytic, tgrad, jac, jvp, - vjp, jac_prototype, sparsity, Wfact, Wfact_t, paramjac, - syms, indepsym, paramsyms, observed, colorvec, sys) + vjp, jac_prototype, sparsity, Wfact, Wfact_t, paramjac, + syms, indepsym, paramsyms, observed, colorvec, sys) f1 = ODEFunction(f1) f2 = ODEFunction(f2) @@ -2722,36 +2722,36 @@ end paramsyms, observed, colorvec, sys) end function SplitFunction{iip, specialize}(f1, f2; - mass_matrix = __has_mass_matrix(f1) ? - f1.mass_matrix : I, - _func_cache = nothing, - analytic = __has_analytic(f1) ? f1.analytic : - nothing, - tgrad = __has_tgrad(f1) ? f1.tgrad : nothing, - jac = __has_jac(f1) ? f1.jac : nothing, - jvp = __has_jvp(f1) ? f1.jvp : nothing, - vjp = __has_vjp(f1) ? f1.vjp : nothing, - jac_prototype = __has_jac_prototype(f1) ? - f1.jac_prototype : + mass_matrix = __has_mass_matrix(f1) ? + f1.mass_matrix : I, + _func_cache = nothing, + analytic = __has_analytic(f1) ? f1.analytic : + nothing, + tgrad = __has_tgrad(f1) ? f1.tgrad : nothing, + jac = __has_jac(f1) ? f1.jac : nothing, + jvp = __has_jvp(f1) ? f1.jvp : nothing, + vjp = __has_vjp(f1) ? f1.vjp : nothing, + jac_prototype = __has_jac_prototype(f1) ? + f1.jac_prototype : + nothing, + sparsity = __has_sparsity(f1) ? f1.sparsity : + jac_prototype, + Wfact = __has_Wfact(f1) ? f1.Wfact : nothing, + Wfact_t = __has_Wfact_t(f1) ? f1.Wfact_t : nothing, + paramjac = __has_paramjac(f1) ? f1.paramjac : + nothing, + syms = __has_syms(f1) ? f1.syms : nothing, + indepsym = __has_indepsym(f1) ? f1.indepsym : + nothing, + paramsyms = __has_paramsyms(f1) ? f1.paramsyms : nothing, - sparsity = __has_sparsity(f1) ? f1.sparsity : - jac_prototype, - Wfact = __has_Wfact(f1) ? f1.Wfact : nothing, - Wfact_t = __has_Wfact_t(f1) ? f1.Wfact_t : nothing, - paramjac = __has_paramjac(f1) ? f1.paramjac : - nothing, - syms = __has_syms(f1) ? f1.syms : nothing, - indepsym = __has_indepsym(f1) ? f1.indepsym : - nothing, - paramsyms = __has_paramsyms(f1) ? f1.paramsyms : - nothing, - observed = __has_observed(f1) ? f1.observed : - DEFAULT_OBSERVED, - colorvec = __has_colorvec(f1) ? f1.colorvec : - nothing, - sys = __has_sys(f1) ? f1.sys : nothing) where {iip, - specialize, -} + observed = __has_observed(f1) ? f1.observed : + DEFAULT_OBSERVED, + colorvec = __has_colorvec(f1) ? f1.colorvec : + nothing, + sys = __has_sys(f1) ? f1.sys : nothing) where {iip, + specialize, + } if specialize === NoSpecialize SplitFunction{iip, specialize, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, @@ -2784,9 +2784,9 @@ end SplitFunction(f::SplitFunction; kwargs...) 
= f @add_kwonly function DynamicalODEFunction{iip}(f1, f2, mass_matrix, analytic, tgrad, jac, - jvp, vjp, jac_prototype, sparsity, Wfact, - Wfact_t, paramjac, syms, indepsym, paramsyms, - observed, colorvec, sys) where {iip} + jvp, vjp, jac_prototype, sparsity, Wfact, + Wfact_t, paramjac, syms, indepsym, paramsyms, + observed, colorvec, sys) where {iip} f1 = f1 isa AbstractSciMLOperator ? f1 : ODEFunction(f1) f2 = ODEFunction(f2) @@ -2809,37 +2809,37 @@ SplitFunction(f::SplitFunction; kwargs...) = f end function DynamicalODEFunction{iip, specialize}(f1, f2; - mass_matrix = __has_mass_matrix(f1) ? - f1.mass_matrix : I, - analytic = __has_analytic(f1) ? f1.analytic : - nothing, - tgrad = __has_tgrad(f1) ? f1.tgrad : nothing, - jac = __has_jac(f1) ? f1.jac : nothing, - jvp = __has_jvp(f1) ? f1.jvp : nothing, - vjp = __has_vjp(f1) ? f1.vjp : nothing, - jac_prototype = __has_jac_prototype(f1) ? - f1.jac_prototype : nothing, - sparsity = __has_sparsity(f1) ? f1.sparsity : - jac_prototype, - Wfact = __has_Wfact(f1) ? f1.Wfact : nothing, - Wfact_t = __has_Wfact_t(f1) ? f1.Wfact_t : - nothing, - paramjac = __has_paramjac(f1) ? f1.paramjac : - nothing, - syms = __has_syms(f1) ? f1.syms : nothing, - indepsym = __has_indepsym(f1) ? f1.indepsym : - nothing, - paramsyms = __has_paramsyms(f1) ? - f1.paramsyms : - nothing, - observed = __has_observed(f1) ? f1.observed : - DEFAULT_OBSERVED, - colorvec = __has_colorvec(f1) ? f1.colorvec : - nothing, - sys = __has_sys(f1) ? f1.sys : nothing) where { - iip, - specialize, -} + mass_matrix = __has_mass_matrix(f1) ? + f1.mass_matrix : I, + analytic = __has_analytic(f1) ? f1.analytic : + nothing, + tgrad = __has_tgrad(f1) ? f1.tgrad : nothing, + jac = __has_jac(f1) ? f1.jac : nothing, + jvp = __has_jvp(f1) ? f1.jvp : nothing, + vjp = __has_vjp(f1) ? f1.vjp : nothing, + jac_prototype = __has_jac_prototype(f1) ? + f1.jac_prototype : nothing, + sparsity = __has_sparsity(f1) ? f1.sparsity : + jac_prototype, + Wfact = __has_Wfact(f1) ? f1.Wfact : nothing, + Wfact_t = __has_Wfact_t(f1) ? f1.Wfact_t : + nothing, + paramjac = __has_paramjac(f1) ? f1.paramjac : + nothing, + syms = __has_syms(f1) ? f1.syms : nothing, + indepsym = __has_indepsym(f1) ? f1.indepsym : + nothing, + paramsyms = __has_paramsyms(f1) ? + f1.paramsyms : + nothing, + observed = __has_observed(f1) ? f1.observed : + DEFAULT_OBSERVED, + colorvec = __has_colorvec(f1) ? f1.colorvec : + nothing, + sys = __has_sys(f1) ? f1.sys : nothing) where { + iip, + specialize, + } if specialize === NoSpecialize DynamicalODEFunction{iip, specialize, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, @@ -2878,18 +2878,18 @@ end DynamicalODEFunction(f::DynamicalODEFunction; kwargs...) = f function DiscreteFunction{iip, specialize}(f; - analytic = __has_analytic(f) ? f.analytic : - nothing, - syms = __has_syms(f) ? f.syms : nothing, - indepsym = __has_indepsym(f) ? f.indepsym : - nothing, - paramsyms = __has_paramsyms(f) ? f.paramsyms : - nothing, - observed = __has_observed(f) ? f.observed : - DEFAULT_OBSERVED, - sys = __has_sys(f) ? f.sys : nothing) where {iip, - specialize, -} + analytic = __has_analytic(f) ? f.analytic : + nothing, + syms = __has_syms(f) ? f.syms : nothing, + indepsym = __has_indepsym(f) ? f.indepsym : + nothing, + paramsyms = __has_paramsyms(f) ? f.paramsyms : + nothing, + observed = __has_observed(f) ? f.observed : + DEFAULT_OBSERVED, + sys = __has_sys(f) ? 
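As a usage sketch for the `SplitFunction` machinery reformatted above: `SplitODEProblem` assembles it from two right-hand sides (assumption: the split below is purely illustrative).

using SciMLBase

f1(u, p, t) = -10.0 .* u        # e.g. the stiff/linear part
f2(u, p, t) = sin.(t .* u)      # e.g. the nonstiff part
prob = SplitODEProblem(f1, f2, [1.0], (0.0, 1.0))   # builds the SplitFunction internally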
f.sys : nothing) where {iip, + specialize, + } _f = prepare_function(f) if specialize === NoSpecialize DiscreteFunction{iip, specialize, Any, Any, Any, Any, Any, Any, Any}(_f, analytic, @@ -2930,23 +2930,23 @@ function unwrapped_f(f::DiscreteFunction, newf = unwrapped_f(f.f)) end function ImplicitDiscreteFunction{iip, specialize}(f; - analytic = __has_analytic(f) ? - f.analytic : - nothing, - syms = __has_syms(f) ? f.syms : nothing, - indepsym = __has_indepsym(f) ? - f.indepsym : - nothing, - paramsyms = __has_paramsyms(f) ? - f.paramsyms : - nothing, - observed = __has_observed(f) ? - f.observed : - DEFAULT_OBSERVED, - sys = __has_sys(f) ? f.sys : nothing) where { - iip, - specialize, -} + analytic = __has_analytic(f) ? + f.analytic : + nothing, + syms = __has_syms(f) ? f.syms : nothing, + indepsym = __has_indepsym(f) ? + f.indepsym : + nothing, + paramsyms = __has_paramsyms(f) ? + f.paramsyms : + nothing, + observed = __has_observed(f) ? + f.observed : + DEFAULT_OBSERVED, + sys = __has_sys(f) ? f.sys : nothing) where { + iip, + specialize, + } _f = prepare_function(f) if specialize === NoSpecialize ImplicitDiscreteFunction{iip, specialize, Any, Any, Any, Any, Any, Any, Any}(_f, @@ -2993,32 +2993,32 @@ function unwrapped_f(f::ImplicitDiscreteFunction, newf = unwrapped_f(f.f)) end function SDEFunction{iip, specialize}(f, g; - mass_matrix = __has_mass_matrix(f) ? f.mass_matrix : - I, - analytic = __has_analytic(f) ? f.analytic : nothing, - tgrad = __has_tgrad(f) ? f.tgrad : nothing, - jac = __has_jac(f) ? f.jac : nothing, - jvp = __has_jvp(f) ? f.jvp : nothing, - vjp = __has_vjp(f) ? f.vjp : nothing, - jac_prototype = __has_jac_prototype(f) ? - f.jac_prototype : + mass_matrix = __has_mass_matrix(f) ? f.mass_matrix : + I, + analytic = __has_analytic(f) ? f.analytic : nothing, + tgrad = __has_tgrad(f) ? f.tgrad : nothing, + jac = __has_jac(f) ? f.jac : nothing, + jvp = __has_jvp(f) ? f.jvp : nothing, + vjp = __has_vjp(f) ? f.vjp : nothing, + jac_prototype = __has_jac_prototype(f) ? + f.jac_prototype : + nothing, + sparsity = __has_sparsity(f) ? f.sparsity : + jac_prototype, + Wfact = __has_Wfact(f) ? f.Wfact : nothing, + Wfact_t = __has_Wfact_t(f) ? f.Wfact_t : nothing, + paramjac = __has_paramjac(f) ? f.paramjac : nothing, + ggprime = nothing, + syms = __has_syms(f) ? f.syms : nothing, + indepsym = __has_indepsym(f) ? f.indepsym : nothing, + paramsyms = __has_paramsyms(f) ? f.paramsyms : nothing, - sparsity = __has_sparsity(f) ? f.sparsity : - jac_prototype, - Wfact = __has_Wfact(f) ? f.Wfact : nothing, - Wfact_t = __has_Wfact_t(f) ? f.Wfact_t : nothing, - paramjac = __has_paramjac(f) ? f.paramjac : nothing, - ggprime = nothing, - syms = __has_syms(f) ? f.syms : nothing, - indepsym = __has_indepsym(f) ? f.indepsym : nothing, - paramsyms = __has_paramsyms(f) ? f.paramsyms : - nothing, - observed = __has_observed(f) ? f.observed : - DEFAULT_OBSERVED, - colorvec = __has_colorvec(f) ? f.colorvec : nothing, - sys = __has_sys(f) ? f.sys : nothing) where {iip, - specialize, -} + observed = __has_observed(f) ? f.observed : + DEFAULT_OBSERVED, + colorvec = __has_colorvec(f) ? f.colorvec : nothing, + sys = __has_sys(f) ? f.sys : nothing) where {iip, + specialize, + } if jac === nothing && isa(jac_prototype, AbstractSciMLOperator) if iip jac = update_coefficients! 
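A sketch of the drift/diffusion pair that the `SDEFunction` constructor above wires together (geometric Brownian motion here is only an example).

using SciMLBase

drift(u, p, t)     = p[1] * u
diffusion(u, p, t) = p[2] * u
prob = SDEProblem(drift, diffusion, 0.5, (0.0, 1.0), [1.01, 0.87])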
#(J,u,p,t) @@ -3085,7 +3085,7 @@ function SDEFunction{iip, specialize}(f, g; end function unwrapped_f(f::SDEFunction, newf = unwrapped_f(f.f), - newg = unwrapped_f(f.g)) + newg = unwrapped_f(f.g)) specialize = specialization(f) if specialize === NoSpecialize @@ -3148,9 +3148,9 @@ end SDEFunction(f::SDEFunction; kwargs...) = f @add_kwonly function SplitSDEFunction(f1, f2, g, mass_matrix, cache, analytic, tgrad, jac, - jvp, vjp, - jac_prototype, Wfact, Wfact_t, paramjac, observed, - syms, indepsym, paramsyms, colorvec, sys) + jvp, vjp, + jac_prototype, Wfact, Wfact_t, paramjac, observed, + syms, indepsym, paramsyms, colorvec, sys) f1 = f1 isa AbstractSciMLOperator ? f1 : SDEFunction(f1) f2 = SDEFunction(f2) SplitFunction{isinplace(f2), typeof(f1), typeof(f2), typeof(g), typeof(mass_matrix), @@ -3165,38 +3165,38 @@ SDEFunction(f::SDEFunction; kwargs...) = f end function SplitSDEFunction{iip, specialize}(f1, f2, g; - mass_matrix = __has_mass_matrix(f1) ? - f1.mass_matrix : - I, - _func_cache = nothing, - analytic = __has_analytic(f1) ? f1.analytic : - nothing, - tgrad = __has_tgrad(f1) ? f1.tgrad : nothing, - jac = __has_jac(f1) ? f1.jac : nothing, - jac_prototype = __has_jac_prototype(f1) ? - f1.jac_prototype : nothing, - sparsity = __has_sparsity(f1) ? f1.sparsity : - jac_prototype, - jvp = __has_jvp(f1) ? f1.jvp : nothing, - vjp = __has_vjp(f1) ? f1.vjp : nothing, - Wfact = __has_Wfact(f1) ? f1.Wfact : nothing, - Wfact_t = __has_Wfact_t(f1) ? f1.Wfact_t : - nothing, - paramjac = __has_paramjac(f1) ? f1.paramjac : - nothing, - syms = __has_syms(f1) ? f1.syms : nothing, - indepsym = __has_indepsym(f1) ? f1.indepsym : - nothing, - paramsyms = __has_paramsyms(f1) ? f1.paramsyms : - nothing, - observed = __has_observed(f1) ? f1.observed : - DEFAULT_OBSERVED, - colorvec = __has_colorvec(f1) ? f1.colorvec : - nothing, - sys = __has_sys(f1) ? f1.sys : nothing) where { - iip, - specialize, -} + mass_matrix = __has_mass_matrix(f1) ? + f1.mass_matrix : + I, + _func_cache = nothing, + analytic = __has_analytic(f1) ? f1.analytic : + nothing, + tgrad = __has_tgrad(f1) ? f1.tgrad : nothing, + jac = __has_jac(f1) ? f1.jac : nothing, + jac_prototype = __has_jac_prototype(f1) ? + f1.jac_prototype : nothing, + sparsity = __has_sparsity(f1) ? f1.sparsity : + jac_prototype, + jvp = __has_jvp(f1) ? f1.jvp : nothing, + vjp = __has_vjp(f1) ? f1.vjp : nothing, + Wfact = __has_Wfact(f1) ? f1.Wfact : nothing, + Wfact_t = __has_Wfact_t(f1) ? f1.Wfact_t : + nothing, + paramjac = __has_paramjac(f1) ? f1.paramjac : + nothing, + syms = __has_syms(f1) ? f1.syms : nothing, + indepsym = __has_indepsym(f1) ? f1.indepsym : + nothing, + paramsyms = __has_paramsyms(f1) ? f1.paramsyms : + nothing, + observed = __has_observed(f1) ? f1.observed : + DEFAULT_OBSERVED, + colorvec = __has_colorvec(f1) ? f1.colorvec : + nothing, + sys = __has_sys(f1) ? f1.sys : nothing) where { + iip, + specialize, + } if specialize === NoSpecialize SplitSDEFunction{iip, specialize, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, @@ -3233,10 +3233,10 @@ end SplitSDEFunction(f::SplitSDEFunction; kwargs...) = f @add_kwonly function DynamicalSDEFunction(f1, f2, g, mass_matrix, cache, analytic, tgrad, - jac, jvp, vjp, - jac_prototype, Wfact, Wfact_t, paramjac, - syms, indepsym, paramsyms, observed, colorvec, - sys) + jac, jvp, vjp, + jac_prototype, Wfact, Wfact_t, paramjac, + syms, indepsym, paramsyms, observed, colorvec, + sys) f1 = f1 isa AbstractSciMLOperator ? 
f1 : SDEFunction(f1) f2 = SDEFunction(f2) DynamicalSDEFunction{isinplace(f2), FullSpecialize, typeof(f1), typeof(f2), typeof(g), @@ -3252,37 +3252,37 @@ SplitSDEFunction(f::SplitSDEFunction; kwargs...) = f end function DynamicalSDEFunction{iip, specialize}(f1, f2, g; - mass_matrix = __has_mass_matrix(f1) ? - f1.mass_matrix : I, - _func_cache = nothing, - analytic = __has_analytic(f1) ? f1.analytic : - nothing, - tgrad = __has_tgrad(f1) ? f1.tgrad : nothing, - jac = __has_jac(f1) ? f1.jac : nothing, - jac_prototype = __has_jac_prototype(f1) ? - f1.jac_prototype : nothing, - sparsity = __has_sparsity(f1) ? f1.sparsity : - jac_prototype, - jvp = __has_jvp(f1) ? f1.jvp : nothing, - vjp = __has_vjp(f1) ? f1.vjp : nothing, - Wfact = __has_Wfact(f1) ? f1.Wfact : nothing, - Wfact_t = __has_Wfact_t(f1) ? f1.Wfact_t : - nothing, - paramjac = __has_paramjac(f1) ? f1.paramjac : - nothing, - syms = __has_syms(f1) ? f1.syms : nothing, - indepsym = __has_indepsym(f1) ? f1.indepsym : - nothing, - paramsyms = __has_paramsyms(f1) ? - f1.paramsyms : nothing, - observed = __has_observed(f1) ? f1.observed : - DEFAULT_OBSERVED, - colorvec = __has_colorvec(f1) ? f1.colorvec : - nothing, - sys = __has_sys(f1) ? f1.sys : nothing) where { - iip, - specialize, -} + mass_matrix = __has_mass_matrix(f1) ? + f1.mass_matrix : I, + _func_cache = nothing, + analytic = __has_analytic(f1) ? f1.analytic : + nothing, + tgrad = __has_tgrad(f1) ? f1.tgrad : nothing, + jac = __has_jac(f1) ? f1.jac : nothing, + jac_prototype = __has_jac_prototype(f1) ? + f1.jac_prototype : nothing, + sparsity = __has_sparsity(f1) ? f1.sparsity : + jac_prototype, + jvp = __has_jvp(f1) ? f1.jvp : nothing, + vjp = __has_vjp(f1) ? f1.vjp : nothing, + Wfact = __has_Wfact(f1) ? f1.Wfact : nothing, + Wfact_t = __has_Wfact_t(f1) ? f1.Wfact_t : + nothing, + paramjac = __has_paramjac(f1) ? f1.paramjac : + nothing, + syms = __has_syms(f1) ? f1.syms : nothing, + indepsym = __has_indepsym(f1) ? f1.indepsym : + nothing, + paramsyms = __has_paramsyms(f1) ? + f1.paramsyms : nothing, + observed = __has_observed(f1) ? f1.observed : + DEFAULT_OBSERVED, + colorvec = __has_colorvec(f1) ? f1.colorvec : + nothing, + sys = __has_sys(f1) ? f1.sys : nothing) where { + iip, + specialize, + } if specialize === NoSpecialize DynamicalSDEFunction{iip, specialize, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, @@ -3319,33 +3319,33 @@ end DynamicalSDEFunction(f::DynamicalSDEFunction; kwargs...) = f function RODEFunction{iip, specialize}(f; - mass_matrix = __has_mass_matrix(f) ? f.mass_matrix : - I, - analytic = __has_analytic(f) ? f.analytic : nothing, - tgrad = __has_tgrad(f) ? f.tgrad : nothing, - jac = __has_jac(f) ? f.jac : nothing, - jvp = __has_jvp(f) ? f.jvp : nothing, - vjp = __has_vjp(f) ? f.vjp : nothing, - jac_prototype = __has_jac_prototype(f) ? - f.jac_prototype : + mass_matrix = __has_mass_matrix(f) ? f.mass_matrix : + I, + analytic = __has_analytic(f) ? f.analytic : nothing, + tgrad = __has_tgrad(f) ? f.tgrad : nothing, + jac = __has_jac(f) ? f.jac : nothing, + jvp = __has_jvp(f) ? f.jvp : nothing, + vjp = __has_vjp(f) ? f.vjp : nothing, + jac_prototype = __has_jac_prototype(f) ? + f.jac_prototype : + nothing, + sparsity = __has_sparsity(f) ? f.sparsity : + jac_prototype, + Wfact = __has_Wfact(f) ? f.Wfact : nothing, + Wfact_t = __has_Wfact_t(f) ? f.Wfact_t : nothing, + paramjac = __has_paramjac(f) ? f.paramjac : nothing, + syms = __has_syms(f) ? f.syms : nothing, + indepsym = __has_indepsym(f) ? 
f.indepsym : nothing, + paramsyms = __has_paramsyms(f) ? f.paramsyms : nothing, - sparsity = __has_sparsity(f) ? f.sparsity : - jac_prototype, - Wfact = __has_Wfact(f) ? f.Wfact : nothing, - Wfact_t = __has_Wfact_t(f) ? f.Wfact_t : nothing, - paramjac = __has_paramjac(f) ? f.paramjac : nothing, - syms = __has_syms(f) ? f.syms : nothing, - indepsym = __has_indepsym(f) ? f.indepsym : nothing, - paramsyms = __has_paramsyms(f) ? f.paramsyms : - nothing, - observed = __has_observed(f) ? f.observed : - DEFAULT_OBSERVED, - colorvec = __has_colorvec(f) ? f.colorvec : nothing, - sys = __has_sys(f) ? f.sys : nothing, - analytic_full = __has_analytic_full(f) ? - f.analytic_full : false) where {iip, - specialize, -} + observed = __has_observed(f) ? f.observed : + DEFAULT_OBSERVED, + colorvec = __has_colorvec(f) ? f.colorvec : nothing, + sys = __has_sys(f) ? f.sys : nothing, + analytic_full = __has_analytic_full(f) ? + f.analytic_full : false) where {iip, + specialize, + } if jac === nothing && isa(jac_prototype, AbstractSciMLOperator) if iip jac = update_coefficients! #(J,u,p,t) @@ -3419,29 +3419,29 @@ end RODEFunction(f::RODEFunction; kwargs...) = f function DAEFunction{iip, specialize}(f; - analytic = __has_analytic(f) ? f.analytic : nothing, - tgrad = __has_tgrad(f) ? f.tgrad : nothing, - jac = __has_jac(f) ? f.jac : nothing, - jvp = __has_jvp(f) ? f.jvp : nothing, - vjp = __has_vjp(f) ? f.vjp : nothing, - jac_prototype = __has_jac_prototype(f) ? - f.jac_prototype : + analytic = __has_analytic(f) ? f.analytic : nothing, + tgrad = __has_tgrad(f) ? f.tgrad : nothing, + jac = __has_jac(f) ? f.jac : nothing, + jvp = __has_jvp(f) ? f.jvp : nothing, + vjp = __has_vjp(f) ? f.vjp : nothing, + jac_prototype = __has_jac_prototype(f) ? + f.jac_prototype : + nothing, + sparsity = __has_sparsity(f) ? f.sparsity : + jac_prototype, + Wfact = __has_Wfact(f) ? f.Wfact : nothing, + Wfact_t = __has_Wfact_t(f) ? f.Wfact_t : nothing, + paramjac = __has_paramjac(f) ? f.paramjac : nothing, + syms = __has_syms(f) ? f.syms : nothing, + indepsym = __has_indepsym(f) ? f.indepsym : nothing, + paramsyms = __has_paramsyms(f) ? f.paramsyms : nothing, - sparsity = __has_sparsity(f) ? f.sparsity : - jac_prototype, - Wfact = __has_Wfact(f) ? f.Wfact : nothing, - Wfact_t = __has_Wfact_t(f) ? f.Wfact_t : nothing, - paramjac = __has_paramjac(f) ? f.paramjac : nothing, - syms = __has_syms(f) ? f.syms : nothing, - indepsym = __has_indepsym(f) ? f.indepsym : nothing, - paramsyms = __has_paramsyms(f) ? f.paramsyms : - nothing, - observed = __has_observed(f) ? f.observed : - DEFAULT_OBSERVED, - colorvec = __has_colorvec(f) ? f.colorvec : nothing, - sys = __has_sys(f) ? f.sys : nothing) where {iip, - specialize, -} + observed = __has_observed(f) ? f.observed : + DEFAULT_OBSERVED, + colorvec = __has_colorvec(f) ? f.colorvec : nothing, + sys = __has_sys(f) ? f.sys : nothing) where {iip, + specialize, + } if jac === nothing && isa(jac_prototype, AbstractSciMLOperator) if iip jac = update_coefficients! #(J,u,p,t) @@ -3501,31 +3501,31 @@ DAEFunction(f; kwargs...) = DAEFunction{isinplace(f, 5), FullSpecialize}(f; kwar DAEFunction(f::DAEFunction; kwargs...) = f function DDEFunction{iip, specialize}(f; - mass_matrix = __has_mass_matrix(f) ? f.mass_matrix : - I, - analytic = __has_analytic(f) ? f.analytic : nothing, - tgrad = __has_tgrad(f) ? f.tgrad : nothing, - jac = __has_jac(f) ? f.jac : nothing, - jvp = __has_jvp(f) ? f.jvp : nothing, - vjp = __has_vjp(f) ? f.vjp : nothing, - jac_prototype = __has_jac_prototype(f) ? 
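A sketch of the implicit residual form expected by `DAEFunction`/`DAEProblem` (the toy index-1 system and the `differential_vars` flags are illustrative assumptions).

using SciMLBase

function dae_resid!(resid, du, u, p, t)
    resid[1] = du[1] + p[1] * u[1]      # differential equation
    resid[2] = u[1] + u[2] - 1.0        # algebraic constraint
end
prob = DAEProblem(dae_resid!, [-0.5, 0.0], [1.0, 0.0], (0.0, 1.0), [0.5];
                  differential_vars = [true, false])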
- f.jac_prototype : + mass_matrix = __has_mass_matrix(f) ? f.mass_matrix : + I, + analytic = __has_analytic(f) ? f.analytic : nothing, + tgrad = __has_tgrad(f) ? f.tgrad : nothing, + jac = __has_jac(f) ? f.jac : nothing, + jvp = __has_jvp(f) ? f.jvp : nothing, + vjp = __has_vjp(f) ? f.vjp : nothing, + jac_prototype = __has_jac_prototype(f) ? + f.jac_prototype : + nothing, + sparsity = __has_sparsity(f) ? f.sparsity : + jac_prototype, + Wfact = __has_Wfact(f) ? f.Wfact : nothing, + Wfact_t = __has_Wfact_t(f) ? f.Wfact_t : nothing, + paramjac = __has_paramjac(f) ? f.paramjac : nothing, + syms = __has_syms(f) ? f.syms : nothing, + indepsym = __has_indepsym(f) ? f.indepsym : nothing, + paramsyms = __has_paramsyms(f) ? f.paramsyms : nothing, - sparsity = __has_sparsity(f) ? f.sparsity : - jac_prototype, - Wfact = __has_Wfact(f) ? f.Wfact : nothing, - Wfact_t = __has_Wfact_t(f) ? f.Wfact_t : nothing, - paramjac = __has_paramjac(f) ? f.paramjac : nothing, - syms = __has_syms(f) ? f.syms : nothing, - indepsym = __has_indepsym(f) ? f.indepsym : nothing, - paramsyms = __has_paramsyms(f) ? f.paramsyms : - nothing, - observed = __has_observed(f) ? f.observed : - DEFAULT_OBSERVED, - colorvec = __has_colorvec(f) ? f.colorvec : nothing, - sys = __has_sys(f) ? f.sys : nothing) where {iip, - specialize, -} + observed = __has_observed(f) ? f.observed : + DEFAULT_OBSERVED, + colorvec = __has_colorvec(f) ? f.colorvec : nothing, + sys = __has_sys(f) ? f.sys : nothing) where {iip, + specialize, + } if jac === nothing && isa(jac_prototype, AbstractSciMLOperator) if iip jac = update_coefficients! #(J,u,p,t) @@ -3598,11 +3598,11 @@ DDEFunction(f; kwargs...) = DDEFunction{isinplace(f, 5), FullSpecialize}(f; kwar DDEFunction(f::DDEFunction; kwargs...) = f @add_kwonly function DynamicalDDEFunction{iip}(f1, f2, mass_matrix, analytic, tgrad, jac, - jvp, vjp, - jac_prototype, sparsity, Wfact, Wfact_t, - paramjac, - syms, indepsym, paramsyms, observed, - colorvec) where {iip} + jvp, vjp, + jac_prototype, sparsity, Wfact, Wfact_t, + paramjac, + syms, indepsym, paramsyms, observed, + colorvec) where {iip} f1 = f1 isa AbstractSciMLOperator ? f1 : DDEFunction(f1) f2 = DDEFunction(f2) DynamicalDDEFunction{isinplace(f2), FullSpecialize, typeof(f1), typeof(f2), @@ -3619,36 +3619,36 @@ DDEFunction(f::DDEFunction; kwargs...) = f colorvec, sys) end function DynamicalDDEFunction{iip, specialize}(f1, f2; - mass_matrix = __has_mass_matrix(f1) ? - f1.mass_matrix : I, - analytic = __has_analytic(f1) ? f1.analytic : - nothing, - tgrad = __has_tgrad(f1) ? f1.tgrad : nothing, - jac = __has_jac(f1) ? f1.jac : nothing, - jvp = __has_jvp(f1) ? f1.jvp : nothing, - vjp = __has_vjp(f1) ? f1.vjp : nothing, - jac_prototype = __has_jac_prototype(f1) ? - f1.jac_prototype : nothing, - sparsity = __has_sparsity(f1) ? f1.sparsity : - jac_prototype, - Wfact = __has_Wfact(f1) ? f1.Wfact : nothing, - Wfact_t = __has_Wfact_t(f1) ? f1.Wfact_t : - nothing, - paramjac = __has_paramjac(f1) ? f1.paramjac : - nothing, - syms = __has_syms(f1) ? f1.syms : nothing, - indepsym = __has_indepsym(f1) ? f1.indepsym : - nothing, - paramsyms = __has_paramsyms(f1) ? - f1.paramsyms : nothing, - observed = __has_observed(f1) ? f1.observed : - DEFAULT_OBSERVED, - colorvec = __has_colorvec(f1) ? f1.colorvec : - nothing, - sys = __has_sys(f1) ? f1.sys : nothing) where { - iip, - specialize, -} + mass_matrix = __has_mass_matrix(f1) ? + f1.mass_matrix : I, + analytic = __has_analytic(f1) ? f1.analytic : + nothing, + tgrad = __has_tgrad(f1) ? 
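A sketch of the delayed right-hand side handled by `DDEFunction` above (lag, history function, and feedback model are placeholders).

using SciMLBase

function dde_rhs!(du, u, h, p, t)
    du[1] = -h(p, t - p[1])[1]          # feedback delayed by the lag p[1]
end
hist(p, t) = [1.0]                      # constant history for t ≤ 0
prob = DDEProblem(dde_rhs!, [1.0], hist, (0.0, 10.0), [1.0]; constant_lags = [1.0])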
f1.tgrad : nothing, + jac = __has_jac(f1) ? f1.jac : nothing, + jvp = __has_jvp(f1) ? f1.jvp : nothing, + vjp = __has_vjp(f1) ? f1.vjp : nothing, + jac_prototype = __has_jac_prototype(f1) ? + f1.jac_prototype : nothing, + sparsity = __has_sparsity(f1) ? f1.sparsity : + jac_prototype, + Wfact = __has_Wfact(f1) ? f1.Wfact : nothing, + Wfact_t = __has_Wfact_t(f1) ? f1.Wfact_t : + nothing, + paramjac = __has_paramjac(f1) ? f1.paramjac : + nothing, + syms = __has_syms(f1) ? f1.syms : nothing, + indepsym = __has_indepsym(f1) ? f1.indepsym : + nothing, + paramsyms = __has_paramsyms(f1) ? + f1.paramsyms : nothing, + observed = __has_observed(f1) ? f1.observed : + DEFAULT_OBSERVED, + colorvec = __has_colorvec(f1) ? f1.colorvec : + nothing, + sys = __has_sys(f1) ? f1.sys : nothing) where { + iip, + specialize, + } if specialize === NoSpecialize DynamicalDDEFunction{iip, specialize, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, @@ -3690,32 +3690,32 @@ end DynamicalDDEFunction(f::DynamicalDDEFunction; kwargs...) = f function SDDEFunction{iip, specialize}(f, g; - mass_matrix = __has_mass_matrix(f) ? f.mass_matrix : - I, - analytic = __has_analytic(f) ? f.analytic : nothing, - tgrad = __has_tgrad(f) ? f.tgrad : nothing, - jac = __has_jac(f) ? f.jac : nothing, - jvp = __has_jvp(f) ? f.jvp : nothing, - vjp = __has_vjp(f) ? f.vjp : nothing, - jac_prototype = __has_jac_prototype(f) ? - f.jac_prototype : + mass_matrix = __has_mass_matrix(f) ? f.mass_matrix : + I, + analytic = __has_analytic(f) ? f.analytic : nothing, + tgrad = __has_tgrad(f) ? f.tgrad : nothing, + jac = __has_jac(f) ? f.jac : nothing, + jvp = __has_jvp(f) ? f.jvp : nothing, + vjp = __has_vjp(f) ? f.vjp : nothing, + jac_prototype = __has_jac_prototype(f) ? + f.jac_prototype : + nothing, + sparsity = __has_sparsity(f) ? f.sparsity : + jac_prototype, + Wfact = __has_Wfact(f) ? f.Wfact : nothing, + Wfact_t = __has_Wfact_t(f) ? f.Wfact_t : nothing, + paramjac = __has_paramjac(f) ? f.paramjac : nothing, + ggprime = nothing, + syms = __has_syms(f) ? f.syms : nothing, + indepsym = __has_indepsym(f) ? f.indepsym : nothing, + paramsyms = __has_paramsyms(f) ? f.paramsyms : nothing, - sparsity = __has_sparsity(f) ? f.sparsity : - jac_prototype, - Wfact = __has_Wfact(f) ? f.Wfact : nothing, - Wfact_t = __has_Wfact_t(f) ? f.Wfact_t : nothing, - paramjac = __has_paramjac(f) ? f.paramjac : nothing, - ggprime = nothing, - syms = __has_syms(f) ? f.syms : nothing, - indepsym = __has_indepsym(f) ? f.indepsym : nothing, - paramsyms = __has_paramsyms(f) ? f.paramsyms : - nothing, - observed = __has_observed(f) ? f.observed : - DEFAULT_OBSERVED, - colorvec = __has_colorvec(f) ? f.colorvec : nothing, - sys = __has_sys(f) ? f.sys : nothing) where {iip, - specialize, -} + observed = __has_observed(f) ? f.observed : + DEFAULT_OBSERVED, + colorvec = __has_colorvec(f) ? f.colorvec : nothing, + sys = __has_sys(f) ? f.sys : nothing) where {iip, + specialize, + } if jac === nothing && isa(jac_prototype, AbstractSciMLOperator) if iip jac = update_coefficients! #(J,u,p,t) @@ -3779,35 +3779,34 @@ end SDDEFunction(f::SDDEFunction; kwargs...) = f function NonlinearFunction{iip, specialize}(f; - mass_matrix = __has_mass_matrix(f) ? - f.mass_matrix : - I, - analytic = __has_analytic(f) ? f.analytic : - nothing, - tgrad = __has_tgrad(f) ? f.tgrad : nothing, - jac = __has_jac(f) ? f.jac : nothing, - jvp = __has_jvp(f) ? f.jvp : nothing, - vjp = __has_vjp(f) ? f.vjp : nothing, - jac_prototype = __has_jac_prototype(f) ? 
- f.jac_prototype : nothing, - sparsity = __has_sparsity(f) ? f.sparsity : - jac_prototype, - Wfact = __has_Wfact(f) ? f.Wfact : nothing, - Wfact_t = __has_Wfact_t(f) ? f.Wfact_t : - nothing, - paramjac = __has_paramjac(f) ? f.paramjac : - nothing, - syms = __has_syms(f) ? f.syms : nothing, - paramsyms = __has_paramsyms(f) ? f.paramsyms : - nothing, - observed = __has_observed(f) ? f.observed : - DEFAULT_OBSERVED_NO_TIME, - colorvec = __has_colorvec(f) ? f.colorvec : - nothing, - sys = __has_sys(f) ? f.sys : nothing, - resid_prototype = __has_resid_prototype(f) ? f.resid_prototype : nothing) where { - iip, specialize} - + mass_matrix = __has_mass_matrix(f) ? + f.mass_matrix : + I, + analytic = __has_analytic(f) ? f.analytic : + nothing, + tgrad = __has_tgrad(f) ? f.tgrad : nothing, + jac = __has_jac(f) ? f.jac : nothing, + jvp = __has_jvp(f) ? f.jvp : nothing, + vjp = __has_vjp(f) ? f.vjp : nothing, + jac_prototype = __has_jac_prototype(f) ? + f.jac_prototype : nothing, + sparsity = __has_sparsity(f) ? f.sparsity : + jac_prototype, + Wfact = __has_Wfact(f) ? f.Wfact : nothing, + Wfact_t = __has_Wfact_t(f) ? f.Wfact_t : + nothing, + paramjac = __has_paramjac(f) ? f.paramjac : + nothing, + syms = __has_syms(f) ? f.syms : nothing, + paramsyms = __has_paramsyms(f) ? f.paramsyms : + nothing, + observed = __has_observed(f) ? f.observed : + DEFAULT_OBSERVED_NO_TIME, + colorvec = __has_colorvec(f) ? f.colorvec : + nothing, + sys = __has_sys(f) ? f.sys : nothing, + resid_prototype = __has_resid_prototype(f) ? f.resid_prototype : nothing) where { + iip, specialize} if mass_matrix === I && f isa Tuple mass_matrix = ((I for i in 1:length(f))...,) end @@ -3881,20 +3880,20 @@ end NonlinearFunction(f::NonlinearFunction; kwargs...) = f function IntervalNonlinearFunction{iip, specialize}(f; - analytic = __has_analytic(f) ? - f.analytic : - nothing, - syms = __has_syms(f) ? f.syms : nothing, - paramsyms = __has_paramsyms(f) ? - f.paramsyms : - nothing, - observed = __has_observed(f) ? - f.observed : - DEFAULT_OBSERVED_NO_TIME, - sys = __has_sys(f) ? f.sys : nothing) where { - iip, - specialize, -} + analytic = __has_analytic(f) ? + f.analytic : + nothing, + syms = __has_syms(f) ? f.syms : nothing, + paramsyms = __has_paramsyms(f) ? + f.paramsyms : + nothing, + observed = __has_observed(f) ? + f.observed : + DEFAULT_OBSERVED_NO_TIME, + sys = __has_sys(f) ? f.sys : nothing) where { + iip, + specialize, + } _f = prepare_function(f) if specialize === NoSpecialize @@ -3934,67 +3933,67 @@ struct NoAD <: AbstractADType end OptimizationFunction(args...; kwargs...) = OptimizationFunction{true}(args...; kwargs...) function OptimizationFunction{iip}(f, adtype::AbstractADType = NoAD(); - grad = nothing, hess = nothing, hv = nothing, - cons = nothing, cons_j = nothing, cons_h = nothing, - hess_prototype = nothing, - cons_jac_prototype = __has_jac_prototype(f) ? - f.jac_prototype : nothing, - cons_hess_prototype = nothing, - syms = __has_syms(f) ? f.syms : nothing, - paramsyms = __has_paramsyms(f) ? f.paramsyms : nothing, - observed = __has_observed(f) ? f.observed : - DEFAULT_OBSERVED_NO_TIME, - expr = nothing, cons_expr = nothing, - sys = __has_sys(f) ? f.sys : nothing, - lag_h = nothing, lag_hess_prototype = nothing, - hess_colorvec = __has_colorvec(f) ? f.colorvec : nothing, - cons_jac_colorvec = __has_colorvec(f) ? f.colorvec : - nothing, - cons_hess_colorvec = __has_colorvec(f) ? 
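A sketch of the `NonlinearFunction` and `IntervalNonlinearFunction` constructors re-indented above (residuals below are illustrative; the interval form takes a scalar function and a bracketing tuple).

using SciMLBase

nlres(u, p) = u .* u .- p
nlf    = NonlinearFunction(nlres)
nlprob = NonlinearProblem(nlf, [1.0, 1.0], 2.0)

scalarres(u, p) = u^2 - p
iprob = IntervalNonlinearProblem(scalarres, (1.0, 2.0), 2.0)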
f.colorvec : - nothing, - lag_hess_colorvec = nothing) where {iip} + grad = nothing, hess = nothing, hv = nothing, + cons = nothing, cons_j = nothing, cons_h = nothing, + hess_prototype = nothing, + cons_jac_prototype = __has_jac_prototype(f) ? + f.jac_prototype : nothing, + cons_hess_prototype = nothing, + syms = __has_syms(f) ? f.syms : nothing, + paramsyms = __has_paramsyms(f) ? f.paramsyms : nothing, + observed = __has_observed(f) ? f.observed : + DEFAULT_OBSERVED_NO_TIME, + expr = nothing, cons_expr = nothing, + sys = __has_sys(f) ? f.sys : nothing, + lag_h = nothing, lag_hess_prototype = nothing, + hess_colorvec = __has_colorvec(f) ? f.colorvec : nothing, + cons_jac_colorvec = __has_colorvec(f) ? f.colorvec : + nothing, + cons_hess_colorvec = __has_colorvec(f) ? f.colorvec : + nothing, + lag_hess_colorvec = nothing) where {iip} isinplace(f, 2; has_two_dispatches = false, isoptimization = true) OptimizationFunction{iip, typeof(adtype), typeof(f), typeof(grad), typeof(hess), - typeof(hv), - typeof(cons), typeof(cons_j), typeof(cons_h), - typeof(hess_prototype), - typeof(cons_jac_prototype), typeof(cons_hess_prototype), - typeof(syms), typeof(paramsyms), typeof(observed), - typeof(expr), typeof(cons_expr), typeof(sys), typeof(lag_h), - typeof(lag_hess_prototype), typeof(hess_colorvec), - typeof(cons_jac_colorvec), typeof(cons_hess_colorvec), - typeof(lag_hess_colorvec) - }(f, adtype, grad, hess, - hv, cons, cons_j, cons_h, - hess_prototype, cons_jac_prototype, - cons_hess_prototype, syms, - paramsyms, observed, expr, cons_expr, sys, - lag_h, lag_hess_prototype, hess_colorvec, cons_jac_colorvec, - cons_hess_colorvec, lag_hess_colorvec) + typeof(hv), + typeof(cons), typeof(cons_j), typeof(cons_h), + typeof(hess_prototype), + typeof(cons_jac_prototype), typeof(cons_hess_prototype), + typeof(syms), typeof(paramsyms), typeof(observed), + typeof(expr), typeof(cons_expr), typeof(sys), typeof(lag_h), + typeof(lag_hess_prototype), typeof(hess_colorvec), + typeof(cons_jac_colorvec), typeof(cons_hess_colorvec), + typeof(lag_hess_colorvec), + }(f, adtype, grad, hess, + hv, cons, cons_j, cons_h, + hess_prototype, cons_jac_prototype, + cons_hess_prototype, syms, + paramsyms, observed, expr, cons_expr, sys, + lag_h, lag_hess_prototype, hess_colorvec, cons_jac_colorvec, + cons_hess_colorvec, lag_hess_colorvec) end function BVPFunction{iip, specialize, twopoint}(f, bc; - mass_matrix = __has_mass_matrix(f) ? f.mass_matrix : I, - analytic = __has_analytic(f) ? f.analytic : nothing, - tgrad = __has_tgrad(f) ? f.tgrad : nothing, - jac = __has_jac(f) ? f.jac : nothing, - bcjac = __has_jac(bc) ? bc.jac : nothing, - jvp = __has_jvp(f) ? f.jvp : nothing, - vjp = __has_vjp(f) ? f.vjp : nothing, - jac_prototype = __has_jac_prototype(f) ? f.jac_prototype : nothing, - bcjac_prototype = __has_jac_prototype(bc) ? bc.jac_prototype : nothing, - bcresid_prototype = nothing, - sparsity = __has_sparsity(f) ? f.sparsity : jac_prototype, - Wfact = __has_Wfact(f) ? f.Wfact : nothing, - Wfact_t = __has_Wfact_t(f) ? f.Wfact_t : nothing, - paramjac = __has_paramjac(f) ? f.paramjac : nothing, - syms = __has_syms(f) ? f.syms : nothing, - indepsym = __has_indepsym(f) ? f.indepsym : nothing, - paramsyms = __has_paramsyms(f) ? f.paramsyms : nothing, - observed = __has_observed(f) ? f.observed : DEFAULT_OBSERVED, - colorvec = __has_colorvec(f) ? f.colorvec : nothing, - bccolorvec = __has_colorvec(bc) ? bc.colorvec : nothing, - sys = __has_sys(f) ? 
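A sketch of the `OptimizationFunction` keyword constructor above; the Rosenbrock objective mirrors the downstream tests in this patch, and the bounds are illustrative.

using SciMLBase

rosenbrock(u, p) = (p[1] - u[1])^2 + p[2] * (u[2] - u[1]^2)^2
optf    = OptimizationFunction(rosenbrock)          # adtype defaults to NoAD()
optprob = OptimizationProblem(optf, zeros(2), [1.0, 100.0];
                              lb = [-1.0, -1.0], ub = [1.5, 1.5])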
f.sys : nothing) where {iip, specialize, twopoint} + mass_matrix = __has_mass_matrix(f) ? f.mass_matrix : I, + analytic = __has_analytic(f) ? f.analytic : nothing, + tgrad = __has_tgrad(f) ? f.tgrad : nothing, + jac = __has_jac(f) ? f.jac : nothing, + bcjac = __has_jac(bc) ? bc.jac : nothing, + jvp = __has_jvp(f) ? f.jvp : nothing, + vjp = __has_vjp(f) ? f.vjp : nothing, + jac_prototype = __has_jac_prototype(f) ? f.jac_prototype : nothing, + bcjac_prototype = __has_jac_prototype(bc) ? bc.jac_prototype : nothing, + bcresid_prototype = nothing, + sparsity = __has_sparsity(f) ? f.sparsity : jac_prototype, + Wfact = __has_Wfact(f) ? f.Wfact : nothing, + Wfact_t = __has_Wfact_t(f) ? f.Wfact_t : nothing, + paramjac = __has_paramjac(f) ? f.paramjac : nothing, + syms = __has_syms(f) ? f.syms : nothing, + indepsym = __has_indepsym(f) ? f.indepsym : nothing, + paramsyms = __has_paramsyms(f) ? f.paramsyms : nothing, + observed = __has_observed(f) ? f.observed : DEFAULT_OBSERVED, + colorvec = __has_colorvec(f) ? f.colorvec : nothing, + bccolorvec = __has_colorvec(bc) ? bc.colorvec : nothing, + sys = __has_sys(f) ? f.sys : nothing) where {iip, specialize, twopoint} if mass_matrix === I && f isa Tuple mass_matrix = ((I for i in 1:length(f))...,) end @@ -4051,7 +4050,8 @@ function BVPFunction{iip, specialize, twopoint}(f, bc; else @assert length(bcjac) == 2 bcjac = Tuple(bcjac) - if isinplace(first(bcjac), 3, "bcjac", bciip) != isinplace(last(bcjac), 3, "bcjac", bciip) + if isinplace(first(bcjac), 3, "bcjac", bciip) != + isinplace(last(bcjac), 3, "bcjac", bciip) throw(NonconformingFunctionsError(["bcjac[1]", "bcjac[2]"])) end isinplace(bcjac, 3, "bcjac", iip) @@ -4085,9 +4085,11 @@ function BVPFunction{iip, specialize, twopoint}(f, bc; last(bcresid_prototype)) end - bccolorvec !== nothing && length(bccolorvec) == 2 && (bccolorvec = Tuple(bccolorvec)) + bccolorvec !== nothing && length(bccolorvec) == 2 && + (bccolorvec = Tuple(bccolorvec)) - bcjac_prototype !== nothing && length(bcjac_prototype) == 2 && (bcjac_prototype = Tuple(bcjac_prototype)) + bcjac_prototype !== nothing && length(bcjac_prototype) == 2 && + (bcjac_prototype = Tuple(bcjac_prototype)) end if any(bc_nonconforming) @@ -4123,12 +4125,12 @@ function BVPFunction{iip, specialize, twopoint}(f, bc; end end -function BVPFunction{iip}(f, bc; twopoint::Union{Val, Bool}=Val(false), - kwargs...) where {iip} +function BVPFunction{iip}(f, bc; twopoint::Union{Val, Bool} = Val(false), + kwargs...) where {iip} BVPFunction{iip, FullSpecialize, _unwrap_val(twopoint)}(f, bc; kwargs...) end BVPFunction{iip}(f::BVPFunction, bc; kwargs...) where {iip} = f -function BVPFunction(f, bc; twopoint::Union{Val, Bool}=Val(false), kwargs...) +function BVPFunction(f, bc; twopoint::Union{Val, Bool} = Val(false), kwargs...) BVPFunction{isinplace(f, 4), FullSpecialize, _unwrap_val(twopoint)}(f, bc; kwargs...) end BVPFunction(f::BVPFunction; kwargs...) = f @@ -4158,7 +4160,7 @@ function IntegralFunction(f, integrand_prototype) end function BatchIntegralFunction{iip, specialize}(f, integrand_prototype; - max_batch::Integer = typemax(Int)) where {iip, specialize} + max_batch::Integer = typemax(Int)) where {iip, specialize} _f = prepare_function(f) BatchIntegralFunction{ iip, @@ -4171,8 +4173,8 @@ function BatchIntegralFunction{iip, specialize}(f, integrand_prototype; end function BatchIntegralFunction{iip}(f, - integrand_prototype; - kwargs...) where {iip} + integrand_prototype; + kwargs...) 
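A hedged sketch of the `BVPFunction` constructor and its `twopoint` keyword shown above (the harmonic-oscillator boundary-value setup and the boundary residuals are assumptions, not the package's canonical example).

using SciMLBase

function bvp_rhs!(du, u, p, t)
    du[1] = u[2]
    du[2] = -u[1]
end
function bvp_bc!(resid, u, p, t)            # `u` is the current solution iterate
    resid[1] = u[1][1] - 1.0                # left boundary condition
    resid[2] = u[end][1]                    # right boundary condition
end
bvpf = BVPFunction(bvp_rhs!, bvp_bc!)       # twopoint defaults to Val(false)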
where {iip} return BatchIntegralFunction{iip, FullSpecialize}(f, integrand_prototype; kwargs...) @@ -4312,8 +4314,8 @@ for S in [:ODEFunction :BatchIntegralFunction] @eval begin function ConstructionBase.constructorof(::Type{<:$S{iip}}) where { - iip, - } + iip, + } (args...) -> $S{iip, FullSpecialize, map(typeof, args)...}(args...) end end diff --git a/src/solutions/basic_solutions.jl b/src/solutions/basic_solutions.jl index 3f5160cde9..5d86d61222 100644 --- a/src/solutions/basic_solutions.jl +++ b/src/solutions/basic_solutions.jl @@ -30,8 +30,8 @@ struct LinearSolution{T, N, uType, R, A, C, S} <: AbstractLinearSolution{T, N} end function build_linear_solution(alg, u, resid, cache; - retcode = ReturnCode.Default, - iters = 0, stats = nothing) + retcode = ReturnCode.Default, + iters = 0, stats = nothing) T = eltype(eltype(u)) N = length((size(u)...,)) LinearSolution{T, N, typeof(u), typeof(resid), typeof(alg), typeof(cache), @@ -78,8 +78,8 @@ struct QuadratureSolution end @deprecate QuadratureSolution(args...; kwargs...) IntegralSolution(args...; kwargs...) function build_solution(prob::AbstractIntegralProblem, - alg, u, resid; chi = nothing, - retcode = ReturnCode.Default, stats = nothing, kwargs...) + alg, u, resid; chi = nothing, + retcode = ReturnCode.Default, stats = nothing, kwargs...) T = eltype(eltype(u)) N = length((size(u)...,)) diff --git a/src/solutions/dae_solutions.jl b/src/solutions/dae_solutions.jl index 9852c9349e..800f2ca5f6 100644 --- a/src/solutions/dae_solutions.jl +++ b/src/solutions/dae_solutions.jl @@ -54,17 +54,17 @@ end TruncatedStacktraces.@truncate_stacktrace DAESolution 1 2 function build_solution(prob::AbstractDAEProblem, alg, t, u, du = nothing; - timeseries_errors = length(u) > 2, - dense = false, - dense_errors = dense, - calculate_error = true, - k = nothing, - interp = du === nothing ? LinearInterpolation(t, u) : - HermiteInterpolation(t, u, du), - retcode = ReturnCode.Default, - destats = missing, - stats = nothing, - kwargs...) + timeseries_errors = length(u) > 2, + dense = false, + dense_errors = dense, + calculate_error = true, + k = nothing, + interp = du === nothing ? LinearInterpolation(t, u) : + HermiteInterpolation(t, u, du), + retcode = ReturnCode.Default, + destats = missing, + stats = nothing, + kwargs...) T = eltype(eltype(u)) if prob.u0 === nothing @@ -120,8 +120,8 @@ function build_solution(prob::AbstractDAEProblem, alg, t, u, du = nothing; end function calculate_solution_errors!(sol::AbstractDAESolution; - fill_uanalytic = true, timeseries_errors = true, - dense_errors = true) + fill_uanalytic = true, timeseries_errors = true, + dense_errors = true) prob = sol.prob f = prob.f diff --git a/src/solutions/nonlinear_solutions.jl b/src/solutions/nonlinear_solutions.jl index 2a8eedb70c..532f89d967 100644 --- a/src/solutions/nonlinear_solutions.jl +++ b/src/solutions/nonlinear_solutions.jl @@ -73,14 +73,14 @@ const SteadyStateSolution = NonlinearSolution get_p(p::AbstractNonlinearSolution) = p.prob.p function build_solution(prob::AbstractNonlinearProblem, - alg, u, resid; calculate_error = true, - retcode = ReturnCode.Default, - original = nothing, - left = nothing, - right = nothing, - stats = nothing, - trace = nothing, - kwargs...) + alg, u, resid; calculate_error = true, + retcode = ReturnCode.Default, + original = nothing, + left = nothing, + right = nothing, + stats = nothing, + trace = nothing, + kwargs...) 
T = eltype(eltype(u)) N = ndims(u) diff --git a/src/solutions/ode_solutions.jl b/src/solutions/ode_solutions.jl index b3ba74fe6e..9474cce71c 100644 --- a/src/solutions/ode_solutions.jl +++ b/src/solutions/ode_solutions.jl @@ -56,8 +56,7 @@ function Base.show(io::IO, ::MIME"text/plain", s::DEStats) end function Base.merge(a::DEStats, b::DEStats) - DEStats( - a.nf + b.nf, + DEStats(a.nf + b.nf, a.nf2 + b.nf2, a.nw + b.nw, a.nsolve + b.nsolve, @@ -67,8 +66,7 @@ function Base.merge(a::DEStats, b::DEStats) a.ncondition + b.ncondition, a.naccept + b.naccept, a.nreject + b.nreject, - max(a.maxeig, b.maxeig), - ) + max(a.maxeig, b.maxeig)) end """ @@ -126,7 +124,7 @@ Base.@propagate_inbounds function Base.getproperty(x::AbstractODESolution, s::Sy end function ODESolution{T, N}(u, u_analytic, errors, t, k, prob, alg, interp, dense, - tslocation, stats, alg_choice, retcode) where {T, N} + tslocation, stats, alg_choice, retcode) where {T, N} return ODESolution{T, N, typeof(u), typeof(u_analytic), typeof(errors), typeof(t), typeof(k), typeof(prob), typeof(alg), typeof(interp), typeof(stats), @@ -135,35 +133,35 @@ function ODESolution{T, N}(u, u_analytic, errors, t, k, prob, alg, interp, dense end function (sol::AbstractODESolution)(t, ::Type{deriv} = Val{0}; idxs = nothing, - continuity = :left) where {deriv} + continuity = :left) where {deriv} sol(t, deriv, idxs, continuity) end function (sol::AbstractODESolution)(v, t, ::Type{deriv} = Val{0}; idxs = nothing, - continuity = :left) where {deriv} + continuity = :left) where {deriv} sol.interp(v, t, idxs, deriv, sol.prob.p, continuity) end function (sol::AbstractODESolution)(t::Number, ::Type{deriv}, idxs::Nothing, - continuity) where {deriv} + continuity) where {deriv} sol.interp(t, idxs, deriv, sol.prob.p, continuity) end function (sol::AbstractODESolution)(t::AbstractVector{<:Number}, ::Type{deriv}, - idxs::Nothing, continuity) where {deriv} + idxs::Nothing, continuity) where {deriv} augment(sol.interp(t, idxs, deriv, sol.prob.p, continuity), sol) end function (sol::AbstractODESolution)(t::Number, ::Type{deriv}, idxs::Integer, - continuity) where {deriv} + continuity) where {deriv} sol.interp(t, idxs, deriv, sol.prob.p, continuity) end function (sol::AbstractODESolution)(t::Number, ::Type{deriv}, - idxs::AbstractVector{<:Integer}, - continuity) where {deriv} + idxs::AbstractVector{<:Integer}, + continuity) where {deriv} sol.interp(t, idxs, deriv, sol.prob.p, continuity) end function (sol::AbstractODESolution)(t::AbstractVector{<:Number}, ::Type{deriv}, - idxs::Integer, continuity) where {deriv} + idxs::Integer, continuity) where {deriv} A = sol.interp(t, idxs, deriv, sol.prob.p, continuity) observed = has_observed(sol.prob.f) ? sol.prob.f.observed : DEFAULT_OBSERVED p = hasproperty(sol.prob, :p) ? sol.prob.p : nothing @@ -181,8 +179,8 @@ function (sol::AbstractODESolution)(t::AbstractVector{<:Number}, ::Type{deriv}, end end function (sol::AbstractODESolution)(t::AbstractVector{<:Number}, ::Type{deriv}, - idxs::AbstractVector{<:Integer}, - continuity) where {deriv} + idxs::AbstractVector{<:Integer}, + continuity) where {deriv} A = sol.interp(t, idxs, deriv, sol.prob.p, continuity) observed = has_observed(sol.prob.f) ? sol.prob.f.observed : DEFAULT_OBSERVED p = hasproperty(sol.prob, :p) ? 
sol.prob.p : nothing @@ -201,20 +199,20 @@ function (sol::AbstractODESolution)(t::AbstractVector{<:Number}, ::Type{deriv}, end function (sol::AbstractODESolution)(t::Number, ::Type{deriv}, idxs, - continuity) where {deriv} + continuity) where {deriv} issymbollike(idxs) || error("Incorrect specification of `idxs`") augment(sol.interp([t], nothing, deriv, sol.prob.p, continuity), sol)[idxs][1] end function (sol::AbstractODESolution)(t::Number, ::Type{deriv}, idxs::AbstractVector, - continuity) where {deriv} + continuity) where {deriv} all(issymbollike.(idxs)) || error("Incorrect specification of `idxs`") interp_sol = augment(sol.interp([t], nothing, deriv, sol.prob.p, continuity), sol) [first(interp_sol[idx]) for idx in idxs] end function (sol::AbstractODESolution)(t::AbstractVector{<:Number}, ::Type{deriv}, idxs, - continuity) where {deriv} + continuity) where {deriv} issymbollike(idxs) || error("Incorrect specification of `idxs`") interp_sol = augment(sol.interp(t, nothing, deriv, sol.prob.p, continuity), sol) observed = has_observed(sol.prob.f) ? sol.prob.f.observed : DEFAULT_OBSERVED @@ -228,7 +226,7 @@ function (sol::AbstractODESolution)(t::AbstractVector{<:Number}, ::Type{deriv}, end function (sol::AbstractODESolution)(t::AbstractVector{<:Number}, ::Type{deriv}, - idxs::AbstractVector, continuity) where {deriv} + idxs::AbstractVector, continuity) where {deriv} all(issymbollike.(idxs)) || error("Incorrect specification of `idxs`") interp_sol = augment(sol.interp(t, nothing, deriv, sol.prob.p, continuity), sol) observed = has_observed(sol.prob.f) ? sol.prob.f.observed : DEFAULT_OBSERVED @@ -245,14 +243,14 @@ function (sol::AbstractODESolution)(t::AbstractVector{<:Number}, ::Type{deriv}, end function build_solution(prob::Union{AbstractODEProblem, AbstractDDEProblem}, - alg, t, u; timeseries_errors = length(u) > 2, - dense = false, dense_errors = dense, - calculate_error = true, - k = nothing, - alg_choice = nothing, - interp = LinearInterpolation(t, u), - retcode = ReturnCode.Default, destats = missing, stats = nothing, - kwargs...) + alg, t, u; timeseries_errors = length(u) > 2, + dense = false, dense_errors = dense, + calculate_error = true, + k = nothing, + alg_choice = nothing, + interp = LinearInterpolation(t, u), + retcode = ReturnCode.Default, destats = missing, stats = nothing, + kwargs...) T = eltype(eltype(u)) if prob.u0 === nothing @@ -314,7 +312,7 @@ function build_solution(prob::Union{AbstractODEProblem, AbstractDDEProblem}, end function calculate_solution_errors!(sol::AbstractODESolution; fill_uanalytic = true, - timeseries_errors = true, dense_errors = true) + timeseries_errors = true, dense_errors = true) f = sol.prob.f if fill_uanalytic diff --git a/src/solutions/optimization_solutions.jl b/src/solutions/optimization_solutions.jl index 9f04a5bdd4..a16490345d 100644 --- a/src/solutions/optimization_solutions.jl +++ b/src/solutions/optimization_solutions.jl @@ -30,12 +30,12 @@ struct OptimizationSolution{T, N, uType, C <: AbstractOptimizationCache, A, OV, end function build_solution(cache::AbstractOptimizationCache, - alg, u, objective; - retcode = ReturnCode.Default, - original = nothing, - solve_time = nothing, - stats = nothing, - kwargs...) + alg, u, objective; + retcode = ReturnCode.Default, + original = nothing, + solve_time = nothing, + stats = nothing, + kwargs...) 
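A sketch of the call-overload interface on `AbstractODESolution` reformatted above, assuming `sol` came from a dense ODE solve (e.g. OrdinaryDiffEq's Tsit5).

sol(0.5)                    # interpolated full state at t = 0.5
sol(0.5; idxs = 1)          # only the first state variable
sol([0.1, 0.2, 0.3])        # interpolation at a vector of times
sol(0.5, Val{1})            # first derivative of the interpolant at t = 0.5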
T = eltype(eltype(u)) N = ndims(u) @@ -63,10 +63,10 @@ end # for compatibility function build_solution(prob::AbstractOptimizationProblem, - alg, u, objective; - retcode = ReturnCode.Default, - original = nothing, - kwargs...) + alg, u, objective; + retcode = ReturnCode.Default, + original = nothing, + kwargs...) T = eltype(eltype(u)) N = ndims(u) @@ -104,7 +104,7 @@ function Base.show(io::IO, A::AbstractOptimizationSolution) end Base.@propagate_inbounds function Base.getproperty(x::AbstractOptimizationSolution, - s::Symbol) + s::Symbol) if s === :minimizer Base.depwarn("`sol.minimizer` is deprecated. Use `sol.u` instead.", "sol.minimizer") diff --git a/src/solutions/pde_solutions.jl b/src/solutions/pde_solutions.jl index 8093eb8916..f8961a94d9 100644 --- a/src/solutions/pde_solutions.jl +++ b/src/solutions/pde_solutions.jl @@ -112,9 +112,9 @@ end Intercept PDE wrapping. Please implement a method for the PDESolution types in your discretizer. """ function SciMLBase.wrap_sol(sol, - metadata::AbstractDiscretizationMetadata{hasTime}) where { - hasTime, -} + metadata::AbstractDiscretizationMetadata{hasTime}) where { + hasTime, + } if hasTime isa Val{true} return PDETimeSeriesSolution(sol, metadata) else diff --git a/src/solutions/rode_solutions.jl b/src/solutions/rode_solutions.jl index 350c3c8237..b1bb2c1e03 100644 --- a/src/solutions/rode_solutions.jl +++ b/src/solutions/rode_solutions.jl @@ -62,21 +62,21 @@ end TruncatedStacktraces.@truncate_stacktrace RODESolution 1 2 function (sol::RODESolution)(t, ::Type{deriv} = Val{0}; idxs = nothing, - continuity = :left) where {deriv} + continuity = :left) where {deriv} sol.interp(t, idxs, deriv, sol.prob.p, continuity) end function (sol::RODESolution)(v, t, ::Type{deriv} = Val{0}; idxs = nothing, - continuity = :left) where {deriv} + continuity = :left) where {deriv} sol.interp(v, t, idxs, deriv, sol.prob.p, continuity) end function build_solution(prob::Union{AbstractRODEProblem, AbstractSDDEProblem}, - alg, t, u; W = nothing, timeseries_errors = length(u) > 2, - dense = false, dense_errors = dense, calculate_error = true, - interp = LinearInterpolation(t, u), - retcode = ReturnCode.Default, - alg_choice = nothing, - seed = UInt64(0), destats = missing, stats = nothing, kwargs...) + alg, t, u; W = nothing, timeseries_errors = length(u) > 2, + dense = false, dense_errors = dense, calculate_error = true, + interp = LinearInterpolation(t, u), + retcode = ReturnCode.Default, + alg_choice = nothing, + seed = UInt64(0), destats = missing, stats = nothing, kwargs...) T = eltype(eltype(u)) N = length((size(prob.u0)..., length(u))) @@ -133,7 +133,7 @@ function build_solution(prob::Union{AbstractRODEProblem, AbstractSDDEProblem}, end function calculate_solution_errors!(sol::AbstractRODESolution; fill_uanalytic = true, - timeseries_errors = true, dense_errors = true) + timeseries_errors = true, dense_errors = true) if sol.prob.f isa Tuple f = sol.prob.f[1] else diff --git a/src/solutions/solution_interface.jl b/src/solutions/solution_interface.jl index 04dc0e1520..e739bd613d 100644 --- a/src/solutions/solution_interface.jl +++ b/src/solutions/solution_interface.jl @@ -42,30 +42,30 @@ end # For handling ambiguities for T in [Int, Colon] @eval Base.@propagate_inbounds function Base.getindex(A::AbstractTimeseriesSolution, - I::$T) + I::$T) A.u[I] end end Base.@propagate_inbounds function Base.getindex(A::AbstractTimeseriesSolution, - I::Union{Int, AbstractArray{Int}, - CartesianIndex, Colon, BitArray, - AbstractArray{Bool}}...) 
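A sketch of the property access handled by the `getproperty` deprecation above, assuming `sol` is an `OptimizationSolution` returned by `solve`.

sol.u           # the minimizer (preferred accessor)
sol.objective   # objective value at sol.u
sol.minimizer   # still works, but emits the deprecation warning shown above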
+ I::Union{Int, AbstractArray{Int}, + CartesianIndex, Colon, BitArray, + AbstractArray{Bool}}...) RecursiveArrayTools.VectorOfArray(A.u)[I...] end Base.@propagate_inbounds function Base.getindex(A::AbstractTimeseriesSolution, i::Int, - ::Colon) + ::Colon) [A.u[j][i] for j in 1:length(A)] end Base.@propagate_inbounds function Base.getindex(A::AbstractTimeseriesSolution, ::Colon, - i::Int) + i::Int) A.u[i] end Base.@propagate_inbounds function Base.getindex(A::AbstractTimeseriesSolution, i::Int, - II::AbstractArray{Int}) + II::AbstractArray{Int}) [A.u[j][i] for j in II] end Base.@propagate_inbounds function Base.getindex(A::AbstractTimeseriesSolution, - ii::CartesianIndex) + ii::CartesianIndex) ti = Tuple(ii) i = last(ti) jj = CartesianIndex(Base.front(ti)) @@ -235,20 +235,20 @@ DEFAULT_PLOT_FUNC(x...) = (x...,) DEFAULT_PLOT_FUNC(x, y, z) = (x, y, z) # For v0.5.2 bug @recipe function f(sol::AbstractTimeseriesSolution; - plot_analytic = false, - denseplot = (sol.dense || - sol.prob isa AbstractDiscreteProblem) && - !(sol isa AbstractRODESolution) && - !(hasfield(typeof(sol), :interp) && - sol.interp isa SensitivityInterpolation), - plotdensity = min(Int(1e5), - sol.tslocation == 0 ? - (sol.prob isa AbstractDiscreteProblem ? - max(1000, 100 * length(sol)) : - max(1000, 10 * length(sol))) : - 1000 * sol.tslocation), - tspan = nothing, axis_safety = 0.1, - vars = nothing, idxs = nothing) + plot_analytic = false, + denseplot = (sol.dense || + sol.prob isa AbstractDiscreteProblem) && + !(sol isa AbstractRODESolution) && + !(hasfield(typeof(sol), :interp) && + sol.interp isa SensitivityInterpolation), + plotdensity = min(Int(1e5), + sol.tslocation == 0 ? + (sol.prob isa AbstractDiscreteProblem ? + max(1000, 100 * length(sol)) : + max(1000, 10 * length(sol))) : + 1000 * sol.tslocation), + tspan = nothing, axis_safety = 0.1, + vars = nothing, idxs = nothing) if vars !== nothing Base.depwarn("To maintain consistency with solution indexing, keyword argument vars will be removed in a future version. Please use keyword argument idxs instead.", :f; force = true) @@ -363,7 +363,7 @@ DEFAULT_PLOT_FUNC(x, y, z) = (x, y, z) # For v0.5.2 bug end function diffeq_to_arrays(sol, plot_analytic, denseplot, plotdensity, tspan, axis_safety, - vars, int_vars, tscale, strs) + vars, int_vars, tscale, strs) if tspan === nothing if sol.tslocation == 0 end_idx = length(sol) @@ -652,7 +652,7 @@ function u_n(timeseries::AbstractArray, sym, sol, plott, plot_timeseries) end function solplot_vecs_and_labels(dims, vars, plot_timeseries, plott, sol, plot_analytic, - plot_analytic_timeseries, strs) + plot_analytic_timeseries, strs) plot_vecs = [] labels = String[] for x in vars diff --git a/src/solve.jl b/src/solve.jl index f9c98c32f4..a9cc04fca0 100644 --- a/src/solve.jl +++ b/src/solve.jl @@ -89,7 +89,7 @@ end ``` """ function solve(prob::OptimizationProblem, alg, args...; - kwargs...)::AbstractOptimizationSolution + kwargs...)::AbstractOptimizationSolution if supports_opt_cache_interface(alg) solve!(init(prob, alg, args...; kwargs...)) else @@ -98,7 +98,9 @@ function solve(prob::OptimizationProblem, alg, args...; end end -function SciMLBase.solve(prob::EnsembleProblem{T}, args...; kwargs...) where {T <: OptimizationProblem} +function SciMLBase.solve(prob::EnsembleProblem{T}, + args...; + kwargs...) where {T <: OptimizationProblem} return SciMLBase.__solve(prob, args...; kwargs...) 
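A sketch of the plot-recipe keywords reformatted above, assuming Plots.jl is loaded and `sol` is a timeseries solution.

using Plots

plot(sol)                        # all states vs. t, dense when the interpolant allows
plot(sol; idxs = 1)              # a single state
plot(sol; idxs = (1, 2))         # phase plot; `vars` is the deprecated spelling
plot(sol; plot_analytic = true)  # overlay the analytic solution when one is attached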
end @@ -181,7 +183,7 @@ end supports_opt_cache_interface(alg) = false function __solve(cache::AbstractOptimizationCache)::AbstractOptimizationSolution end function __init(prob::OptimizationProblem, alg, args...; - kwargs...)::AbstractOptimizationCache + kwargs...)::AbstractOptimizationCache throw(OptimizerMissingError(alg)) end diff --git a/src/utils.jl b/src/utils.jl index 6eeaffc094..30b0ed6745 100644 --- a/src/utils.jl +++ b/src/utils.jl @@ -14,16 +14,16 @@ function numargs(f) end function numargs(f::RuntimeGeneratedFunctions.RuntimeGeneratedFunction{ - T, - V, - W, - I, -}) where { - T, - V, - W, - I, -} + T, + V, + W, + I, + }) where { + T, + V, + W, + I, + } (length(T),) end @@ -240,7 +240,7 @@ form is disabled and the 2-argument signature is ensured to be matched. * [`numargs`](@ref numargs) """ function isinplace(f, inplace_param_number, fname = "f", iip_preferred = true; - has_two_dispatches = true, isoptimization = false) + has_two_dispatches = true, isoptimization = false) nargs = numargs(f) iip_dispatch = any(x -> x == inplace_param_number, nargs) oop_dispatch = any(x -> x == inplace_param_number - 1, nargs) @@ -261,10 +261,10 @@ function isinplace(f, inplace_param_number, fname = "f", iip_preferred = true; else methods(f).ms[1].sig.parameters end - + for i in 1:length(nargs) if nargs[i] < inplace_param_number && - any(isequal(Vararg{Any}),_parameters) + any(isequal(Vararg{Any}), _parameters) # If varargs, assume iip return iip_preferred end @@ -305,7 +305,7 @@ end isinplace(f::AbstractSciMLFunction{iip}) where {iip} = iip function isinplace(f::AbstractSciMLFunction{iip}, inplace_param_number, - fname = nothing) where {iip} + fname = nothing) where {iip} iip end @@ -391,7 +391,7 @@ function add_kwonly(::Type{<:Val}, ex) end function add_kwonly(::Union{Type{Val{:function}}, - Type{Val{:(=)}}}, ex::Expr) + Type{Val{:(=)}}}, ex::Expr) body = ex.args[2:end] # function body default_call = ex.args[1] # e.g., :(f(a, b=2; c=3)) kwonly_call = add_kwonly(default_call) diff --git a/test/convert_tests.jl b/test/convert_tests.jl index 20fba4f86d..b6904310a4 100644 --- a/test/convert_tests.jl +++ b/test/convert_tests.jl @@ -36,14 +36,14 @@ end @testset "Convert ODEProblem with kwargs to NonlinearProblem" begin function lorenz!(du, u, p, t) - du[1] = p[1]*(u[2] - u[1]) + du[1] = p[1] * (u[2] - u[1]) du[2] = u[1] * (p[2] - u[3]) - u[2] du[3] = u[1] * u[2] - p[3] * u[3] end u0 = [1.0; 0.0; 0.0] tspan = (0.0, 100.0) - p = [10.0,28.0,8/3] - prob = ODEProblem(lorenz!, u0, tspan,p;a=1.0,b=2.0) + p = [10.0, 28.0, 8 / 3] + prob = ODEProblem(lorenz!, u0, tspan, p; a = 1.0, b = 2.0) nlprob = NonlinearProblem(prob) @test nlprob.kwargs[:a] == prob.kwargs[:a] @test nlprob.kwargs[:b] == prob.kwargs[:b] @@ -51,15 +51,15 @@ end @testset "Convert ODEProblem with kwargs to SteadyStateProblem" begin function lorenz!(du, u, p, t) - du[1] = p[1]*(u[2] - u[1]) + du[1] = p[1] * (u[2] - u[1]) du[2] = u[1] * (p[2] - u[3]) - u[2] du[3] = u[1] * u[2] - p[3] * u[3] end u0 = [1.0; 0.0; 0.0] tspan = (0.0, 100.0) - p = [10.0,28.0,8/3] - prob = ODEProblem(lorenz!, u0, tspan,p;a=1.0,b=2.0) + p = [10.0, 28.0, 8 / 3] + prob = ODEProblem(lorenz!, u0, tspan, p; a = 1.0, b = 2.0) sprob = SteadyStateProblem(prob) @test sprob.kwargs[:a] == prob.kwargs[:a] @test sprob.kwargs[:b] == prob.kwargs[:b] -end \ No newline at end of file +end diff --git a/test/downstream/ensemble_diffeq.jl b/test/downstream/ensemble_diffeq.jl index 7f53b65789..ea792a8ee3 100644 --- a/test/downstream/ensemble_diffeq.jl +++ 
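A sketch of the `numargs`/`isinplace` utilities adjusted above (the two toy right-hand sides are illustrative).

using SciMLBase

f_oop(u, p, t) = u
f_iip(du, u, p, t) = (du .= u)
SciMLBase.numargs(f_oop)          # argument count(s) across methods, here 3
SciMLBase.isinplace(f_oop, 4)     # false: only the 3-argument method exists
SciMLBase.isinplace(f_iip, 4)     # true: the 4-argument (mutating) method exists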
b/test/downstream/ensemble_diffeq.jl @@ -6,4 +6,4 @@ function prob_func(prob, i, repeat) end ensemble_prob = EnsembleProblem(prob, prob_func = prob_func) sim = solve(ensemble_prob, Tsit5(), EnsembleThreads(), trajectories = 10) -@test sim isa EnsembleSolution \ No newline at end of file +@test sim isa EnsembleSolution diff --git a/test/downstream/ensemble_nondes.jl b/test/downstream/ensemble_nondes.jl index 789bdb4fc2..f7028600e7 100644 --- a/test/downstream/ensemble_nondes.jl +++ b/test/downstream/ensemble_nondes.jl @@ -8,21 +8,40 @@ optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff()) prob = OptimizationProblem(optf, x0) sol1 = Optimization.solve(prob, OptimizationOptimJL.BFGS(), maxiters = 5) -ensembleprob = Optimization.EnsembleProblem(prob, [x0, x0 .+ rand(2), x0 .+ rand(2), x0 .+ rand(2)]) - -sol = Optimization.solve(ensembleprob, OptimizationOptimJL.BFGS(), EnsembleThreads(), trajectories = 4, maxiters = 5) +ensembleprob = Optimization.EnsembleProblem(prob, + [x0, x0 .+ rand(2), x0 .+ rand(2), x0 .+ rand(2)]) + +sol = Optimization.solve(ensembleprob, + OptimizationOptimJL.BFGS(), + EnsembleThreads(), + trajectories = 4, + maxiters = 5) @test findmin(i -> sol[i].objective, 1:4)[1] < sol1.objective -sol = Optimization.solve(ensembleprob, OptimizationOptimJL.BFGS(), EnsembleDistributed(), trajectories = 4, maxiters = 5) +sol = Optimization.solve(ensembleprob, + OptimizationOptimJL.BFGS(), + EnsembleDistributed(), + trajectories = 4, + maxiters = 5) @test findmin(i -> sol[i].objective, 1:4)[1] < sol1.objective prob = OptimizationProblem(optf, x0, lb = [-0.5, -0.5], ub = [0.5, 0.5]) -ensembleprob = Optimization.EnsembleProblem(prob, 5, prob_func = (prob, i, repeat) -> remake(prob, u0 = rand(-0.5:0.001:0.5, 2))) - -sol = Optimization.solve(ensembleprob, OptimizationOptimJL.BFGS(), EnsembleThreads(), trajectories = 5, maxiters = 5) +ensembleprob = Optimization.EnsembleProblem(prob, + 5, + prob_func = (prob, i, repeat) -> remake(prob, u0 = rand(-0.5:0.001:0.5, 2))) + +sol = Optimization.solve(ensembleprob, + OptimizationOptimJL.BFGS(), + EnsembleThreads(), + trajectories = 5, + maxiters = 5) @test findmin(i -> sol[i].objective, 1:4)[1] < sol1.objective -sol = Optimization.solve(ensembleprob, OptimizationOptimJL.BFGS(), EnsembleDistributed(), trajectories = 5, maxiters = 5) +sol = Optimization.solve(ensembleprob, + OptimizationOptimJL.BFGS(), + EnsembleDistributed(), + trajectories = 5, + maxiters = 5) @test findmin(i -> sol[i].objective, 1:4)[1] < sol1.objective using NonlinearSolve @@ -35,4 +54,4 @@ ensembleprob = EnsembleProblem(prob, [u0, u0 .+ rand(2), u0 .+ rand(2), u0 .+ ra sol = solve(ensembleprob, EnsembleThreads(), trajectories = 4, maxiters = 100) -sol = solve(ensembleprob, EnsembleDistributed(), trajectories = 4, maxiters = 100) \ No newline at end of file +sol = solve(ensembleprob, EnsembleDistributed(), trajectories = 4, maxiters = 100) diff --git a/test/downstream/ensemble_stats.jl b/test/downstream/ensemble_stats.jl index cec72f05ea..0e8b3e06a4 100644 --- a/test/downstream/ensemble_stats.jl +++ b/test/downstream/ensemble_stats.jl @@ -1,13 +1,13 @@ using OrdinaryDiffEq using Test -f(u,p,t) = 1.01*u -u0=1/2 -tspan = (0.0,1.0) -prob = ODEProblem(f,u0,tspan) +f(u, p, t) = 1.01 * u +u0 = 1 / 2 +tspan = (0.0, 1.0) +prob = ODEProblem(f, u0, tspan) function prob_func(prob, i, repeat) remake(prob, u0 = rand() * prob.u0) end ensemble_prob = EnsembleProblem(prob, prob_func = prob_func) sim = solve(ensemble_prob, Tsit5(), EnsembleThreads(), trajectories = 10) -@test 
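A sketch mirroring the ensemble tests above: each trajectory perturbs the initial condition through `prob_func`.

using OrdinaryDiffEq

f(u, p, t) = 1.01 * u
prob = ODEProblem(f, 0.5, (0.0, 1.0))
prob_func(prob, i, repeat) = remake(prob, u0 = rand() * prob.u0)
eprob = EnsembleProblem(prob, prob_func = prob_func)
sim = solve(eprob, Tsit5(), EnsembleThreads(), trajectories = 10)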
sim.stats.nf == mapreduce(x -> x.stats.nf, +, sim.u) \ No newline at end of file +@test sim.stats.nf == mapreduce(x -> x.stats.nf, +, sim.u) diff --git a/test/downstream/nllsopt.jl b/test/downstream/nllsopt.jl index e70ed8ac25..a7d5204ac6 100644 --- a/test/downstream/nllsopt.jl +++ b/test/downstream/nllsopt.jl @@ -25,4 +25,4 @@ optprob = OptimizationProblem(optf, prob_oop.u0, prob_oop.p) @time sol = solve(optprob, NLopt.LD_LBFGS(); maxiters = 10000, abstol = 1e-8) optprob = OptimizationProblem(prob_oop, AutoForwardDiff()) -@time sol = solve(optprob, NLopt.LD_LBFGS(); maxiters = 10000, abstol = 1e-8) \ No newline at end of file +@time sol = solve(optprob, NLopt.LD_LBFGS(); maxiters = 10000, abstol = 1e-8) diff --git a/test/function_building_error_messages.jl b/test/function_building_error_messages.jl index 486fde70f3..d992530246 100644 --- a/test/function_building_error_messages.jl +++ b/test/function_building_error_messages.jl @@ -21,7 +21,7 @@ end struct Foo{T} end f = Foo{1}() -(this::Foo{T})(args...) where T=1 +(this::Foo{T})(args...) where {T} = 1 @test SciMLBase.isinplace(Foo{Int}(), 4) ## Problem argument tests diff --git a/test/python/pythoncall.jl b/test/python/pythoncall.jl index 7f0758be1c..e0b8e19d3c 100644 --- a/test/python/pythoncall.jl +++ b/test/python/pythoncall.jl @@ -1,7 +1,7 @@ # PyCall and PythonCall must use the same Python interpreter. This environment variable # tells PythonCall to use the same Python interpreter as PyCall. See # https://github.com/JuliaPy/PythonCall.jl/blob/5f56a9b96b867a9f6742ab1d1e2361abd844e19f/docs/src/pycall.md#tips -ENV["JULIA_PYTHONCALL_EXE"]="@PyCall" +ENV["JULIA_PYTHONCALL_EXE"] = "@PyCall" using DifferentialEquations, PythonCall @@ -79,6 +79,7 @@ using DifferentialEquations, PythonCall end @testset "promotion" begin - _u0 = pyconvert(Any, pyeval("""de.SciMLBase.prepare_initial_state([1.0, 0, 0])""", @__MODULE__)) + _u0 = pyconvert(Any, + pyeval("""de.SciMLBase.prepare_initial_state([1.0, 0, 0])""", @__MODULE__)) @test _u0 isa Vector{Float64} end
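A sketch of the aggregated-statistics check in the last test file, assuming `sim` is the ensemble solution from the sketch above: the ensemble's `stats` is the sum of the per-trajectory solver statistics.

total_nf = mapreduce(x -> x.stats.nf, +, sim.u)
@assert sim.stats.nf == total_nf   # function evaluations accumulate across trajectories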