diff --git a/Project.toml b/Project.toml
index b6cee24ab9..bb9281019e 100644
--- a/Project.toml
+++ b/Project.toml
@@ -71,6 +71,7 @@ Preferences = "1.3"
Printf = "1.9"
PyCall = "1.96"
PythonCall = "0.9"
+QuasiMonteCarlo = "0.3"
RCall = "0.13.18"
RecipesBase = "0.7.0, 0.8, 1.0"
RecursiveArrayTools = "2.33"
@@ -83,7 +84,6 @@ Statistics = "1"
SymbolicIndexingInterface = "0.2"
Tables = "1"
TruncatedStacktraces = "1"
-QuasiMonteCarlo = "0.3"
Zygote = "0.6"
julia = "1.9"
diff --git a/README.md b/README.md
index b294f98c80..653910d158 100644
--- a/README.md
+++ b/README.md
@@ -6,7 +6,7 @@
[![codecov](https://codecov.io/gh/SciML/SciMLBase.jl/branch/master/graph/badge.svg)](https://codecov.io/gh/SciML/SciMLBase.jl)
[![Build Status](https://github.com/SciML/SciMLBase.jl/workflows/CI/badge.svg)](https://github.com/SciML/SciMLBase.jl/actions?query=workflow%3ACI)
-[![ColPrac: Contributor's Guide on Collaborative Practices for Community Packages](https://img.shields.io/badge/ColPrac-Contributor's%20Guide-blueviolet)](https://github.com/SciML/ColPrac)
+[![ColPrac: Contributor's Guide on Collaborative Practices for Community Packages](https://img.shields.io/badge/ColPrac-Contributor%27s%20Guide-blueviolet)](https://github.com/SciML/ColPrac)
[![SciML Code Style](https://img.shields.io/static/v1?label=code%20style&message=SciML&color=9558b2&labelColor=389826)](https://github.com/SciML/SciMLStyle)
SciMLBase.jl is the core interface definition of the SciML ecosystem. It is a
@@ -17,11 +17,11 @@ supply the common interface and allow for interexchange of mathematical problems
The breaking changes in v2.0 are:
-* `IntegralProblem` has moved to an interface with `IntegralFunction` and `BatchedIntegralFunction` which requires specifying `prototype`s for the values to be modified
- instead of `nout` and `batch`. https://github.com/SciML/SciMLBase.jl/pull/497
-* `ODEProblem` was made temporarily into a `mutable struct` to allow for EnzymeRules support. Using the mutation throws a warning that this is only experimental and should not be relied on.
- https://github.com/SciML/SciMLBase.jl/pull/501
-* `BVProblem` now has a new interface for `TwoPointBVProblem` which splits the bc terms for the two sides, forcing a true two-point BVProblem to allow for further specializations and to allow
- for wrapping Fortran solvers in the interface. https://github.com/SciML/SciMLBase.jl/pull/477
-* `SDEProblem` constructor was changed to remove an anti-pattern which required passing the diffusion function `g` twice, i.e. `SDEProblem(SDEFunction(f,g),g, ...)`.
- Now this is simply `SDEProblem(SDEFunction(f,g),...)`. https://github.com/SciML/SciMLBase.jl/pull/489
+ - `IntegralProblem` has moved to an interface with `IntegralFunction` and `BatchedIntegralFunction` which requires specifying `prototype`s for the values to be modified
+ instead of `nout` and `batch`. https://github.com/SciML/SciMLBase.jl/pull/497
+ - `ODEProblem` was made temporarily into a `mutable struct` to allow for EnzymeRules support. Using the mutation throws a warning that this is only experimental and should not be relied on.
+ https://github.com/SciML/SciMLBase.jl/pull/501
+ - `BVProblem` now has a new interface for `TwoPointBVProblem` which splits the bc terms for the two sides, forcing a true two-point BVProblem to allow for further specializations and to allow
+ for wrapping Fortran solvers in the interface. https://github.com/SciML/SciMLBase.jl/pull/477
+ - `SDEProblem` constructor was changed to remove an anti-pattern which required passing the diffusion function `g` twice, i.e. `SDEProblem(SDEFunction(f,g),g, ...)`.
+ Now this is simply `SDEProblem(SDEFunction(f,g),...)`. https://github.com/SciML/SciMLBase.jl/pull/489
diff --git a/docs/make.jl b/docs/make.jl
index 21eae53cf8..3bd8f23eb8 100644
--- a/docs/make.jl
+++ b/docs/make.jl
@@ -1,18 +1,21 @@
using Documenter, SciMLBase, ModelingToolkit
-cp("./docs/Manifest.toml", "./docs/src/assets/Manifest.toml", force = true)
-cp("./docs/Project.toml", "./docs/src/assets/Project.toml", force = true)
+cp("./docs/Manifest.toml", "./docs/src/assets/Manifest.toml"; force = true)
+cp("./docs/Project.toml", "./docs/src/assets/Project.toml"; force = true)
include("pages.jl")
-makedocs(sitename = "SciMLBase.jl",
+makedocs(;
+ sitename = "SciMLBase.jl",
authors = "Chris Rackauckas",
modules = [SciMLBase, ModelingToolkit],
- clean = true, doctest = false, linkcheck = true,
+ clean = true,
+ doctest = false,
+ linkcheck = true,
warnonly = [:docs_block, :missing_docs],
- format = Documenter.HTML(assets = ["assets/favicon.ico"],
- canonical = "https://docs.sciml.ai/SciMLBase/stable"),
- pages = pages)
+ format = Documenter.HTML(;
+ assets = ["assets/favicon.ico"],
+ canonical = "https://docs.sciml.ai/SciMLBase/stable",),
+ pages = pages,)
-deploydocs(repo = "github.com/SciML/SciMLBase.jl.git";
- push_preview = true)
+deploydocs(; repo = "github.com/SciML/SciMLBase.jl.git", push_preview = true)
diff --git a/docs/src/fundamentals/FAQ.md b/docs/src/fundamentals/FAQ.md
index 69a6530652..77b0d02dac 100644
--- a/docs/src/fundamentals/FAQ.md
+++ b/docs/src/fundamentals/FAQ.md
@@ -16,4 +16,4 @@ See [ColPrac: Contributor's Guide on Collaborative Practices for Community Packa
## Are there developer programs to help fund parties interested in helping develop SciML?
-Yes! See [the SciML Developer Programs](https://sciml.ai/dev/) webpage.
\ No newline at end of file
+Yes! See [the SciML Developer Programs](https://sciml.ai/dev/) webpage.
diff --git a/docs/src/index.md b/docs/src/index.md
index 88af2fde8e..edf32f017c 100644
--- a/docs/src/index.md
+++ b/docs/src/index.md
@@ -13,36 +13,43 @@ shared/common functionality.
The SciML common interface covers the following domains:
-- Linear systems (`LinearProblem`)
- - Direct methods for dense and sparse
- - Iterative solvers with preconditioning
-- Nonlinear Systems (`NonlinearProblem`)
- - Rootfinding for systems of nonlinear equations
-- Interval Nonlinear Systems
- - Bracketing rootfinders for nonlinear equations with interval bounds
-- Integrals (quadrature) (`IntegralProblem`)
-- Differential Equations
- - Discrete equations (function maps, discrete stochastic (Gillespie/Markov)
- simulations) (`DiscreteProblem`)
- - Ordinary differential equations (ODEs) (`ODEProblem`)
- - Split and Partitioned ODEs (Symplectic integrators, IMEX Methods) (`SplitODEProblem`)
- - Stochastic ordinary differential equations (SODEs or SDEs) (`SDEProblem`)
- - Stochastic differential-algebraic equations (SDAEs) (`SDEProblem` with mass matrices)
- - Random differential equations (RODEs or RDEs) (`RODEProblem`)
- - Differential algebraic equations (DAEs) (`DAEProblem` and `ODEProblem` with mass matrices)
- - Delay differential equations (DDEs) (`DDEProblem`)
- - Neutral, retarded, and algebraic delay differential equations (NDDEs, RDDEs, and DDAEs)
- - Stochastic delay differential equations (SDDEs) (`SDDEProblem`)
- - Experimental support for stochastic neutral, retarded, and algebraic delay differential equations (SNDDEs, SRDDEs, and SDDAEs)
- - Mixed discrete and continuous equations (Hybrid Equations, Jump Diffusions) (`AbstractDEProblem`s with callbacks)
-- Optimization (`OptimizationProblem`)
- - Nonlinear (constrained) optimization
-- (Stochastic/Delay/Differential-Algebraic) Partial Differential Equations (`PDESystem`)
- - Finite difference and finite volume methods
- - Interfaces to finite element methods
- - Physics-Informed Neural Networks (PINNs)
- - Integro-Differential Equations
- - Fractional Differential Equations
+ - Linear systems (`LinearProblem`)
+
+ + Direct methods for dense and sparse
+ + Iterative solvers with preconditioning
+
+ - Nonlinear Systems (`NonlinearProblem`)
+
+ + Rootfinding for systems of nonlinear equations
+ - Interval Nonlinear Systems
+
+ + Bracketing rootfinders for nonlinear equations with interval bounds
+ - Integrals (quadrature) (`IntegralProblem`)
+ - Differential Equations
+
+ + Discrete equations (function maps, discrete stochastic (Gillespie/Markov)
+ simulations) (`DiscreteProblem`)
+ + Ordinary differential equations (ODEs) (`ODEProblem`)
+ + Split and Partitioned ODEs (Symplectic integrators, IMEX Methods) (`SplitODEProblem`)
+ + Stochastic ordinary differential equations (SODEs or SDEs) (`SDEProblem`)
+ + Stochastic differential-algebraic equations (SDAEs) (`SDEProblem` with mass matrices)
+ + Random differential equations (RODEs or RDEs) (`RODEProblem`)
+ + Differential algebraic equations (DAEs) (`DAEProblem` and `ODEProblem` with mass matrices)
+ + Delay differential equations (DDEs) (`DDEProblem`)
+ + Neutral, retarded, and algebraic delay differential equations (NDDEs, RDDEs, and DDAEs)
+ + Stochastic delay differential equations (SDDEs) (`SDDEProblem`)
+ + Experimental support for stochastic neutral, retarded, and algebraic delay differential equations (SNDDEs, SRDDEs, and SDDAEs)
+ + Mixed discrete and continuous equations (Hybrid Equations, Jump Diffusions) (`AbstractDEProblem`s with callbacks)
+ - Optimization (`OptimizationProblem`)
+
+ + Nonlinear (constrained) optimization
+ - (Stochastic/Delay/Differential-Algebraic) Partial Differential Equations (`PDESystem`)
+
+ + Finite difference and finite volume methods
+ + Interfaces to finite element methods
+ + Physics-Informed Neural Networks (PINNs)
+ + Integro-Differential Equations
+ + Fractional Differential Equations
The SciML common interface also includes
[ModelingToolkit.jl](https://docs.sciml.ai/ModelingToolkit/stable/)
@@ -55,11 +62,13 @@ patterns.
In addition to the purely numerical representations of mathematical objects, there are also
sets of problem types associated with common mathematical algorithms. These are:
-- Data-driven modeling
- - Discrete-time data-driven dynamical systems (`DiscreteDataDrivenProblem`)
- - Continuous-time data-driven dynamical systems (`ContinuousDataDrivenProblem`)
- - Symbolic regression (`DirectDataDrivenProblem`)
-- Uncertainty quantification and expected values (`ExpectationProblem`)
+ - Data-driven modeling
+
+ + Discrete-time data-driven dynamical systems (`DiscreteDataDrivenProblem`)
+ + Continuous-time data-driven dynamical systems (`ContinuousDataDrivenProblem`)
+ + Symbolic regression (`DirectDataDrivenProblem`)
+
+ - Uncertainty quantification and expected values (`ExpectationProblem`)
## Inverse Problems, Parameter Estimation, and Structural Identification
@@ -73,84 +82,103 @@ find the parameters `p` that solve the inverse problem.
The SciML interface is common as the usage of arguments is standardized across
all of the problem domains. Underlying high level ideas include:
-- All domains use the same interface of defining a `AbstractSciMLProblem` which is then
- solved via `solve(prob,alg;kwargs)`, where `alg` is a `AbstractSciMLAlgorithm`. The
- keyword argument namings are standardized across the organization.
-- `AbstractSciMLProblem`s are generally defined by a `SciMLFunction` which can define
- extra details about a model function, such as its analytical Jacobian, its
- sparsity patterns and so on.
-- There is an organization-wide method for defining linear and nonlinear solvers
- used within other solvers, giving maximum control of performance to the user.
-- Types used within the packages are defined by the input types. For example,
- packages attempt to internally use the type of the initial condition as the
- type for the state within differential equation solvers.
-- `solve` calls should be thread-safe and parallel-safe.
-- `init(prob,alg;kwargs)` returns an iterator which allows for directly iterating
- over the solution process
-- High performance is key. Any performance that is not at the top level is considered
- a bug and should be reported as such.
-- All functions have an in-place and out-of-place form, where the in-place form
- is made to utilize mutation for high performance on large-scale problems and
- the out-of-place form is for compatibility with tooling like static arrays and
- some reverse-mode automatic differentiation systems.
+ - All domains use the same interface of defining a `AbstractSciMLProblem` which is then
+ solved via `solve(prob,alg;kwargs)`, where `alg` is a `AbstractSciMLAlgorithm`. The
+ keyword argument namings are standardized across the organization.
+ - `AbstractSciMLProblem`s are generally defined by a `SciMLFunction` which can define
+ extra details about a model function, such as its analytical Jacobian, its
+ sparsity patterns and so on.
+ - There is an organization-wide method for defining linear and nonlinear solvers
+ used within other solvers, giving maximum control of performance to the user.
+ - Types used within the packages are defined by the input types. For example,
+ packages attempt to internally use the type of the initial condition as the
+ type for the state within differential equation solvers.
+ - `solve` calls should be thread-safe and parallel-safe.
+ - `init(prob,alg;kwargs)` returns an iterator which allows for directly iterating
+    over the solution process.
+ - High performance is key. Any performance that is not at the top level is considered
+ a bug and should be reported as such.
+ - All functions have an in-place and out-of-place form, where the in-place form
+ is made to utilize mutation for high performance on large-scale problems and
+ the out-of-place form is for compatibility with tooling like static arrays and
+ some reverse-mode automatic differentiation systems.
## User-Facing Solver Libraries
-- [DifferentialEquations.jl](https://docs.sciml.ai/DiffEqDocs/stable/)
- - Multi-package interface of high performance numerical solvers of
- differential equations
-- [ModelingToolkit.jl](https://docs.sciml.ai/ModelingToolkit/stable/)
- - The symbolic modeling package which implements the SciML symbolic common
- interface.
-- [LinearSolve.jl](https://docs.sciml.ai/LinearSolve/stable/)
- - Multi-package interface for specifying linear solvers (direct, sparse,
- and iterative), along with tools for caching and preconditioners
- for use in large-scale modeling.
-- [NonlinearSolve.jl](https://docs.sciml.ai/NonlinearSolve/stable/)
- - High performance numerical solving of nonlinear systems.
-- [Integrals.jl](https://docs.sciml.ai/Integrals/stable/)
- - Multi-package interface for high performance, batched, and parallelized
- numerical quadrature.
-- [Optimization.jl](https://docs.sciml.ai/Optimization/stable/)
- - Multi-package interface for numerical solving of optimization problems.
-- [NeuralPDE.jl](https://docs.sciml.ai/NeuralPDE/stable/)
- - Physics-Informed Neural Network (PINN) package for transforming partial
- differential equations into optimization problems.
-- [DiffEqOperators.jl](https://docs.sciml.ai/DiffEqOperators/stable/)
- - Automated finite difference method (FDM) package for transforming partial
- differential equations into nonlinear problems and ordinary differential
- equations.
-- [DiffEqFlux.jl](https://docs.sciml.ai/DiffEqFlux/stable/)
- - High level package for scientific machine learning applications, such as
- neural and universal differential equations, solving of inverse problems,
- parameter estimation, nonlinear optimal control, and more.
-- [DataDrivenDiffEq.jl](https://docs.sciml.ai/DataDrivenDiffEq/stable/)
- - Multi-package interface for data-driven modeling, Koopman dynamic mode
- decomposition, symbolic regression/sparsification, and automated model
- discovery.
-- [SciMLExpectations.jl](https://docs.sciml.ai/SciMLExpectations/stable/)
- - Extension to the dynamical modeling tools for calculating expectations.
+ - [DifferentialEquations.jl](https://docs.sciml.ai/DiffEqDocs/stable/)
+
+ + Multi-package interface of high performance numerical solvers of
+ differential equations
+
+ - [ModelingToolkit.jl](https://docs.sciml.ai/ModelingToolkit/stable/)
+
+ + The symbolic modeling package which implements the SciML symbolic common
+ interface.
+ - [LinearSolve.jl](https://docs.sciml.ai/LinearSolve/stable/)
+
+ + Multi-package interface for specifying linear solvers (direct, sparse,
+ and iterative), along with tools for caching and preconditioners
+ for use in large-scale modeling.
+ - [NonlinearSolve.jl](https://docs.sciml.ai/NonlinearSolve/stable/)
+
+ + High performance numerical solving of nonlinear systems.
+ - [Integrals.jl](https://docs.sciml.ai/Integrals/stable/)
+
+ + Multi-package interface for high performance, batched, and parallelized
+ numerical quadrature.
+ - [Optimization.jl](https://docs.sciml.ai/Optimization/stable/)
+
+ + Multi-package interface for numerical solving of optimization problems.
+ - [NeuralPDE.jl](https://docs.sciml.ai/NeuralPDE/stable/)
+
+ + Physics-Informed Neural Network (PINN) package for transforming partial
+ differential equations into optimization problems.
+ - [DiffEqOperators.jl](https://docs.sciml.ai/DiffEqOperators/stable/)
+
+ + Automated finite difference method (FDM) package for transforming partial
+ differential equations into nonlinear problems and ordinary differential
+ equations.
+ - [DiffEqFlux.jl](https://docs.sciml.ai/DiffEqFlux/stable/)
+
+ + High level package for scientific machine learning applications, such as
+ neural and universal differential equations, solving of inverse problems,
+ parameter estimation, nonlinear optimal control, and more.
+ - [DataDrivenDiffEq.jl](https://docs.sciml.ai/DataDrivenDiffEq/stable/)
+
+ + Multi-package interface for data-driven modeling, Koopman dynamic mode
+ decomposition, symbolic regression/sparsification, and automated model
+ discovery.
+ - [SciMLExpectations.jl](https://docs.sciml.ai/SciMLExpectations/stable/)
+
+ + Extension to the dynamical modeling tools for calculating expectations.
## Interface Implementation Libraries
-- [SciMLBase.jl](https://docs.sciml.ai/SciMLBase/stable/)
- - The core package defining the interface which is consumed by the modeling
- and solver packages.
-- [DiffEqBase.jl](https://github.com/SciML/DiffEqBase.jl)
- - The core package defining the extended interface which is consumed by the
- differential equation solver packages.
-- [SciMLSensitivity.jl](https://docs.sciml.ai/SciMLSensitivity/stable/)
- - A package which pools together the definition of derivative overloads to
- define the common `sensealg` automatic differentiation interface.
-- [DiffEqNoiseProcess.jl](https://docs.sciml.ai/DiffEqNoiseProcess/stable/)
- - A package which defines the stochastic `AbstractNoiseProcess` interface
- for the SciML ecosystem.
-- [RecursiveArrayTools.jl](https://docs.sciml.ai/RecursiveArrayTools/stable/)
- - A package which defines the underlying `AbstractVectorOfArray` structure
- used as the output for all time series results.
-- [ArrayInterface.jl](https://docs.sciml.ai/ArrayInterface/stable/)
- - The package which defines the extended `AbstractArray` interface employed
- throughout the SciML ecosystem.
+ - [SciMLBase.jl](https://docs.sciml.ai/SciMLBase/stable/)
+
+ + The core package defining the interface which is consumed by the modeling
+ and solver packages.
+
+ - [DiffEqBase.jl](https://github.com/SciML/DiffEqBase.jl)
+
+ + The core package defining the extended interface which is consumed by the
+ differential equation solver packages.
+ - [SciMLSensitivity.jl](https://docs.sciml.ai/SciMLSensitivity/stable/)
+
+ + A package which pools together the definition of derivative overloads to
+ define the common `sensealg` automatic differentiation interface.
+ - [DiffEqNoiseProcess.jl](https://docs.sciml.ai/DiffEqNoiseProcess/stable/)
+
+ + A package which defines the stochastic `AbstractNoiseProcess` interface
+ for the SciML ecosystem.
+ - [RecursiveArrayTools.jl](https://docs.sciml.ai/RecursiveArrayTools/stable/)
+
+ + A package which defines the underlying `AbstractVectorOfArray` structure
+ used as the output for all time series results.
+ - [ArrayInterface.jl](https://docs.sciml.ai/ArrayInterface/stable/)
+
+ + The package which defines the extended `AbstractArray` interface employed
+ throughout the SciML ecosystem.
## Using-Facing Modeling Libraries
@@ -165,10 +193,13 @@ that mixes inference, symbolics, and numerics.
## External Binding Libraries
-- [diffeqr](https://github.com/SciML/diffeqr)
- - Solving differential equations in R using DifferentialEquations.jl with ModelingToolkit for JIT compilation and GPU-acceleration
-- [diffeqpy](https://github.com/SciML/diffeqpy)
- - Solving differential equations in Python using DifferentialEquations.jl
+ - [diffeqr](https://github.com/SciML/diffeqr)
+
+ + Solving differential equations in R using DifferentialEquations.jl with ModelingToolkit for JIT compilation and GPU-acceleration
+
+ - [diffeqpy](https://github.com/SciML/diffeqpy)
+
+ + Solving differential equations in Python using DifferentialEquations.jl
## Solver Libraries
@@ -177,62 +208,82 @@ There are too many to name here. Check out the
## Contributing
-- Please refer to the
- [SciML ColPrac: Contributor's Guide on Collaborative Practices for Community Packages](https://github.com/SciML/ColPrac/blob/master/README.md)
- for guidance on PRs, issues, and other matters relating to contributing to SciML.
-- See the [SciML Style Guide](https://github.com/SciML/SciMLStyle) for common coding practices and other style decisions.
-- There are a few community forums:
- - The #diffeq-bridged and #sciml-bridged channels in the
- [Julia Slack](https://julialang.org/slack/)
- - The #diffeq-bridged and #sciml-bridged channels in the
- [Julia Zulip](https://julialang.zulipchat.com/#narrow/stream/279055-sciml-bridged)
- - On the [Julia Discourse forums](https://discourse.julialang.org)
- - See also [SciML Community page](https://sciml.ai/community/)
+ - Please refer to the
+ [SciML ColPrac: Contributor's Guide on Collaborative Practices for Community Packages](https://github.com/SciML/ColPrac/blob/master/README.md)
+ for guidance on PRs, issues, and other matters relating to contributing to SciML.
+
+ - See the [SciML Style Guide](https://github.com/SciML/SciMLStyle) for common coding practices and other style decisions.
+ - There are a few community forums:
+
+ + The #diffeq-bridged and #sciml-bridged channels in the
+ [Julia Slack](https://julialang.org/slack/)
+ + The #diffeq-bridged and #sciml-bridged channels in the
+ [Julia Zulip](https://julialang.zulipchat.com/#narrow/stream/279055-sciml-bridged)
+ + On the [Julia Discourse forums](https://discourse.julialang.org)
+ + See also [SciML Community page](https://sciml.ai/community/)
## Reproducibility
+
```@raw html
The documentation of this SciML package was built using these direct dependencies,
```
+
```@example
using Pkg # hide
Pkg.status() # hide
```
+
```@raw html
```
+
```@raw html
and using this machine and Julia version.
```
+
```@example
using InteractiveUtils # hide
versioninfo() # hide
```
+
```@raw html
```
+
```@raw html
A more complete overview of all dependencies and their versions is also provided.
```
+
```@example
using Pkg # hide
-Pkg.status(;mode = PKGMODE_MANIFEST) # hide
+Pkg.status(; mode = PKGMODE_MANIFEST) # hide
```
+
```@raw html
```
+
```@eval
using TOML
using Markdown
version = TOML.parse(read("../../Project.toml", String))["version"]
name = TOML.parse(read("../../Project.toml", String))["name"]
-link_manifest = "https://github.com/SciML/" * name * ".jl/tree/gh-pages/v" * version *
- "/assets/Manifest.toml"
-link_project = "https://github.com/SciML/" * name * ".jl/tree/gh-pages/v" * version *
- "/assets/Project.toml"
+link_manifest =
+ "https://github.com/SciML/" *
+ name *
+ ".jl/tree/gh-pages/v" *
+ version *
+ "/assets/Manifest.toml"
+link_project =
+ "https://github.com/SciML/" *
+ name *
+ ".jl/tree/gh-pages/v" *
+ version *
+ "/assets/Project.toml"
Markdown.parse("""You can also download the
[manifest]($link_manifest)
file and the
[project]($link_project)
file.
""")
-```
\ No newline at end of file
+```
diff --git a/docs/src/interfaces/Algorithms.md b/docs/src/interfaces/Algorithms.md
index b64cb76e63..eb2f4c9b15 100644
--- a/docs/src/interfaces/Algorithms.md
+++ b/docs/src/interfaces/Algorithms.md
@@ -5,7 +5,7 @@
`SciMLAlgorithms` are defined as types which have dispatches to the function signature:
```julia
-CommonSolve.solve(prob::AbstractSciMLProblem,alg::AbstractSciMLAlgorithm;kwargs...)
+CommonSolve.solve(prob::AbstractSciMLProblem, alg::AbstractSciMLAlgorithm; kwargs...)
```
### Algorithm-Specific Arguments
diff --git a/docs/src/interfaces/Common_Keywords.md b/docs/src/interfaces/Common_Keywords.md
index 28b80882fb..d0509722a3 100644
--- a/docs/src/interfaces/Common_Keywords.md
+++ b/docs/src/interfaces/Common_Keywords.md
@@ -23,47 +23,47 @@ way to only saving the solution at the final timepoint.
The following options are all related to output control. See the "Examples"
section at the end of this page for some example usage.
-* `dense`: Denotes whether to save the extra pieces required for dense (continuous)
- output. Default is `save_everystep && !isempty(saveat)` for algorithms which have
- the ability to produce dense output, i.e. by default it's `true` unless the user
- has turned off saving on steps or has chosen a `saveat` value. If `dense=false`,
- the solution still acts like a function, and `sol(t)` is a linear interpolation
- between the saved time points.
-* `saveat`: Denotes specific times to save the solution at, during the solving
- phase. The solver will save at each of the timepoints in this array in the
- most efficient manner available to the solver. If only `saveat` is given, then
- the arguments `save_everystep` and `dense` are `false` by default.
- If `saveat` is given a number, then it will automatically expand to
- `tspan[1]:saveat:tspan[2]`. For methods where interpolation is not possible,
- `saveat` may be equivalent to `tstops`. The default value is `[]`.
-* `save_idxs`: Denotes the indices for the components of the equation to save.
- Defaults to saving all indices. For example, if you are solving a 3-dimensional ODE,
- and given `save_idxs = [1, 3]`, only the first and third components of the
- solution will be outputted.
- Notice that of course in this case the outputted solution will be two-dimensional.
-* `tstops`: Denotes *extra* times that the timestepping algorithm must step to.
- This should be used to help the solver deal with discontinuities and
- singularities, since stepping exactly at the time of the discontinuity will
- improve accuracy. If a method cannot change timesteps (fixed timestep
- multistep methods), then `tstops` will use an interpolation,
- matching the behavior of `saveat`. If a method cannot change timesteps and
- also cannot interpolate, then `tstops` must be a multiple of `dt` or else an
- error will be thrown. Default is `[]`.
-* `d_discontinuities:` Denotes locations of discontinuities in low order derivatives.
- This will force FSAL algorithms which assume derivative continuity to re-evaluate
- the derivatives at the point of discontinuity. The default is `[]`.
-* `save_everystep`: Saves the result at every step.
- Default is true if `isempty(saveat)`.
-* `save_on`: Denotes whether intermediate solutions are saved. This overrides the
- settings of `dense`, `saveat` and `save_everystep` and is used by some applications
- to manually turn off saving temporarily. Everyday use of the solvers should leave
- this unchanged. Defaults to `true`.
-* `save_start`: Denotes whether the initial condition should be included in
- the solution type as the first timepoint. Defaults to `true`.
-* `save_end`: Denotes whether the final timepoint is forced to be saved,
- regardless of the other saving settings. Defaults to `true`.
-* `initialize_save`: Denotes whether to save after the callback initialization
- phase (when `u_modified=true`). Defaults to `true`.
+ - `dense`: Denotes whether to save the extra pieces required for dense (continuous)
+ output. Default is `save_everystep && !isempty(saveat)` for algorithms which have
+ the ability to produce dense output, i.e. by default it's `true` unless the user
+ has turned off saving on steps or has chosen a `saveat` value. If `dense=false`,
+ the solution still acts like a function, and `sol(t)` is a linear interpolation
+ between the saved time points.
+ - `saveat`: Denotes specific times to save the solution at, during the solving
+ phase. The solver will save at each of the timepoints in this array in the
+ most efficient manner available to the solver. If only `saveat` is given, then
+ the arguments `save_everystep` and `dense` are `false` by default.
+ If `saveat` is given a number, then it will automatically expand to
+ `tspan[1]:saveat:tspan[2]`. For methods where interpolation is not possible,
+ `saveat` may be equivalent to `tstops`. The default value is `[]`.
+ - `save_idxs`: Denotes the indices for the components of the equation to save.
+ Defaults to saving all indices. For example, if you are solving a 3-dimensional ODE,
+ and given `save_idxs = [1, 3]`, only the first and third components of the
+ solution will be outputted.
+ Notice that of course in this case the outputted solution will be two-dimensional.
+ - `tstops`: Denotes *extra* times that the timestepping algorithm must step to.
+ This should be used to help the solver deal with discontinuities and
+ singularities, since stepping exactly at the time of the discontinuity will
+ improve accuracy. If a method cannot change timesteps (fixed timestep
+ multistep methods), then `tstops` will use an interpolation,
+ matching the behavior of `saveat`. If a method cannot change timesteps and
+ also cannot interpolate, then `tstops` must be a multiple of `dt` or else an
+ error will be thrown. Default is `[]`.
+  - `d_discontinuities`: Denotes locations of discontinuities in low order derivatives.
+ This will force FSAL algorithms which assume derivative continuity to re-evaluate
+ the derivatives at the point of discontinuity. The default is `[]`.
+ - `save_everystep`: Saves the result at every step.
+ Default is true if `isempty(saveat)`.
+ - `save_on`: Denotes whether intermediate solutions are saved. This overrides the
+ settings of `dense`, `saveat` and `save_everystep` and is used by some applications
+ to manually turn off saving temporarily. Everyday use of the solvers should leave
+ this unchanged. Defaults to `true`.
+ - `save_start`: Denotes whether the initial condition should be included in
+ the solution type as the first timepoint. Defaults to `true`.
+ - `save_end`: Denotes whether the final timepoint is forced to be saved,
+ regardless of the other saving settings. Defaults to `true`.
+ - `initialize_save`: Denotes whether to save after the callback initialization
+ phase (when `u_modified=true`). Defaults to `true`.
Note that `dense` requires `save_everystep=true` and `saveat=false`.
@@ -73,57 +73,57 @@ These arguments control the timestepping routines.
#### Basic Stepsize Control
-* `adaptive`: Turns on adaptive timestepping for appropriate methods. Default
- is true.
-* `abstol`: Absolute tolerance in adaptive timestepping. This is the tolerance
- on local error estimates, not necessarily the global error (though these quantities
- are related).
-* `reltol`: Relative tolerance in adaptive timestepping. This is the tolerance
- on local error estimates, not necessarily the global error (though these quantities
- are related).
-* `dt`: Sets the initial stepsize. This is also the stepsize for fixed
- timestep methods. Defaults to an automatic choice if the method is adaptive.
-* `dtmax`: Maximum dt for adaptive timestepping. Defaults are
- package-dependent.
-* `dtmin`: Minimum dt for adaptive timestepping. Defaults are
- package-dependent.
+ - `adaptive`: Turns on adaptive timestepping for appropriate methods. Default
+ is true.
+ - `abstol`: Absolute tolerance in adaptive timestepping. This is the tolerance
+ on local error estimates, not necessarily the global error (though these quantities
+ are related).
+ - `reltol`: Relative tolerance in adaptive timestepping. This is the tolerance
+ on local error estimates, not necessarily the global error (though these quantities
+ are related).
+ - `dt`: Sets the initial stepsize. This is also the stepsize for fixed
+ timestep methods. Defaults to an automatic choice if the method is adaptive.
+ - `dtmax`: Maximum dt for adaptive timestepping. Defaults are
+ package-dependent.
+ - `dtmin`: Minimum dt for adaptive timestepping. Defaults are
+ package-dependent.
#### Fixed Stepsize Usage
Note that if a method does not have adaptivity, the following rules apply:
-* If `dt` is set, then the algorithm will step with size `dt` each iteration.
-* If `tstops` and `dt` are both set, then the algorithm will step with either a
- size `dt`, or use a smaller step to hit the `tstops` point.
-* If `tstops` is set without `dt`, then the algorithm will step directly to
- each value in `tstops`
-* If neither `dt` nor `tstops` are set, the solver will throw an error.
+ - If `dt` is set, then the algorithm will step with size `dt` each iteration.
+ - If `tstops` and `dt` are both set, then the algorithm will step with either a
+ size `dt`, or use a smaller step to hit the `tstops` point.
+ - If `tstops` is set without `dt`, then the algorithm will step directly to
+ each value in `tstops`.
+ - If neither `dt` nor `tstops` are set, the solver will throw an error.
## Memory Optimizations
-* `alias_u0`: allows the solver to alias the initial condition array that is contained
- in the problem struct. Defaults to false.
-* `cache`: pass a solver cache to decrease the construction time. This is not implemented
- for any of the problem interfaces at this moment.
+ - `alias_u0`: allows the solver to alias the initial condition array that is contained
+ in the problem struct. Defaults to false.
+ - `cache`: pass a solver cache to decrease the construction time. This is not implemented
+ for any of the problem interfaces at this moment.
## Miscellaneous
-* `maxiters`: Maximum number of iterations before stopping.
-* `callback`: Specifies a callback function that is called between iterations.
-* `verbose`: Toggles whether warnings are thrown when the solver exits early.
- Defaults to true.
+ - `maxiters`: Maximum number of iterations before stopping.
+ - `callback`: Specifies a callback function that is called between iterations.
+ - `verbose`: Toggles whether warnings are thrown when the solver exits early.
+ Defaults to true.
## Progress Monitoring
These arguments control the usage of the progressbar in the logger.
-* `progress`: Turns on/off the Juno progressbar. Default is false.
-* `progress_steps`: Numbers of steps between updates of the progress bar.
- Default is 1000.
-* `progress_name`: Controls the name of the progressbar. Default is the name
- of the problem type.
-* `progress_message`: Controls the message with the progressbar. Defaults to
- showing `dt`, `t`, the maximum of `u`.
+ - `progress`: Turns on/off the Juno progressbar. Default is false.
+ - `progress_steps`: Numbers of steps between updates of the progress bar.
+ Default is 1000.
+ - `progress_name`: Controls the name of the progressbar. Default is the name
+ of the problem type.
+ - `progress_message`: Controls the message with the progressbar. Defaults to
+ showing `dt`, `t`, the maximum of `u`.
The progress bars all use the Julia Logging interface in order to be generic
to the IDE or programming tool that is used. For more information on how this
@@ -135,8 +135,8 @@ If you are using the test problems (i.e. `SciMLFunction`s where `f.analytic` is
defined), then options control the errors which are calculated. By default,
any cheap error estimates are always calculated. Extra keyword arguments include:
-* `timeseries_errors`
-* `dense_errors`
+ - `timeseries_errors`
+ - `dense_errors`
for specifying more expensive errors.
diff --git a/docs/src/interfaces/Differentiation.md b/docs/src/interfaces/Differentiation.md
index 66c0f8b971..dc78b4e973 100644
--- a/docs/src/interfaces/Differentiation.md
+++ b/docs/src/interfaces/Differentiation.md
@@ -7,14 +7,20 @@ of automatic differentiation overloads to dispatches defined in DiffEqSensitivit
a top-level `solve` definition, for example:
```julia
-function solve(prob::AbstractDEProblem, args...; sensealg=nothing,
- u0=nothing, p=nothing, kwargs...)
- u0 = u0 !== nothing ? u0 : prob.u0
- p = p !== nothing ? p : prob.p
- if sensealg === nothing && haskey(prob.kwargs, :sensealg)
- sensealg = prob.kwargs[:sensealg]
- end
- solve_up(prob, sensealg, u0, p, args...; kwargs...)
+function solve(
+ prob::AbstractDEProblem,
+ args...;
+ sensealg = nothing,
+ u0 = nothing,
+ p = nothing,
+ kwargs...,
+)
+ u0 = u0 !== nothing ? u0 : prob.u0
+ p = p !== nothing ? p : prob.p
+ if sensealg === nothing && haskey(prob.kwargs, :sensealg)
+ sensealg = prob.kwargs[:sensealg]
+ end
+ solve_up(prob, sensealg, u0, p, args...; kwargs...)
end
```
@@ -23,18 +29,28 @@ is required for the [ChainRules.jl](https://juliadiff.org/ChainRulesCore.jl/stab
interface. Then the `ChainRules` overloads are written on the `solve_up` calls, like:
```julia
-function ChainRulesCore.frule(::typeof(solve_up), prob,
- sensealg::Union{Nothing,AbstractSensitivityAlgorithm},
- u0, p, args...;
- kwargs...)
- _solve_forward(prob, sensealg, u0, p, args...; kwargs...)
+function ChainRulesCore.frule(
+ ::typeof(solve_up),
+ prob,
+ sensealg::Union{Nothing, AbstractSensitivityAlgorithm},
+ u0,
+ p,
+ args...;
+ kwargs...,
+)
+ _solve_forward(prob, sensealg, u0, p, args...; kwargs...)
end
-function ChainRulesCore.rrule(::typeof(solve_up), prob::SciMLBase.AbstractDEProblem,
- sensealg::Union{Nothing,AbstractSensitivityAlgorithm},
- u0, p, args...;
- kwargs...)
- _solve_adjoint(prob, sensealg, u0, p, args...; kwargs...)
+function ChainRulesCore.rrule(
+ ::typeof(solve_up),
+ prob::SciMLBase.AbstractDEProblem,
+ sensealg::Union{Nothing, AbstractSensitivityAlgorithm},
+ u0,
+ p,
+ args...;
+ kwargs...,
+)
+ _solve_adjoint(prob, sensealg, u0, p, args...; kwargs...)
end
```
@@ -43,11 +59,11 @@ mechanism is not added:
```julia
function _concrete_solve_adjoint(args...; kwargs...)
- error("No adjoint rules exist. Check that you added `using DiffEqSensitivity`")
+ error("No adjoint rules exist. Check that you added `using DiffEqSensitivity`")
end
function _concrete_solve_forward(args...; kwargs...)
- error("No sensitivity rules exist. Check that you added `using DiffEqSensitivity`")
+ error("No sensitivity rules exist. Check that you added `using DiffEqSensitivity`")
end
```
diff --git a/docs/src/interfaces/Init_Solve.md b/docs/src/interfaces/Init_Solve.md
index deac3a7e16..840749c7f2 100644
--- a/docs/src/interfaces/Init_Solve.md
+++ b/docs/src/interfaces/Init_Solve.md
@@ -9,8 +9,8 @@ solve(args...; kwargs...) = solve!(init(args...; kwargs...))
The interface for the three functions is as follows:
```julia
-init(::ProblemType, args...; kwargs...) :: IteratorType
-solve!(::IteratorType) :: SolutionType
+init(::ProblemType, args...; kwargs...)::IteratorType
+solve!(::IteratorType)::SolutionType
```
where `ProblemType`, `IteratorType`, and `SolutionType` are the types defined in
@@ -30,10 +30,10 @@ init(::AbstractVector, ::AlgorithmType)
have more direct handling over the internal solving process. Because of this
internal nature, the `IteratorType` has a less unified interface across problem
types than other portions like `ProblemType` and `SolutionType`. For example,
-for differential equations this is the
+for differential equations this is the
[Integrator Interface](https://docs.sciml.ai/DiffEqDocs/stable/basics/integrator/)
designed for mutating solutions in a manner for callback implementation, which
-is distinctly different from the
+is distinctly different from the
[LinearSolve init interface](https://docs.sciml.ai/LinearSolve/stable/tutorials/caching_interface)
which is designed for caching efficiency with reusing factorizations.
diff --git a/docs/src/interfaces/PDE.md b/docs/src/interfaces/PDE.md
index a85c926581..ba7812ea0b 100644
--- a/docs/src/interfaces/PDE.md
+++ b/docs/src/interfaces/PDE.md
@@ -46,24 +46,24 @@ domain type. A 2-tuple can be used to indicate an `Interval`.
Thus forms for the `indepvar` can be like:
```julia
-t ∈ (0.0,1.0)
-(t,x) ∈ UnitDisk()
-[v,w,x,y,z] ∈ VectorUnitBall(5)
+t ∈ (0.0, 1.0)
+(t, x) ∈ UnitDisk()
+[v, w, x, y, z] ∈ VectorUnitBall(5)
```
#### Domain Types (WIP)
-- `Interval(a,b)`: Defines the domain of an interval from `a` to `b` (requires explicit
-import from `DomainSets.jl`, but a 2-tuple can be used instead)
+ - `Interval(a,b)`: Defines the domain of an interval from `a` to `b` (requires explicit
+ import from `DomainSets.jl`, but a 2-tuple can be used instead)
## `discretize` and `symbolic_discretize`
The only functions which act on a PDESystem are the following:
-- `discretize(sys,discretizer)`: produces the outputted `AbstractSystem` or
- `AbstractSciMLProblem`.
-- `symbolic_discretize(sys,discretizer)`: produces a debugging symbolic description
- of the discretized problem.
+ - `discretize(sys,discretizer)`: produces the outputted `AbstractSystem` or
+ `AbstractSciMLProblem`.
+ - `symbolic_discretize(sys,discretizer)`: produces a debugging symbolic description
+ of the discretized problem.
## Boundary Conditions (WIP)
diff --git a/docs/src/interfaces/Problems.md b/docs/src/interfaces/Problems.md
index 4e50a58b26..7f13697cad 100644
--- a/docs/src/interfaces/Problems.md
+++ b/docs/src/interfaces/Problems.md
@@ -20,8 +20,8 @@ The following standard principles should be adhered to across all
Each `AbstractSciMLProblem` type can be called with an "is inplace" (iip) choice. For example:
```julia
-ODEProblem(f,u0,tspan,p)
-ODEProblem{iip}(f,u0,tspan,p)
+ODEProblem(f, u0, tspan, p)
+ODEProblem{iip}(f, u0, tspan, p)
```
which is a boolean for whether the function is in the inplace form (mutating to
@@ -44,8 +44,8 @@ scenarios but falls back to a runtime-optimal approach when further customizatio
Specialization levels are given as the second type parameter in `AbstractSciMLProblem`
constructors. For example, this is done via:
-```julia
-ODEProblem{iip,specialization}(f,u0,tspan,p)
+```julia
+ODEProblem{iip, specialization}(f, u0, tspan, p)
```
Note that `iip` choice is required for specialization choices to be made.
@@ -61,12 +61,12 @@ SciMLBase.FullSpecialize
```
!!! note
-
+
The specialization level must be precompile snooped in the appropriate solver
package in order to enable the full precompilation and system image generation
for zero-latency usage. By default, this is only done with AutoSpecialize and
on types `u isa Vector{Float64}`, `eltype(tspan) isa Float64`, and
- `p isa Union{Vector{Float64}, SciMLBase.NullParameters}`. Precompilation snooping
+ `p isa Union{Vector{Float64}, SciMLBase.NullParameters}`. Precompilation snooping
in the solvers can be done using the Preferences.jl setup on the appropriate
solver. See the solver library's documentation for more details.
@@ -117,7 +117,10 @@ shows how to set the specialization default to `FullSpecialize`:
```julia
using Preferences, UUIDs
-set_preferences!(UUID("0bca4576-84f4-4d90-8ffe-ffa030f20462"), "SpecializationLevel" => "FullSpecialize")
+set_preferences!(
+ UUID("0bca4576-84f4-4d90-8ffe-ffa030f20462"),
+ "SpecializationLevel" => "FullSpecialize",
+)
```
The default is `AutoSpecialize`.
diff --git a/docs/src/interfaces/SciMLFunctions.md b/docs/src/interfaces/SciMLFunctions.md
index ec75cfb132..9f062bff72 100644
--- a/docs/src/interfaces/SciMLFunctions.md
+++ b/docs/src/interfaces/SciMLFunctions.md
@@ -15,23 +15,23 @@ The following standard principles should be adhered to across all
The full interface available to the solvers is as follows:
-- `jac`: The Jacobian of the differential equation with respect to the state
- variable `u` at a time `t` with parameters `p`.
-- `paramjac`: The Jacobian of the differential equation with respect to `p` at
- state `u` at time `t`.
-- `analytic`: Defines an analytical solution using `u0` at time `t` with `p`
- which will cause the solvers to return errors. Used for testing.
-- `syms`: Allows you to name your variables for automatic names in plots and
- other output.
-- `jac_prototype`: Defines the type to be used for any internal Jacobians
- within the solvers.
-- `sparsity`: Defines the sparsity pattern to be used for the sparse differentiation
- schemes. By default this is equal to `jac_prototype`. See the sparsity handling
- portion of this page for more information.
-- `colorvec`: The coloring pattern used by the sparse differentiator. See the
- sparsity handling portion of this page for more information.
-- `observed`: A function which allows for generating other observables from a
- solution.
+ - `jac`: The Jacobian of the differential equation with respect to the state
+ variable `u` at a time `t` with parameters `p`.
+ - `paramjac`: The Jacobian of the differential equation with respect to `p` at
+ state `u` at time `t`.
+ - `analytic`: Defines an analytical solution using `u0` at time `t` with `p`
+ which will cause the solvers to return errors. Used for testing.
+ - `syms`: Allows you to name your variables for automatic names in plots and
+ other output.
+ - `jac_prototype`: Defines the type to be used for any internal Jacobians
+ within the solvers.
+ - `sparsity`: Defines the sparsity pattern to be used for the sparse differentiation
+ schemes. By default this is equal to `jac_prototype`. See the sparsity handling
+ portion of this page for more information.
+ - `colorvec`: The coloring pattern used by the sparse differentiator. See the
+ sparsity handling portion of this page for more information.
+ - `observed`: A function which allows for generating other observables from a
+ solution.
Each function type additionally has some specific arguments, refer to their
documentation for details.
@@ -55,7 +55,7 @@ be specified.
Each `SciMLFunction` type allows for specialization choices
```julia
-ODEFunction{iip,specialization}(f)
+ODEFunction{iip, specialization}(f)
```
which designates how the compiler should specialize on the model function `f`. For
@@ -73,10 +73,10 @@ The following example creates an inplace `ODEFunction` whose Jacobian is a `Diag
```julia
using LinearAlgebra
-f = (du,u,p,t) -> du .= t .* u
-jac = (J,u,p,t) -> (J[1,1] = t; J[2,2] = t; J)
+f = (du, u, p, t) -> du .= t .* u
+jac = (J, u, p, t) -> (J[1, 1] = t; J[2, 2] = t; J)
jp = Diagonal(zeros(2))
-fun = ODEFunction(f; jac=jac, jac_prototype=jp)
+fun = ODEFunction(f; jac = jac, jac_prototype = jp)
```
Note that the integrators will always make a deep copy of `fun.jac_prototype`, so
diff --git a/docs/src/interfaces/Solutions.md b/docs/src/interfaces/Solutions.md
index d503a7a702..5c25e31f77 100644
--- a/docs/src/interfaces/Solutions.md
+++ b/docs/src/interfaces/Solutions.md
@@ -26,7 +26,7 @@ to access the value of `t` at timestep `j`. For multi-dimensional systems, this
will address first by component and lastly by time, and thus
```julia
-sol[i,j]
+sol[i, j]
```
will be the `i`th component at timestep `j`. Hence, `sol[j][i] == sol[i, j]`. This is done because Julia is column-major, so the leading dimension should be contiguous in memory. If the independent variables had shape
@@ -34,27 +34,27 @@ will be the `i`th component at timestep `j`. Hence, `sol[j][i] == sol[i, j]`. Th
solutions with shape:
```julia
-sol[i,k,j]
+sol[i, k, j]
```
gives the `[i,k]` component of the system at timestep `j`. The colon operator is
supported, meaning that
```julia
-sol[i,:]
+sol[i, :]
```
gives the timeseries for the `i`th component.
### Common Field Names
-- `u`: the solution values
-- `t`: the independent variable values, matching the length of the solution, if applicable
-- `resid`: the residual of the solution, if applicable
-- `original`: the solution object from the original solver, if it's a wrapper algorithm
-- `retcode`: see the documentation section on return codes
-- `prob`: the problem that was solved
-- `alg`: the algorithm used to solve the problem
+ - `u`: the solution values
+ - `t`: the independent variable values, matching the length of the solution, if applicable
+ - `resid`: the residual of the solution, if applicable
+ - `original`: the solution object from the original solver, if it's a wrapper algorithm
+ - `retcode`: see the documentation section on return codes
+ - `prob`: the problem that was solved
+ - `alg`: the algorithm used to solve the problem
## [Return Codes (RetCodes)](@id retcodes)
diff --git a/ext/SciMLBaseChainRulesCoreExt.jl b/ext/SciMLBaseChainRulesCoreExt.jl
index 51334f9378..f087466968 100644
--- a/ext/SciMLBaseChainRulesCoreExt.jl
+++ b/ext/SciMLBaseChainRulesCoreExt.jl
@@ -1,16 +1,16 @@
module SciMLBaseChainRulesCoreExt
using SciMLBase
-import ChainRulesCore
+using ChainRulesCore: ChainRulesCore
import ChainRulesCore: NoTangent, @non_differentiable
function ChainRulesCore.rrule(config::ChainRulesCore.RuleConfig{
- >:ChainRulesCore.HasReverseMode,
- },
- ::typeof(getindex),
- VA::ODESolution,
- sym,
- j::Integer)
+ >:ChainRulesCore.HasReverseMode,
+ },
+ ::typeof(getindex),
+ VA::ODESolution,
+ sym,
+ j::Integer)
function ODESolution_getindex_pullback(Δ)
i = issymbollike(sym) ? sym_to_index(sym, VA) : sym
if i === nothing
@@ -18,19 +18,41 @@ function ChainRulesCore.rrule(config::ChainRulesCore.RuleConfig{
grz = rrule_via_ad(config, getter, sym, VA.u[j], VA.prob.p, VA.t[j])[2](Δ)
du = [k == j ? grz[2] : zero(VA.u[1]) for k in 1:length(VA.u)]
dp = grz[3] # pullback for p
- dprob = remake(VA.prob, p = dp)
+ dprob = remake(VA.prob; p = dp)
T = eltype(eltype(VA.u))
N = length(VA.prob.p)
- Δ′ = ODESolution{T, N, typeof(du), Nothing, Nothing, Nothing, Nothing,
- typeof(dprob), Nothing, Nothing, Nothing, Nothing}(du, nothing,
- nothing, nothing, nothing, dprob, nothing, nothing,
- VA.dense, 0, nothing, nothing, VA.retcode)
+ Δ′ = ODESolution{
+ T,
+ N,
+ typeof(du),
+ Nothing,
+ Nothing,
+ Nothing,
+ Nothing,
+ typeof(dprob),
+ Nothing,
+ Nothing,
+ Nothing,
+ Nothing,
+ }(du,
+ nothing,
+ nothing,
+ nothing,
+ nothing,
+ dprob,
+ nothing,
+ nothing,
+ VA.dense,
+ 0,
+ nothing,
+ nothing,
+ VA.retcode)
(NoTangent(), Δ′, NoTangent(), NoTangent())
else
du = [m == j ? [i == k ? Δ : zero(VA.u[1][1]) for k in 1:length(VA.u[1])] :
zero(VA.u[1]) for m in 1:length(VA.u)]
dp = zero(VA.prob.p)
- dprob = remake(VA.prob, p = dp)
+ dprob = remake(VA.prob; p = dp)
Δ′ = ODESolution{
T,
N,
@@ -69,8 +91,8 @@ function ChainRulesCore.rrule(::typeof(getindex), VA::ODESolution, sym)
if i === nothing
throw(error("AD of purely-symbolic slicing for observed quantities is not yet supported. Work around this by using `A[sym,i]` to access each element sequentially in the function being differentiated."))
else
- Δ′ = [[i == k ? Δ[j] : zero(x[1]) for k in 1:length(x)]
- for (x, j) in zip(VA.u, 1:length(VA))]
+ Δ′ = [[i == k ? Δ[j] : zero(x[1]) for k in 1:length(x)] for
+ (x, j) in zip(VA.u, 1:length(VA))]
(NoTangent(), Δ′, NoTangent())
end
end
@@ -94,11 +116,10 @@ function ChainRulesCore.rrule(::Type{SDEProblem}, args...; kwargs...)
end
function ChainRulesCore.rrule(::Type{
- <:ODESolution{T1, T2, T3, T4, T5, T6, T7, T8, T9, T10,
- T11, T12,
- }}, u,
- args...) where {T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12}
+ <:ODESolution{T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12},
+ },
+ u,
+ args...) where {T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12}
function ODESolutionAdjoint(ȳ)
(NoTangent(), ȳ, ntuple(_ -> NoTangent(), length(args))...)
end
@@ -108,10 +129,10 @@ function ChainRulesCore.rrule(::Type{
end
function ChainRulesCore.rrule(::Type{
- <:ODESolution{uType, tType, isinplace, P, NP, F, G, K,
- ND,
- }}, u,
- args...) where {uType, tType, isinplace, P, NP, F, G, K, ND}
+ <:ODESolution{uType, tType, isinplace, P, NP, F, G, K, ND},
+ },
+ u,
+ args...) where {uType, tType, isinplace, P, NP, F, G, K, ND}
function SDESolutionAdjoint(ȳ)
(NoTangent(), ȳ, ntuple(_ -> NoTangent(), length(args))...)
end
@@ -132,4 +153,4 @@ function ChainRulesCore.rrule(::SciMLBase.EnsembleSolution, sim, time, converged
out, EnsembleSolution_adjoint
end
-end
\ No newline at end of file
+end
diff --git a/ext/SciMLBasePythonCallExt.jl b/ext/SciMLBasePythonCallExt.jl
index 7426e4037f..6b5d920a77 100644
--- a/ext/SciMLBasePythonCallExt.jl
+++ b/ext/SciMLBasePythonCallExt.jl
@@ -13,7 +13,10 @@ function SciMLBase.numargs(f::Py)
pyconvert(Int, length(first(inspect.getfullargspec(f2))) - inspect.ismethod(f2))
end
-_pyconvert(x::Py) = pyisinstance(x, pybuiltins.list) ? _promoting_collect(_pyconvert(x) for x in x) : pyconvert(Any, x)
+function _pyconvert(x::Py)
+ pyisinstance(x, pybuiltins.list) ? _promoting_collect(_pyconvert(x) for x in x) :
+ pyconvert(Any, x)
+end
_pyconvert(x::PyList) = _promoting_collect(_pyconvert(x) for x in x)
_pyconvert(x) = x
diff --git a/ext/SciMLBaseRCallExt.jl b/ext/SciMLBaseRCallExt.jl
index 38a61a7f2c..1eda58cbee 100644
--- a/ext/SciMLBaseRCallExt.jl
+++ b/ext/SciMLBaseRCallExt.jl
@@ -8,4 +8,4 @@ function SciMLBase.isinplace(f::RFunction, args...; kwargs...)
false
end
-end
\ No newline at end of file
+end
diff --git a/ext/SciMLBaseZygoteExt.jl b/ext/SciMLBaseZygoteExt.jl
index c0171f4d4a..6a9bd5214c 100644
--- a/ext/SciMLBaseZygoteExt.jl
+++ b/ext/SciMLBaseZygoteExt.jl
@@ -4,9 +4,16 @@ using Zygote
using Zygote: @adjoint, pullback
import Zygote: literal_getproperty
using SciMLBase
-using SciMLBase: ODESolution, issymbollike, sym_to_index, remake,
- getobserved, build_solution, EnsembleSolution,
- NonlinearSolution, AbstractTimeseriesSolution
+using SciMLBase:
+ ODESolution,
+ issymbollike,
+ sym_to_index,
+ remake,
+ getobserved,
+ build_solution,
+ EnsembleSolution,
+ NonlinearSolution,
+ AbstractTimeseriesSolution
# This method resolves the ambiguity with the pullback defined in
# RecursiveArrayToolsZygoteExt
@@ -16,15 +23,36 @@ using SciMLBase: ODESolution, issymbollike, sym_to_index, remake,
du = [m == j ? [i == k ? Δ : zero(VA.u[1][1]) for k in 1:length(VA.u[1])] :
zero(VA.u[1]) for m in 1:length(VA.u)]
dp = zero(VA.prob.p)
- dprob = remake(VA.prob, p = dp)
+ dprob = remake(VA.prob; p = dp)
du, dprob
T = eltype(eltype(VA.u))
N = length(VA.prob.p)
- Δ′ = ODESolution{T, N, typeof(du), Nothing, Nothing, typeof(VA.t),
- typeof(VA.k), typeof(dprob), typeof(VA.alg), typeof(VA.interp),
- typeof(VA.destats), typeof(VA.alg_choice)}(du, nothing, nothing,
- VA.t, VA.k, dprob, VA.alg, VA.interp, VA.dense, 0, VA.destats,
- VA.alg_choice, VA.retcode)
+ Δ′ = ODESolution{
+ T,
+ N,
+ typeof(du),
+ Nothing,
+ Nothing,
+ typeof(VA.t),
+ typeof(VA.k),
+ typeof(dprob),
+ typeof(VA.alg),
+ typeof(VA.interp),
+ typeof(VA.destats),
+ typeof(VA.alg_choice),
+ }(du,
+ nothing,
+ nothing,
+ VA.t,
+ VA.k,
+ dprob,
+ VA.alg,
+ VA.interp,
+ VA.dense,
+ 0,
+ VA.destats,
+ VA.alg_choice,
+ VA.retcode)
(Δ′, nothing, nothing)
end
VA[i, j], ODESolution_getindex_pullback
@@ -38,22 +66,44 @@ end
grz = pullback(getter, sym, VA.u[j], VA.prob.p, VA.t[j])[2](Δ)
du = [k == j ? grz[2] : zero(VA.u[1]) for k in 1:length(VA.u)]
dp = grz[3] # pullback for p
- dprob = remake(VA.prob, p = dp)
+ dprob = remake(VA.prob; p = dp)
du, dprob
else
- du = [m == j ? [i == k ? Δ : zero(VA.u[1][1]) for k in 1:length(VA.u[1])] :
+ du = [m == j ?
+ [i == k ? Δ : zero(VA.u[1][1]) for k in 1:length(VA.u[1])] :
zero(VA.u[1]) for m in 1:length(VA.u)]
dp = zero(VA.prob.p)
- dprob = remake(VA.prob, p = dp)
+ dprob = remake(VA.prob; p = dp)
du, dprob
end
T = eltype(eltype(VA.u))
N = length(VA.prob.p)
- Δ′ = ODESolution{T, N, typeof(du), Nothing, Nothing, typeof(VA.t),
- typeof(VA.k), typeof(dprob), typeof(VA.alg), typeof(VA.interp),
- typeof(VA.destats), typeof(VA.alg_choice)}(du, nothing, nothing,
- VA.t, VA.k, dprob, VA.alg, VA.interp, VA.dense, 0, VA.destats,
- VA.alg_choice, VA.retcode)
+ Δ′ = ODESolution{
+ T,
+ N,
+ typeof(du),
+ Nothing,
+ Nothing,
+ typeof(VA.t),
+ typeof(VA.k),
+ typeof(dprob),
+ typeof(VA.alg),
+ typeof(VA.interp),
+ typeof(VA.destats),
+ typeof(VA.alg_choice),
+ }(du,
+ nothing,
+ nothing,
+ VA.t,
+ VA.k,
+ dprob,
+ VA.alg,
+ VA.interp,
+ VA.dense,
+ 0,
+ VA.destats,
+ VA.alg_choice,
+ VA.retcode)
(Δ′, nothing, nothing)
end
VA[sym, j], ODESolution_getindex_pullback
@@ -77,15 +127,14 @@ end
@adjoint function getindex(VA::ODESolution, i::Int)
function ODESolution_getindex_pullback(Δ)
- Δ′ = [(i == j ? Δ : Zygote.FillArrays.Fill(zero(eltype(x)), size(x)))
- for (x, j) in zip(VA.u, 1:length(VA))]
+ Δ′ = [(i == j ? Δ : Zygote.FillArrays.Fill(zero(eltype(x)), size(x))) for
+ (x, j) in zip(VA.u, 1:length(VA))]
(Δ′, nothing)
end
VA[i], ODESolution_getindex_pullback
end
-@adjoint function Zygote.literal_getproperty(sim::EnsembleSolution,
- ::Val{:u})
+@adjoint function Zygote.literal_getproperty(sim::EnsembleSolution, ::Val{:u})
sim.u, p̄ -> (EnsembleSolution(p̄, 0.0, true, sim.stats),)
end
@@ -95,29 +144,26 @@ end
if i === nothing
throw(error("Zygote AD of purely-symbolic slicing for observed quantities is not yet supported. Work around this by using `A[sym,i]` to access each element sequentially in the function being differentiated."))
else
- Δ′ = [[i == k ? Δ[j] : zero(x[1]) for k in 1:length(x)]
- for (x, j) in zip(VA.u, 1:length(VA))]
+ Δ′ = [[i == k ? Δ[j] : zero(x[1]) for k in 1:length(x)] for
+ (x, j) in zip(VA.u, 1:length(VA))]
(Δ′, nothing)
end
end
VA[sym], ODESolution_getindex_pullback
end
-@adjoint function ODESolution{T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12
- }(u,
- args...) where {T1, T2, T3, T4, T5, T6, T7, T8,
- T9, T10, T11, T12}
- function ODESolutionAdjoint(ȳ)
- (ȳ, ntuple(_ -> nothing, length(args))...)
- end
-
- ODESolution{T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12}(u, args...),
- ODESolutionAdjoint
+@adjoint function ODESolution{T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12}(u,
+ args...) where {T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12}
+ function ODESolutionAdjoint(ȳ)
+ (ȳ, ntuple(_ -> nothing, length(args))...)
+ end
+
+ ODESolution{T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12}(u, args...),
+ ODESolutionAdjoint
end
@adjoint function SDEProblem{uType, tType, isinplace, P, NP, F, G, K, ND}(u,
- args...) where
- {uType, tType, isinplace, P, NP, F, G, K, ND}
+ args...) where {uType, tType, isinplace, P, NP, F, G, K, ND}
function SDESolutionAdjoint(ȳ)
(ȳ, ntuple(_ -> nothing, length(args))...)
end
@@ -126,24 +172,14 @@ end
end
@adjoint function NonlinearSolution{T, N, uType, R, P, A, O, uType2}(u,
- args...) where {
- T,
- N,
- uType,
- R,
- P,
- A,
- O,
- uType2,
-}
+ args...) where {T, N, uType, R, P, A, O, uType2}
function NonlinearSolutionAdjoint(ȳ)
(ȳ, ntuple(_ -> nothing, length(args))...)
end
NonlinearSolution{T, N, uType, R, P, A, O, uType2}(u, args...), NonlinearSolutionAdjoint
end
-@adjoint function literal_getproperty(sol::AbstractTimeseriesSolution,
- ::Val{:u})
+@adjoint function literal_getproperty(sol::AbstractTimeseriesSolution, ::Val{:u})
function solu_adjoint(Δ)
zerou = zero(sol.prob.u0)
_Δ = @. ifelse(Δ === nothing, (zerou,), Δ)
@@ -152,8 +188,7 @@ end
sol.u, solu_adjoint
end
-@adjoint function literal_getproperty(sol::SciMLBase.AbstractNoTimeSolution,
- ::Val{:u})
+@adjoint function literal_getproperty(sol::SciMLBase.AbstractNoTimeSolution, ::Val{:u})
function solu_adjoint(Δ)
zerou = zero(sol.prob.u0)
_Δ = @. ifelse(Δ === nothing, zerou, Δ)
@@ -162,8 +197,7 @@ end
sol.u, solu_adjoint
end
-@adjoint function literal_getproperty(sol::SciMLBase.OptimizationSolution,
- ::Val{:u})
+@adjoint function literal_getproperty(sol::SciMLBase.OptimizationSolution, ::Val{:u})
function solu_adjoint(Δ)
zerou = zero(sol.u)
_Δ = @. ifelse(Δ === nothing, zerou, Δ)
@@ -199,8 +233,7 @@ function ∇responsible_map(cx, f, args...)
function ∇responsible_map_internal(Δ)
# Apply pullbacks in reverse order. Needed for correctness if `f` is stateful.
Δf_and_args_zipped = SciMLBase.responsible_map((f, δ) -> f(δ),
- Zygote._tryreverse(SciMLBase.responsible_map,
- backs, Δ)...)
+ Zygote._tryreverse(SciMLBase.responsible_map, backs, Δ)...)
Δf_and_args = Zygote.unzip(Zygote._tryreverse(SciMLBase.responsible_map,
Δf_and_args_zipped))
Δf = reduce(Zygote.accum, Δf_and_args[1])
@@ -213,9 +246,7 @@ end
∇tmap(__context__, f, args...)
end
-@adjoint function SciMLBase.responsible_map(f,
- args::Union{AbstractArray, Tuple
- }...)
+@adjoint function SciMLBase.responsible_map(f, args::Union{AbstractArray, Tuple}...)
∇responsible_map(__context__, f, args...)
end
diff --git a/src/SciMLBase.jl b/src/SciMLBase.jl
index 28a832772c..6f8086d30e 100644
--- a/src/SciMLBase.jl
+++ b/src/SciMLBase.jl
@@ -1,6 +1,5 @@
module SciMLBase
-if isdefined(Base, :Experimental) &&
- isdefined(Base.Experimental, Symbol("@max_methods"))
+if isdefined(Base, :Experimental) && isdefined(Base.Experimental, Symbol("@max_methods"))
@eval Base.Experimental.@max_methods 1
end
using ConstructionBase
@@ -12,31 +11,48 @@ using Statistics
using Distributed
using Markdown
using Printf
-import Preferences
+using Preferences: Preferences
-import Logging, ArrayInterface
-import IteratorInterfaceExtensions
+using Logging: Logging
+using ArrayInterface: ArrayInterface
+using IteratorInterfaceExtensions: IteratorInterfaceExtensions
import CommonSolve: solve, init, step!, solve!
-import FunctionWrappersWrappers
-import RuntimeGeneratedFunctions
-import EnumX
-import TruncatedStacktraces
+using FunctionWrappersWrappers: FunctionWrappersWrappers
+using RuntimeGeneratedFunctions: RuntimeGeneratedFunctions
+using EnumX: EnumX
+using TruncatedStacktraces: TruncatedStacktraces
import ADTypes: AbstractADType
-import FillArrays
-import QuasiMonteCarlo
+using FillArrays: FillArrays
+using QuasiMonteCarlo: QuasiMonteCarlo
using Reexport
using SciMLOperators
using SciMLOperators:
AbstractSciMLOperator,
- IdentityOperator, NullOperator,
- ScaledOperator, AddedOperator, ComposedOperator,
- InvertedOperator, InvertibleOperator
+ IdentityOperator,
+ NullOperator,
+ ScaledOperator,
+ AddedOperator,
+ ComposedOperator,
+ InvertedOperator,
+ InvertibleOperator
import SciMLOperators:
- DEFAULT_UPDATE_FUNC, update_coefficients, update_coefficients!,
- getops, isconstant, iscached, islinear, issquare,
- has_adjoint, has_expmv, has_expmv!, has_exp,
- has_mul, has_mul!, has_ldiv, has_ldiv!
+ DEFAULT_UPDATE_FUNC,
+ update_coefficients,
+ update_coefficients!,
+ getops,
+ isconstant,
+ iscached,
+ islinear,
+ issquare,
+ has_adjoint,
+ has_expmv,
+ has_expmv!,
+ has_exp,
+ has_mul,
+ has_mul!,
+ has_ldiv,
+ has_ldiv!
@reexport using SciMLOperators
@@ -104,10 +120,11 @@ Base for types which define nonlinear solve problems (f(u)=0).
"""
abstract type AbstractNonlinearProblem{uType, isinplace} <: AbstractDEProblem end
abstract type AbstractIntervalNonlinearProblem{uType, isinplace} <:
- AbstractNonlinearProblem{uType,
- isinplace} end
-const AbstractSteadyStateProblem{uType, isinplace} = AbstractNonlinearProblem{uType,
- isinplace}
+ AbstractNonlinearProblem{uType, isinplace} end
+const AbstractSteadyStateProblem{uType, isinplace} = AbstractNonlinearProblem{
+ uType,
+ isinplace,
+}
"""
$(TYPEDEF)
@@ -474,10 +491,12 @@ Union of all base solution types.
Uses a Union so that solution types can be `<: AbstractArray`
"""
-const AbstractSciMLSolution = Union{AbstractTimeseriesSolution,
+const AbstractSciMLSolution = Union{
+ AbstractTimeseriesSolution,
AbstractNoTimeSolution,
AbstractEnsembleSolution,
- AbstractNoiseProcess}
+ AbstractNoiseProcess,
+}
"""
$(TYPEDEF)
@@ -539,14 +558,15 @@ abstract type AbstractPDETimeSeriesSolution{T, N, S, D} <:
"""
$(TYPEDEF)
"""
-abstract type AbstractPDENoTimeSolution{T, N, S, D} <:
- AbstractNoTimeSolution{T, N} end
+abstract type AbstractPDENoTimeSolution{T, N, S, D} <: AbstractNoTimeSolution{T, N} end
"""
$(TYPEDEF)
"""
-const AbstractPDESolution{T, N, S, D} = Union{AbstractPDETimeSeriesSolution{T, N, S, D},
- AbstractPDENoTimeSolution{T, N, S, D}}
+const AbstractPDESolution{T, N, S, D} = Union{
+ AbstractPDETimeSeriesSolution{T, N, S, D},
+ AbstractPDENoTimeSolution{T, N, S, D},
+}
"""
$(TYPEDEF)
@@ -582,16 +602,14 @@ $(TYPEDEF)
Base for types defining differential equation functions.
"""
-abstract type AbstractDiffEqFunction{iip} <:
- AbstractSciMLFunction{iip} end
+abstract type AbstractDiffEqFunction{iip} <: AbstractSciMLFunction{iip} end
"""
$(TYPEDEF)
Base for types defining integrand functions.
"""
-abstract type AbstractIntegralFunction{iip} <:
- AbstractSciMLFunction{iip} end
+abstract type AbstractIntegralFunction{iip} <: AbstractSciMLFunction{iip} end
"""
$(TYPEDEF)
@@ -652,24 +670,26 @@ function unwrapped_f(f::FunctionWrappersWrappers.FunctionWrappersWrapper)
unwrapped_f(f.fw[1].obj[])
end
-function specialization(::Union{ODEFunction{iip, specialize},
- SDEFunction{iip, specialize}, DDEFunction{iip, specialize},
- SDDEFunction{iip, specialize},
- DAEFunction{iip, specialize},
- DynamicalODEFunction{iip, specialize},
- SplitFunction{iip, specialize},
- DynamicalSDEFunction{iip, specialize},
- SplitSDEFunction{iip, specialize},
- DynamicalDDEFunction{iip, specialize},
- DiscreteFunction{iip, specialize},
- ImplicitDiscreteFunction{iip, specialize},
- RODEFunction{iip, specialize},
- NonlinearFunction{iip, specialize},
- OptimizationFunction{iip, specialize},
- BVPFunction{iip, specialize},
- IntegralFunction{iip, specialize},
- BatchIntegralFunction{iip, specialize}}) where {iip,
- specialize}
+function specialization(::Union{
+ ODEFunction{iip, specialize},
+ SDEFunction{iip, specialize},
+ DDEFunction{iip, specialize},
+ SDDEFunction{iip, specialize},
+ DAEFunction{iip, specialize},
+ DynamicalODEFunction{iip, specialize},
+ SplitFunction{iip, specialize},
+ DynamicalSDEFunction{iip, specialize},
+ SplitSDEFunction{iip, specialize},
+ DynamicalDDEFunction{iip, specialize},
+ DiscreteFunction{iip, specialize},
+ ImplicitDiscreteFunction{iip, specialize},
+ RODEFunction{iip, specialize},
+ NonlinearFunction{iip, specialize},
+ OptimizationFunction{iip, specialize},
+ BVPFunction{iip, specialize},
+ IntegralFunction{iip, specialize},
+ BatchIntegralFunction{iip, specialize},
+ }) where {iip, specialize}
specialize
end
@@ -731,7 +751,7 @@ include("callbacks.jl")
include("deprecated.jl")
-import PrecompileTools
+using PrecompileTools: PrecompileTools
PrecompileTools.@compile_workload begin
function lorenz(du, u, p, t)
@@ -769,8 +789,12 @@ export isinplace
export solve, solve!, init, discretize, symbolic_discretize
-export LinearProblem, NonlinearProblem, IntervalNonlinearProblem,
- IntegralProblem, SampledIntegralProblem, OptimizationProblem,
+export LinearProblem,
+ NonlinearProblem,
+ IntervalNonlinearProblem,
+ IntegralProblem,
+ SampledIntegralProblem,
+ OptimizationProblem,
NonlinearLeastSquaresProblem
export DiscreteProblem, ImplicitDiscreteProblem
@@ -778,15 +802,13 @@ export SteadyStateProblem, SteadyStateSolution
export NoiseProblem
export ODEProblem, ODESolution
export DynamicalODEFunction,
- DynamicalODEProblem,
- SecondOrderODEProblem, SplitFunction, SplitODEProblem
+ DynamicalODEProblem, SecondOrderODEProblem, SplitFunction, SplitODEProblem
export SplitSDEProblem
export DynamicalSDEFunction, DynamicalSDEProblem
export RODEProblem, RODESolution, SDEProblem
export DAEProblem, DAESolution
export DDEProblem
-export DynamicalDDEFunction, DynamicalDDEProblem,
- SecondOrderDDEProblem
+export DynamicalDDEFunction, DynamicalDDEProblem, SecondOrderDDEProblem
export SDDEProblem
export PDEProblem
export IncrementingODEProblem
@@ -795,10 +817,22 @@ export BVProblem, TwoPointBVProblem
export remake
-export ODEFunction, DiscreteFunction, ImplicitDiscreteFunction, SplitFunction, DAEFunction,
- DDEFunction, SDEFunction, SplitSDEFunction, RODEFunction, SDDEFunction,
- IncrementingODEFunction, NonlinearFunction, IntervalNonlinearFunction, BVPFunction,
- IntegralFunction, BatchIntegralFunction
+export ODEFunction,
+ DiscreteFunction,
+ ImplicitDiscreteFunction,
+ SplitFunction,
+ DAEFunction,
+ DDEFunction,
+ SDEFunction,
+ SplitSDEFunction,
+ RODEFunction,
+ SDDEFunction,
+ IncrementingODEFunction,
+ NonlinearFunction,
+ IntervalNonlinearFunction,
+ BVPFunction,
+ IntegralFunction,
+ BatchIntegralFunction
export OptimizationFunction
@@ -812,17 +846,43 @@ export AffineDiffEqOperator, DiffEqScaledOperator
export DiffEqScalar, DiffEqArrayOperator, DiffEqIdentity
-export step!, deleteat!, addat!, get_tmp_cache,
- full_cache, user_cache, u_cache, du_cache,
- rand_cache, ratenoise_cache,
- resize_non_user_cache!, deleteat_non_user_cache!, addat_non_user_cache!,
+export step!,
+ deleteat!,
+ addat!,
+ get_tmp_cache,
+ full_cache,
+ user_cache,
+ u_cache,
+ du_cache,
+ rand_cache,
+ ratenoise_cache,
+ resize_non_user_cache!,
+ deleteat_non_user_cache!,
+ addat_non_user_cache!,
terminate!,
- add_tstop!, has_tstop, first_tstop, pop_tstop!,
- add_saveat!, set_abstol!,
- set_reltol!, get_du, get_du!, get_dt, get_proposed_dt, set_proposed_dt!,
- u_modified!, savevalues!, reinit!, auto_dt_reset!, set_t!,
- set_u!, check_error, change_t_via_interpolation!, addsteps!,
- isdiscrete, reeval_internals_due_to_modification!
+ add_tstop!,
+ has_tstop,
+ first_tstop,
+ pop_tstop!,
+ add_saveat!,
+ set_abstol!,
+ set_reltol!,
+ get_du,
+ get_du!,
+ get_dt,
+ get_proposed_dt,
+ set_proposed_dt!,
+ u_modified!,
+ savevalues!,
+ reinit!,
+ auto_dt_reset!,
+ set_t!,
+ set_u!,
+ check_error,
+ change_t_via_interpolation!,
+ addsteps!,
+ isdiscrete,
+ reeval_internals_due_to_modification!
export ContinuousCallback, DiscreteCallback, CallbackSet, VectorContinuousCallback
diff --git a/src/alg_traits.jl b/src/alg_traits.jl
index 154cde9c50..afea70fbe0 100644
--- a/src/alg_traits.jl
+++ b/src/alg_traits.jl
@@ -1,5 +1,5 @@
"""
- isautodifferentiable(alg::AbstractDEAlgorithm)
+isautodifferentiable(alg::AbstractDEAlgorithm)
Trait declaration for whether an algorithm is compatible with
direct automatic differentiation, i.e. can have algorithms like
@@ -11,7 +11,7 @@ Defaults to false as only pure-Julia algorithms can have this be true.
isautodifferentiable(alg::AbstractSciMLAlgorithm) = false
"""
- forwarddiffs_model(alg::AbstractDEAlgorithm)
+forwarddiffs_model(alg::AbstractDEAlgorithm)
Trait declaration for whether an algorithm uses ForwardDiff.jl
on the model function is called with ForwardDiff.jl
@@ -21,7 +21,7 @@ Defaults to false as only pure-Julia algorithms can have this be true.
forwarddiffs_model(alg::AbstractSciMLAlgorithm) = false
"""
- forwarddiffs_model_time(alg::AbstractDEAlgorithm)
+forwarddiffs_model_time(alg::AbstractDEAlgorithm)
Trait declaration for whether an algorithm uses ForwardDiff.jl
on the model `f(u,p,t)` function is called with ForwardDiff.jl on the `t` argument.
@@ -32,7 +32,7 @@ have this as true
forwarddiffs_model_time(alg::AbstractSciMLAlgorithm) = false
"""
- allows_arbitrary_number_types(alg::AbstractDEAlgorithm)
+allows_arbitrary_number_types(alg::AbstractDEAlgorithm)
Trait declaration for whether an algorithm is compatible with
direct automatic differentiation, i.e. can have algorithms like
@@ -44,7 +44,7 @@ Defaults to false as only pure-Julia algorithms can have this be true.
allows_arbitrary_number_types(alg::AbstractSciMLAlgorithm) = false
"""
- allowscomplex(alg::AbstractDEAlgorithm)
+allowscomplex(alg::AbstractDEAlgorithm)
Trait declaration for whether an algorithm is compatible with
having complex numbers as the state variables.
@@ -54,7 +54,7 @@ Defaults to false.
allowscomplex(alg::AbstractSciMLAlgorithm) = false
"""
- isadaptive(alg::AbstractDEAlgorithm)
+isadaptive(alg::AbstractDEAlgorithm)
Trait declaration for whether an algorithm uses adaptivity,
i.e. has a non-quasi-static compute graph.
@@ -65,7 +65,7 @@ isadaptive(alg::AbstractDEAlgorithm) = true
# Default to assuming adaptive, safer error("Adaptivity algorithm trait not set.")
"""
- isdiscrete(alg::AbstractDEAlgorithm)
+isdiscrete(alg::AbstractDEAlgorithm)
Trait declaration for whether an algorithm allows for
discrete state values, such as integers.
@@ -75,10 +75,10 @@ Defaults to false.
isdiscrete(alg::AbstractDEAlgorithm) = false
"""
- allowsbounds(opt)
+allowsbounds(opt)
-Trait declaration for whether an optimizer allows for
-box constraints passed with `lb` and `ub` in
+Trait declaration for whether an optimizer allows for
+box constraints passed with `lb` and `ub` in
`OptimizationProblem`.
Defaults to false.
@@ -86,10 +86,10 @@ Defaults to false.
allowsbounds(opt) = false
"""
- requiresbounds(opt)
+requiresbounds(opt)
-Trait declaration for whether an optimizer requires
-box constraints passed with `lb` and `ub` in
+Trait declaration for whether an optimizer requires
+box constraints passed with `lb` and `ub` in
`OptimizationProblem`.
Defaults to false.
@@ -97,10 +97,10 @@ Defaults to false.
requiresbounds(opt) = false
"""
- allowsconstraints(opt)
+allowsconstraints(opt)
-Trait declaration for whether an optimizer allows
-non-linear constraints specified in `cons` in
+Trait declaration for whether an optimizer allows
+non-linear constraints specified in `cons` in
`OptimizationFunction`.
Defaults to false.
@@ -108,10 +108,10 @@ Defaults to false.
allowsconstraints(opt) = false
"""
- requiresconstraints(opt)
+requiresconstraints(opt)
-Trait declaration for whether an optimizer
-requires non-linear constraints specified in
+Trait declaration for whether an optimizer
+requires non-linear constraints specified in
`cons` in `OptimizationFunction`.
Defaults to false.
@@ -119,10 +119,10 @@ Defaults to false.
requiresconstraints(opt) = false
"""
- allowscallback(opt)
+allowscallback(opt)
-Trait declaration for whether an optimizer
-supports passing a `callback` to `solve`
+Trait declaration for whether an optimizer
+supports passing a `callback` to `solve`
for an `OptimizationProblem`.
Defaults to true.
@@ -130,7 +130,7 @@ Defaults to true.
allowscallback(opt) = true
"""
- alg_order(alg)
+alg_order(alg)
The theoretic convergence order of the algorithm.
"""
diff --git a/src/callbacks.jl b/src/callbacks.jl
index edab070f65..dbf13ccd31 100644
--- a/src/callbacks.jl
+++ b/src/callbacks.jl
@@ -105,78 +105,128 @@ struct ContinuousCallback{F1, F2, F3, F4, F5, T, T2, T3, I, R} <: AbstractContin
abstol::T
reltol::T2
repeat_nudge::T3
- function ContinuousCallback(condition::F1, affect!::F2, affect_neg!::F3,
- initialize::F4, finalize::F5, idxs::I, rootfind,
- interp_points, save_positions, dtrelax::R, abstol::T,
- reltol::T2,
- repeat_nudge::T3) where {F1, F2, F3, F4, F5, T, T2, T3, I, R
- }
+ function ContinuousCallback(condition::F1,
+ affect!::F2,
+ affect_neg!::F3,
+ initialize::F4,
+ finalize::F5,
+ idxs::I,
+ rootfind,
+ interp_points,
+ save_positions,
+ dtrelax::R,
+ abstol::T,
+ reltol::T2,
+ repeat_nudge::T3) where {F1, F2, F3, F4, F5, T, T2, T3, I, R}
new{F1, F2, F3, F4, F5, T, T2, T3, I, R}(condition,
- affect!, affect_neg!,
- initialize, finalize, idxs, rootfind,
+ affect!,
+ affect_neg!,
+ initialize,
+ finalize,
+ idxs,
+ rootfind,
interp_points,
BitArray(collect(save_positions)),
- dtrelax, abstol, reltol, repeat_nudge)
+ dtrelax,
+ abstol,
+ reltol,
+ repeat_nudge)
end
end
-function ContinuousCallback(condition, affect!, affect_neg!;
- initialize = INITIALIZE_DEFAULT,
- finalize = FINALIZE_DEFAULT,
- idxs = nothing,
- rootfind = LeftRootFind,
- save_positions = (true, true),
- interp_points = 10,
- dtrelax = 1,
- abstol = 10eps(), reltol = 0,
- repeat_nudge = 1 // 100)
- ContinuousCallback(condition, affect!, affect_neg!, initialize, finalize,
+function ContinuousCallback(condition,
+ affect!,
+ affect_neg!;
+ initialize = INITIALIZE_DEFAULT,
+ finalize = FINALIZE_DEFAULT,
+ idxs = nothing,
+ rootfind = LeftRootFind,
+ save_positions = (true, true),
+ interp_points = 10,
+ dtrelax = 1,
+ abstol = 10eps(),
+ reltol = 0,
+ repeat_nudge = 1 // 100,)
+ ContinuousCallback(condition,
+ affect!,
+ affect_neg!,
+ initialize,
+ finalize,
idxs,
- rootfind, interp_points,
+ rootfind,
+ interp_points,
save_positions,
- dtrelax, abstol, reltol, repeat_nudge)
+ dtrelax,
+ abstol,
+ reltol,
+ repeat_nudge)
end
-function ContinuousCallback(condition, affect!;
- initialize = INITIALIZE_DEFAULT,
- finalize = FINALIZE_DEFAULT,
- idxs = nothing,
- rootfind = LeftRootFind,
- save_positions = (true, true),
- affect_neg! = affect!,
- interp_points = 10,
- dtrelax = 1,
- abstol = 10eps(), reltol = 0, repeat_nudge = 1 // 100)
- ContinuousCallback(condition, affect!, affect_neg!, initialize, finalize, idxs,
- rootfind, interp_points,
+function ContinuousCallback(condition,
+ affect!;
+ initialize = INITIALIZE_DEFAULT,
+ finalize = FINALIZE_DEFAULT,
+ idxs = nothing,
+ rootfind = LeftRootFind,
+ save_positions = (true, true),
+ affect_neg! = affect!,
+ interp_points = 10,
+ dtrelax = 1,
+ abstol = 10eps(),
+ reltol = 0,
+ repeat_nudge = 1 // 100,)
+ ContinuousCallback(condition,
+ affect!,
+ affect_neg!,
+ initialize,
+ finalize,
+ idxs,
+ rootfind,
+ interp_points,
collect(save_positions),
- dtrelax, abstol, reltol, repeat_nudge)
+ dtrelax,
+ abstol,
+ reltol,
+ repeat_nudge)
end
TruncatedStacktraces.@truncate_stacktrace ContinuousCallback
"""
```julia
-VectorContinuousCallback(condition,affect!,affect_neg!,len;
- initialize = INITIALIZE_DEFAULT,
- finalize = FINALIZE_DEFAULT,
- idxs = nothing,
- rootfind=LeftRootFind,
- save_positions=(true,true),
- interp_points=10,
- abstol=10eps(),reltol=0,repeat_nudge = 1//100)
+VectorContinuousCallback(
+ condition,
+ affect!,
+ affect_neg!,
+ len;
+ initialize = INITIALIZE_DEFAULT,
+ finalize = FINALIZE_DEFAULT,
+ idxs = nothing,
+ rootfind = LeftRootFind,
+ save_positions = (true, true),
+ interp_points = 10,
+ abstol = 10eps(),
+ reltol = 0,
+ repeat_nudge = 1 // 100,
+)
```
```julia
-VectorContinuousCallback(condition,affect!,len;
- initialize = INITIALIZE_DEFAULT,
- finalize = FINALIZE_DEFAULT,
- idxs = nothing,
- rootfind=LeftRootFind,
- save_positions=(true,true),
- affect_neg! = affect!,
- interp_points=10,
- abstol=10eps(),reltol=0,repeat_nudge=1//100)
+VectorContinuousCallback(
+ condition,
+ affect!,
+ len;
+ initialize = INITIALIZE_DEFAULT,
+ finalize = FINALIZE_DEFAULT,
+ idxs = nothing,
+ rootfind = LeftRootFind,
+ save_positions = (true, true),
+ affect_neg! = affect!,
+ interp_points = 10,
+ abstol = 10eps(),
+ reltol = 0,
+ repeat_nudge = 1 // 100,
+)
```
This is also a subtype of `AbstractContinuousCallback`. `CallbackSet` is not feasible when you have many callbacks,
@@ -185,12 +235,12 @@ multiple events.
# Arguments
-- `condition`: This is a function `condition(out, u, t, integrator)` which should save the condition value in the array `out`
- at the right index. Maximum index of `out` should be specified in the `len` property of callback. So, this way you can have
- a chain of `len` events, which would cause the `i`th event to trigger when `out[i] = 0`.
-- `affect!`: This is a function `affect!(integrator, event_index)` which lets you modify `integrator` and it tells you about
- which event occurred using `event_idx` i.e. gives you index `i` for which `out[i]` came out to be zero.
-- `len`: Number of callbacks chained. This is compulsory to be specified.
+ - `condition`: This is a function `condition(out, u, t, integrator)` which should save the condition value in the array `out`
+ at the right index. Maximum index of `out` should be specified in the `len` property of callback. So, this way you can have
+ a chain of `len` events, which would cause the `i`th event to trigger when `out[i] = 0`.
+ - `affect!`: This is a function `affect!(integrator, event_index)` which lets you modify `integrator` and it tells you about
+ which event occurred using `event_idx` i.e. gives you index `i` for which `out[i]` came out to be zero.
+ - `len`: Number of callbacks chained. This is compulsory to be specified.
Rest of the arguments have the same meaning as in [`ContinuousCallback`](@ref).
"""
@@ -210,85 +260,132 @@ struct VectorContinuousCallback{F1, F2, F3, F4, F5, T, T2, T3, I, R} <:
abstol::T
reltol::T2
repeat_nudge::T3
- function VectorContinuousCallback(condition::F1, affect!::F2, affect_neg!::F3, len::Int,
- initialize::F4, finalize::F5, idxs::I, rootfind,
- interp_points, save_positions, dtrelax::R,
- abstol::T, reltol::T2,
- repeat_nudge::T3) where {F1, F2, F3, F4, F5, T, T2,
- T3, I, R}
+ function VectorContinuousCallback(condition::F1,
+ affect!::F2,
+ affect_neg!::F3,
+ len::Int,
+ initialize::F4,
+ finalize::F5,
+ idxs::I,
+ rootfind,
+ interp_points,
+ save_positions,
+ dtrelax::R,
+ abstol::T,
+ reltol::T2,
+ repeat_nudge::T3) where {F1, F2, F3, F4, F5, T, T2, T3, I, R}
new{F1, F2, F3, F4, F5, T, T2, T3, I, R}(condition,
- affect!, affect_neg!, len,
- initialize, finalize, idxs, rootfind,
+ affect!,
+ affect_neg!,
+ len,
+ initialize,
+ finalize,
+ idxs,
+ rootfind,
interp_points,
BitArray(collect(save_positions)),
- dtrelax, abstol, reltol, repeat_nudge)
+ dtrelax,
+ abstol,
+ reltol,
+ repeat_nudge)
end
end
-function VectorContinuousCallback(condition, affect!, affect_neg!, len;
- initialize = INITIALIZE_DEFAULT,
- finalize = FINALIZE_DEFAULT,
- idxs = nothing,
- rootfind = LeftRootFind,
- save_positions = (true, true),
- interp_points = 10,
- dtrelax = 1,
- abstol = 10eps(), reltol = 0, repeat_nudge = 1 // 100)
- VectorContinuousCallback(condition, affect!, affect_neg!, len,
- initialize, finalize,
+function VectorContinuousCallback(condition,
+ affect!,
+ affect_neg!,
+ len;
+ initialize = INITIALIZE_DEFAULT,
+ finalize = FINALIZE_DEFAULT,
+ idxs = nothing,
+ rootfind = LeftRootFind,
+ save_positions = (true, true),
+ interp_points = 10,
+ dtrelax = 1,
+ abstol = 10eps(),
+ reltol = 0,
+ repeat_nudge = 1 // 100,)
+ VectorContinuousCallback(condition,
+ affect!,
+ affect_neg!,
+ len,
+ initialize,
+ finalize,
idxs,
- rootfind, interp_points,
- save_positions, dtrelax,
- abstol, reltol, repeat_nudge)
+ rootfind,
+ interp_points,
+ save_positions,
+ dtrelax,
+ abstol,
+ reltol,
+ repeat_nudge)
end
-function VectorContinuousCallback(condition, affect!, len;
- initialize = INITIALIZE_DEFAULT,
- finalize = FINALIZE_DEFAULT,
- idxs = nothing,
- rootfind = LeftRootFind,
- save_positions = (true, true),
- affect_neg! = affect!,
- interp_points = 10,
- dtrelax = 1,
- abstol = 10eps(), reltol = 0, repeat_nudge = 1 // 100)
- VectorContinuousCallback(condition, affect!, affect_neg!, len, initialize, finalize,
+function VectorContinuousCallback(condition,
+ affect!,
+ len;
+ initialize = INITIALIZE_DEFAULT,
+ finalize = FINALIZE_DEFAULT,
+ idxs = nothing,
+ rootfind = LeftRootFind,
+ save_positions = (true, true),
+ affect_neg! = affect!,
+ interp_points = 10,
+ dtrelax = 1,
+ abstol = 10eps(),
+ reltol = 0,
+ repeat_nudge = 1 // 100,)
+ VectorContinuousCallback(condition,
+ affect!,
+ affect_neg!,
+ len,
+ initialize,
+ finalize,
idxs,
- rootfind, interp_points,
+ rootfind,
+ interp_points,
collect(save_positions),
- dtrelax, abstol, reltol, repeat_nudge)
+ dtrelax,
+ abstol,
+ reltol,
+ repeat_nudge)
end
TruncatedStacktraces.@truncate_stacktrace VectorContinuousCallback
"""
```julia
-DiscreteCallback(condition,affect!;
- initialize = INITIALIZE_DEFAULT,
- finalize = FINALIZE_DEFAULT,
- save_positions=(true,true))
+DiscreteCallback(
+ condition,
+ affect!;
+ initialize = INITIALIZE_DEFAULT,
+ finalize = FINALIZE_DEFAULT,
+ save_positions = (true, true),
+)
```
# Arguments
-- `condition`: This is a function `condition(u,t,integrator)` for declaring when
- the callback should be used. A callback is initiated if the condition evaluates
- to `true`. See the [Integrator Interface](@ref integrator) documentation for information about `integrator`.
- - `affect!`: This is the function `affect!(integrator)` where one is allowed to
- modify the current state of the integrator. For more information on what can
- be done, see the [Integrator Interface](@ref integrator) manual page.
-- `save_positions`: Boolean tuple for whether to save before and after the `affect!`.
- This saving will occur just before and after the event, only at event times, and
- does not depend on options like `saveat`, `save_everystep`, etc. (i.e. if
- `saveat=[1.0,2.0,3.0]`, this can still add a save point at `2.1` if true).
- For discontinuous changes like a modification to `u` to be
- handled correctly (without error), one should set `save_positions=(true,true)`.
-- `initialize`: This is a function `(c,u,t,integrator)` which can be used to initialize
- the state of the callback `c`. It should modify the argument `c` and the return is
- ignored.
-- `finalize`: This is a function `(c,u,t,integrator)` which can be used to finalize
- the state of the callback `c`. It should can the argument `c` and the return is
- ignored.
+ - `condition`: This is a function `condition(u,t,integrator)` for declaring when
+ the callback should be used. A callback is initiated if the condition evaluates
+ to `true`. See the [Integrator Interface](@ref integrator) documentation for information about `integrator`.
+
+ - `affect!`: This is the function `affect!(integrator)` where one is allowed to
+ modify the current state of the integrator. For more information on what can
+ be done, see the [Integrator Interface](@ref integrator) manual page.
+
+ - `save_positions`: Boolean tuple for whether to save before and after the `affect!`.
+ This saving will occur just before and after the event, only at event times, and
+ does not depend on options like `saveat`, `save_everystep`, etc. (i.e. if
+ `saveat=[1.0,2.0,3.0]`, this can still add a save point at `2.1` if true).
+ For discontinuous changes like a modification to `u` to be
+ handled correctly (without error), one should set `save_positions=(true,true)`.
+ - `initialize`: This is a function `(c,u,t,integrator)` which can be used to initialize
+ the state of the callback `c`. It should modify the argument `c` and the return is
+ ignored.
+ - `finalize`: This is a function `(c,u,t,integrator)` which can be used to finalize
+    the state of the callback `c`. It can use the argument `c` and the return is
+ ignored.
"""
struct DiscreteCallback{F1, F2, F3, F4} <: AbstractDiscreteCallback
condition::F1
@@ -296,17 +393,23 @@ struct DiscreteCallback{F1, F2, F3, F4} <: AbstractDiscreteCallback
initialize::F3
finalize::F4
save_positions::BitArray{1}
- function DiscreteCallback(condition::F1, affect!::F2,
- initialize::F3, finalize::F4,
- save_positions) where {F1, F2, F3, F4}
+ function DiscreteCallback(condition::F1,
+ affect!::F2,
+ initialize::F3,
+ finalize::F4,
+ save_positions) where {F1, F2, F3, F4}
new{F1, F2, F3, F4}(condition,
- affect!, initialize, finalize,
+ affect!,
+ initialize,
+ finalize,
BitArray(collect(save_positions)))
end
end
-function DiscreteCallback(condition, affect!;
- initialize = INITIALIZE_DEFAULT, finalize = FINALIZE_DEFAULT,
- save_positions = (true, true))
+function DiscreteCallback(condition,
+ affect!;
+ initialize = INITIALIZE_DEFAULT,
+ finalize = FINALIZE_DEFAULT,
+ save_positions = (true, true),)
DiscreteCallback(condition, affect!, initialize, finalize, save_positions)
end
@@ -364,6 +467,7 @@ end
split_callbacks(cs, (ds..., d), args...)
end
@inline function split_callbacks(cs, ds, d::CallbackSet, args...)
- split_callbacks((cs..., d.continuous_callbacks...), (ds..., d.discrete_callbacks...),
+ split_callbacks((cs..., d.continuous_callbacks...),
+ (ds..., d.discrete_callbacks...),
args...)
end
diff --git a/src/ensemble/basic_ensemble_solve.jl b/src/ensemble/basic_ensemble_solve.jl
index ece01eb687..cdfb3990f6 100644
--- a/src/ensemble/basic_ensemble_solve.jl
+++ b/src/ensemble/basic_ensemble_solve.jl
@@ -24,12 +24,13 @@ $(TYPEDEF)
struct EnsembleSerial <: BasicEnsembleAlgorithm end
function merge_stats(us)
- st = Iterators.filter(!isnothing, (hasproperty(x, :stats) ? x.stats : nothing for x in us))
+ st = Iterators.filter(!isnothing,
+ (hasproperty(x, :stats) ? x.stats : nothing for x in us))
isempty(st) && return nothing
reduce(merge, st)
end
-mutable struct AggregateLogger{T<:Logging.AbstractLogger} <: Logging.AbstractLogger
+mutable struct AggregateLogger{T <: Logging.AbstractLogger} <: Logging.AbstractLogger
progress::Dict{Symbol, Float64}
done_counter::Int
total::Float64
@@ -37,12 +38,22 @@ mutable struct AggregateLogger{T<:Logging.AbstractLogger} <: Logging.AbstractLog
lock::ReentrantLock
logger::T
end
-AggregateLogger(logger::Logging.AbstractLogger) = AggregateLogger(Dict{Symbol, Float64}(),0 , 0.0, 0.0, ReentrantLock(), logger)
+function AggregateLogger(logger::Logging.AbstractLogger)
+ AggregateLogger(Dict{Symbol, Float64}(), 0, 0.0, 0.0, ReentrantLock(), logger)
+end
-function Logging.handle_message(l::AggregateLogger, level, message, _module, group, id, file, line; kwargs...)
+function Logging.handle_message(l::AggregateLogger,
+ level,
+ message,
+ _module,
+ group,
+ id,
+ file,
+ line;
+ kwargs...,)
if convert(Logging.LogLevel, level) == Logging.LogLevel(-1) && haskey(kwargs, :progress)
pr = kwargs[:progress]
- if trylock(l.lock) || (pr == "done" && lock(l.lock)===nothing)
+ if trylock(l.lock) || (pr == "done" && lock(l.lock) === nothing)
try
if pr == "done"
pr = 1.0
@@ -50,9 +61,9 @@ function Logging.handle_message(l::AggregateLogger, level, message, _module, gro
end
len = length(l.progress)
if haskey(l.progress, id)
- l.total += (pr-l.progress[id])/len
+ l.total += (pr - l.progress[id]) / len
else
- l.total = l.total*(len/(len+1)) + pr/(len+1)
+ l.total = l.total * (len / (len + 1)) + pr / (len + 1)
len += 1
end
l.progress[id] = pr
@@ -61,19 +72,19 @@ function Logging.handle_message(l::AggregateLogger, level, message, _module, gro
# @show tot l.total l.total ≈ tot
curr_time = time()
if l.done_counter >= len
- tot="done"
+ tot = "done"
empty!(l.progress)
l.done_counter = 0
l.print_time = 0.0
- elseif curr_time-l.print_time > 0.1
+ elseif curr_time - l.print_time > 0.1
tot = l.total
l.print_time = curr_time
else
return
end
- id=:total
- message="Total"
- kwargs=merge(values(kwargs), (progress=tot,))
+ id = :total
+ message = "Total"
+ kwargs = merge(values(kwargs), (progress = tot,))
finally
unlock(l.lock)
end
@@ -81,15 +92,23 @@ function Logging.handle_message(l::AggregateLogger, level, message, _module, gro
return
end
end
- Logging.handle_message(l.logger, level, message, _module, group, id, file, line; kwargs...)
+ Logging.handle_message(l.logger,
+ level,
+ message,
+ _module,
+ group,
+ id,
+ file,
+ line;
+ kwargs...,)
end
Logging.shouldlog(l::AggregateLogger, args...) = Logging.shouldlog(l.logger, args...)
Logging.min_enabled_level(l::AggregateLogger) = Logging.min_enabled_level(l.logger)
Logging.catch_exceptions(l::AggregateLogger) = Logging.catch_exceptions(l.logger)
function __solve(prob::AbstractEnsembleProblem,
- alg::Union{AbstractDEAlgorithm, Nothing};
- kwargs...)
+ alg::Union{AbstractDEAlgorithm, Nothing};
+ kwargs...,)
if alg isa EnsembleAlgorithm
# Assume DifferentialEquations.jl is being used, so default alg
ensemblealg = alg
@@ -107,20 +126,30 @@ tighten_container_eltype(u::Vector{Any}) = map(identity, u)
tighten_container_eltype(u) = u
function __solve(prob::EnsembleProblem{<:AbstractVector{<:AbstractSciMLProblem}},
- alg::Union{AbstractDEAlgorithm, Nothing},
- ensemblealg::BasicEnsembleAlgorithm; kwargs...)
+ alg::Union{AbstractDEAlgorithm, Nothing},
+ ensemblealg::BasicEnsembleAlgorithm;
+ kwargs...,)
# TODO: @invoke
- invoke(__solve, Tuple{AbstractEnsembleProblem, typeof(alg), typeof(ensemblealg)},
- prob, alg, ensemblealg; trajectories = length(prob.prob), kwargs...)
+ invoke(__solve,
+ Tuple{AbstractEnsembleProblem, typeof(alg), typeof(ensemblealg)},
+ prob,
+ alg,
+ ensemblealg;
+ trajectories = length(prob.prob),
+ kwargs...,)
end
function __solve(prob::AbstractEnsembleProblem,
- alg::A,
- ensemblealg::BasicEnsembleAlgorithm;
- trajectories, batch_size = trajectories, progress_aggregate=true,
- pmap_batch_size = batch_size ÷ 100 > 0 ? batch_size ÷ 100 : 1, kwargs...) where {A}
- logger = progress_aggregate ? AggregateLogger(Logging.current_logger()) : Logging.current_logger()
-
+ alg::A,
+ ensemblealg::BasicEnsembleAlgorithm;
+ trajectories,
+ batch_size = trajectories,
+ progress_aggregate = true,
+ pmap_batch_size = batch_size ÷ 100 > 0 ? batch_size ÷ 100 : 1,
+ kwargs...,) where {A}
+ logger = progress_aggregate ? AggregateLogger(Logging.current_logger()) :
+ Logging.current_logger()
+
Logging.with_logger(logger) do
num_batches = trajectories ÷ batch_size
num_batches < 1 &&
@@ -131,14 +160,20 @@ function __solve(prob::AbstractEnsembleProblem,
name = get(kwargs, :progress_name, "Ensemble")
for i in 1:trajectories
msg = "$name #$i"
- Logging.@logmsg(Logging.LogLevel(-1), msg, _id=Symbol("SciMLBase_$i"), progress=0)
+ Logging.@logmsg(Logging.LogLevel(-1),
+ msg,
+ _id=Symbol("SciMLBase_$i"),
+ progress=0)
end
end
-
if num_batches == 1 && prob.reduction === DEFAULT_REDUCTION
- elapsed_time = @elapsed u = solve_batch(prob, alg, ensemblealg, 1:trajectories,
- pmap_batch_size; kwargs...)
+ elapsed_time = @elapsed u = solve_batch(prob,
+ alg,
+ ensemblealg,
+ 1:trajectories,
+ pmap_batch_size;
+ kwargs...,)
_u = tighten_container_eltype(u)
stats = merge_stats(_u)
return EnsembleSolution(_u, elapsed_time, true, stats)
@@ -160,7 +195,12 @@ function __solve(prob::AbstractEnsembleProblem,
else
II = (batch_size * (i - 1) + 1):(batch_size * i)
end
- batch_data = solve_batch(prob, alg, ensemblealg, II, pmap_batch_size; kwargs...)
+ batch_data = solve_batch(prob,
+ alg,
+ ensemblealg,
+ II,
+ pmap_batch_size;
+ kwargs...)
u, converged = prob.reduction(u, batch_data, II)
end
end
@@ -181,7 +221,7 @@ function batch_func(i, prob, alg; kwargs...)
name = get(kwargs, :progress_name, "Ensemble")
progress_name = "$name #$i"
progress_id = Symbol("SciMLBase_$i")
- kwargs = (kwargs..., progress_name=progress_name, progress_id=progress_id)
+ kwargs = (kwargs..., progress_name = progress_name, progress_id = progress_id)
end
x = prob.output_func(solve(new_prob, alg; kwargs...), i)
if !(x isa Tuple)
@@ -207,8 +247,12 @@ function batch_func(i, prob, alg; kwargs...)
_x[1]
end
-function solve_batch(prob, alg, ensemblealg::EnsembleDistributed, II, pmap_batch_size;
- kwargs...)
+function solve_batch(prob,
+ alg,
+ ensemblealg::EnsembleDistributed,
+ II,
+ pmap_batch_size;
+ kwargs...,)
wp = CachingPool(workers())
# Fix the return type of pmap
@@ -219,7 +263,7 @@ function solve_batch(prob, alg, ensemblealg::EnsembleDistributed, II, pmap_batch
T = Core.Compiler.return_type(f,Tuple{})
=#
- batch_data = pmap(wp, II, batch_size = pmap_batch_size) do i
+ batch_data = pmap(wp, II; batch_size = pmap_batch_size) do i
batch_func(i, prob, alg; kwargs...)
end
@@ -242,8 +286,12 @@ function SciMLBase.solve_batch(prob, alg, ::EnsembleSerial, II, pmap_batch_size;
SciMLBase.tighten_container_eltype(batch_data)
end
-function solve_batch(prob, alg, ensemblealg::EnsembleThreads, II, pmap_batch_size;
- kwargs...)
+function solve_batch(prob,
+ alg,
+ ensemblealg::EnsembleThreads,
+ II,
+ pmap_batch_size;
+ kwargs...,)
nthreads = min(Threads.nthreads(), length(II))
if length(II) == 1 || nthreads == 1
return solve_batch(prob, alg, EnsembleSerial(), II, pmap_batch_size; kwargs...)
@@ -262,8 +310,7 @@ function solve_batch(prob, alg, ensemblealg::EnsembleThreads, II, pmap_batch_siz
end
function tmap(f, args...)
- batch_data = Vector{Core.Compiler.return_type(f, Tuple{typeof.(getindex.(args, 1))...})
- }(undef,
+ batch_data = Vector{Core.Compiler.return_type(f, Tuple{typeof.(getindex.(args, 1))...})}(undef,
length(args[1]))
Threads.@threads for i in 1:length(args[1])
batch_data[i] = f(getindex.(args, i)...)
@@ -287,7 +334,7 @@ function solve_batch(prob, alg, ::EnsembleSplitThreads, II, pmap_batch_size; kwa
=#
batch_data = let
- pmap(wp, 1:N, batch_size = pmap_batch_size) do i
+ pmap(wp, 1:N; batch_size = pmap_batch_size) do i
if i == N
I_local = II[(batch_size * (i - 1) + 1):end]
else
diff --git a/src/ensemble/ensemble_analysis.jl b/src/ensemble/ensemble_analysis.jl
index c68fd5dade..1a27d5f5ca 100644
--- a/src/ensemble/ensemble_analysis.jl
+++ b/src/ensemble/ensemble_analysis.jl
@@ -81,16 +81,20 @@ function timeseries_steps_meanvar(sim)
DiffEqArray(means, sim[1].t), DiffEqArray(vars, sim[1].t)
end
function timeseries_steps_meancov(sim)
- reshape([timestep_meancov(sim, i, j) for i in 1:length(sim[1])
- for j in 1:length(sim[1])], length(sim[1]), length(sim[1]))
+ reshape([timestep_meancov(sim, i, j) for i in 1:length(sim[1]) for j in 1:length(sim[1])],
+ length(sim[1]),
+ length(sim[1]))
end
function timeseries_steps_meancor(sim)
- reshape([timestep_meancor(sim, i, j) for i in 1:length(sim[1])
- for j in 1:length(sim[1])], length(sim[1]), length(sim[1]))
+ reshape([timestep_meancor(sim, i, j) for i in 1:length(sim[1]) for j in 1:length(sim[1])],
+ length(sim[1]),
+ length(sim[1]))
end
function timeseries_steps_weighted_meancov(sim, W)
- reshape([timestep_meancov(sim, W, i, j) for i in 1:length(sim[1])
- for j in 1:length(sim[1])], length(sim[1]), length(sim[1]))
+ reshape([timestep_meancov(sim, W, i, j) for i in 1:length(sim[1]) for
+ j in 1:length(sim[1])],
+ length(sim[1]),
+ length(sim[1]))
end
timepoint_mean(sim, t) = componentwise_mean(get_timepoint(sim, t))
@@ -122,7 +126,8 @@ function timepoint_weighted_meancov(sim, W, t1, t2)
end
function SciMLBase.EnsembleSummary(sim::SciMLBase.AbstractEnsembleSolution{T, N},
- t = sim[1].t; quantiles = [0.05, 0.95]) where {T, N}
+ t = sim[1].t;
+ quantiles = [0.05, 0.95],) where {T, N}
if sim[1] isa SciMLSolution
m, v = timeseries_point_meanvar(sim, t)
med = timeseries_point_median(sim, t)
@@ -136,8 +141,23 @@ function SciMLBase.EnsembleSummary(sim::SciMLBase.AbstractEnsembleSolution{T, N}
end
trajectories = length(sim)
- EnsembleSummary{T, N, typeof(t), typeof(m), typeof(v), typeof(med), typeof(qlow),
- typeof(qhigh)}(t, m, v, med, qlow, qhigh, trajectories, sim.elapsedTime,
+ EnsembleSummary{
+ T,
+ N,
+ typeof(t),
+ typeof(m),
+ typeof(v),
+ typeof(med),
+ typeof(qlow),
+ typeof(qhigh),
+ }(t,
+ m,
+ v,
+ med,
+ qlow,
+ qhigh,
+ trajectories,
+ sim.elapsedTime,
sim.converged)
end
@@ -166,21 +186,24 @@ function timeseries_point_meancov(sim, ts)
timeseries_point_meancov(sim, ts[1:(end - 1)], ts[2:end])
end
function timeseries_point_meancov(sim, ts1, ts2)
- reshape([timepoint_meancov(sim, t1, t2) for t1 in ts1 for t2 in ts2], length(ts1),
+ reshape([timepoint_meancov(sim, t1, t2) for t1 in ts1 for t2 in ts2],
+ length(ts1),
length(ts2))
end
function timeseries_point_meancor(sim, ts)
timeseries_point_meancor(sim, ts[1:(end - 1)], ts[2:end])
end
function timeseries_point_meancor(sim, ts1, ts2)
- reshape([timepoint_meancor(sim, t1, t2) for t1 in ts1 for t2 in ts2], length(ts1),
+ reshape([timepoint_meancor(sim, t1, t2) for t1 in ts1 for t2 in ts2],
+ length(ts1),
length(ts2))
end
function timeseries_point_weighted_meancov(sim, W, ts)
timeseries_point_weighted_meancov(sim, W, ts[1:(end - 1)], ts[2:end])
end
function timeseries_point_weighted_meancov(sim, W, ts1, ts2)
- reshape([timepoint_meancov(sim, W, t1, t2) for t1 in ts1 for t2 in ts2], length(ts1),
+ reshape([timepoint_meancov(sim, W, t1, t2) for t1 in ts1 for t2 in ts2],
+ length(ts1),
length(ts2))
end
@@ -361,24 +384,40 @@ function componentwise_weighted_meancov(A, B, W; weight_type = :reliability)
end
export get_timestep,
- get_timepoint,
- componentwise_vectors_timestep, componentwise_vectors_timepoint
+ get_timepoint, componentwise_vectors_timestep, componentwise_vectors_timepoint
export componentwise_mean, componentwise_meanvar
-export timestep_mean, timestep_median, timestep_quantile, timestep_meanvar,
- timestep_meancov, timestep_meancor, timestep_weighted_meancov
+export timestep_mean,
+ timestep_median,
+ timestep_quantile,
+ timestep_meanvar,
+ timestep_meancov,
+ timestep_meancor,
+ timestep_weighted_meancov
-export timeseries_steps_mean, timeseries_steps_median, timeseries_steps_quantile,
- timeseries_steps_meanvar, timeseries_steps_meancov,
- timeseries_steps_meancor, timeseries_steps_weighted_meancov
+export timeseries_steps_mean,
+ timeseries_steps_median,
+ timeseries_steps_quantile,
+ timeseries_steps_meanvar,
+ timeseries_steps_meancov,
+ timeseries_steps_meancor,
+ timeseries_steps_weighted_meancov
-export timepoint_mean, timepoint_median, timepoint_quantile,
- timepoint_meanvar, timepoint_meancov,
- timepoint_meancor, timepoint_weighted_meancov
+export timepoint_mean,
+ timepoint_median,
+ timepoint_quantile,
+ timepoint_meanvar,
+ timepoint_meancov,
+ timepoint_meancor,
+ timepoint_weighted_meancov
-export timeseries_point_mean, timeseries_point_median, timeseries_point_quantile,
- timeseries_point_meanvar, timeseries_point_meancov,
- timeseries_point_meancor, timeseries_point_weighted_meancov
+export timeseries_point_mean,
+ timeseries_point_median,
+ timeseries_point_quantile,
+ timeseries_point_meanvar,
+ timeseries_point_meancov,
+ timeseries_point_meancor,
+ timeseries_point_weighted_meancov
end
diff --git a/src/ensemble/ensemble_problems.jl b/src/ensemble/ensemble_problems.jl
index 4ad4934954..bf8d39fa2b 100644
--- a/src/ensemble/ensemble_problems.jl
+++ b/src/ensemble/ensemble_problems.jl
@@ -20,14 +20,14 @@ function EnsembleProblem(prob::AbstractVector{<:AbstractSciMLProblem}; kwargs...
Tuple{Any},
prob;
prob_func = DEFAULT_VECTOR_PROB_FUNC,
- kwargs...)
+ kwargs...,)
end
function EnsembleProblem(prob;
- prob_func = DEFAULT_PROB_FUNC,
- output_func = DEFAULT_OUTPUT_FUNC,
- reduction = DEFAULT_REDUCTION,
- u_init = nothing,
- safetycopy = prob_func !== DEFAULT_PROB_FUNC)
+ prob_func = DEFAULT_PROB_FUNC,
+ output_func = DEFAULT_OUTPUT_FUNC,
+ reduction = DEFAULT_REDUCTION,
+ u_init = nothing,
+ safetycopy = prob_func !== DEFAULT_PROB_FUNC,)
_prob_func = prepare_function(prob_func)
_output_func = prepare_function(output_func)
_reduction = prepare_function(reduction)
@@ -35,26 +35,33 @@ function EnsembleProblem(prob;
EnsembleProblem(prob, _prob_func, _output_func, _reduction, _u_init, safetycopy)
end
-function EnsembleProblem(; prob,
- prob_func = DEFAULT_PROB_FUNC,
- output_func = DEFAULT_OUTPUT_FUNC,
- reduction = DEFAULT_REDUCTION,
- u_init = nothing, p = nothing,
- safetycopy = prob_func !== DEFAULT_PROB_FUNC)
+function EnsembleProblem(;
+ prob,
+ prob_func = DEFAULT_PROB_FUNC,
+ output_func = DEFAULT_OUTPUT_FUNC,
+ reduction = DEFAULT_REDUCTION,
+ u_init = nothing,
+ p = nothing,
+ safetycopy = prob_func !== DEFAULT_PROB_FUNC,)
EnsembleProblem(prob; prob_func, output_func, reduction, u_init, safetycopy)
end
#since NonlinearProblem might want to use this dispatch as well
-function SciMLBase.EnsembleProblem(prob::AbstractSciMLProblem, u0s::Vector{Vector{T}}; kwargs...) where {T}
- prob_func = (prob, i, repeat = nothing) -> remake(prob, u0 = u0s[i])
+function SciMLBase.EnsembleProblem(prob::AbstractSciMLProblem,
+ u0s::Vector{Vector{T}};
+ kwargs...,) where {T}
+ prob_func = (prob, i, repeat = nothing) -> remake(prob; u0 = u0s[i])
return SciMLBase.EnsembleProblem(prob; prob_func, kwargs...)
end
#only makes sense for OptimizationProblem, might make sense for IntervalNonlinearProblem
function SciMLBase.EnsembleProblem(prob::OptimizationProblem, trajectories::Int; kwargs...)
if prob.lb !== nothing && prob.ub !== nothing
- u0s = QuasiMonteCarlo.sample(trajectories, prob.lb, prob.ub, QuasiMonteCarlo.LatinHypercubeSample())
- prob_func = (prob, i, repeat = nothing) -> remake(prob, u0 = u0s[:, i])
+ u0s = QuasiMonteCarlo.sample(trajectories,
+ prob.lb,
+ prob.ub,
+ QuasiMonteCarlo.LatinHypercubeSample())
+ prob_func = (prob, i, repeat = nothing) -> remake(prob; u0 = u0s[:, i])
else
error("EnsembleProblem with `trajectories` as second argument requires lower and upper bounds to be defined in the `OptimizationProblem`.")
end
diff --git a/src/ensemble/ensemble_solutions.jl b/src/ensemble/ensemble_solutions.jl
index 5bb198512c..dcf8564722 100644
--- a/src/ensemble/ensemble_solutions.jl
+++ b/src/ensemble/ensemble_solutions.jl
@@ -10,16 +10,35 @@ struct EnsembleTestSolution{T, N, S} <: AbstractEnsembleSolution{T, N, S}
elapsedTime::Float64
converged::Bool
end
-function EnsembleTestSolution(sim::AbstractEnsembleSolution{T, N}, errors, weak_errors,
- error_means, error_medians, elapsedTime,
- converged) where {T, N}
- EnsembleTestSolution{T, N, typeof(sim.u)}(sim.u, errors, weak_errors, error_means,
- error_medians, sim.elapsedTime, sim.converged)
+function EnsembleTestSolution(sim::AbstractEnsembleSolution{T, N},
+ errors,
+ weak_errors,
+ error_means,
+ error_medians,
+ elapsedTime,
+ converged) where {T, N}
+ EnsembleTestSolution{T, N, typeof(sim.u)}(sim.u,
+ errors,
+ weak_errors,
+ error_means,
+ error_medians,
+ sim.elapsedTime,
+ sim.converged)
end
-function EnsembleTestSolution(u, errors, weak_errors, error_means, error_medians,
- elapsedTime, converged)
- EnsembleTestSolution(EnsembleSolution(u, elapsedTime, converged), errors, weak_errors,
- error_means, error_medians, elapsedTime, converged)
+function EnsembleTestSolution(u,
+ errors,
+ weak_errors,
+ error_means,
+ error_medians,
+ elapsedTime,
+ converged)
+ EnsembleTestSolution(EnsembleSolution(u, elapsedTime, converged),
+ errors,
+ weak_errors,
+ error_means,
+ error_medians,
+ elapsedTime,
+ converged)
end
"""
@@ -29,20 +48,22 @@ struct EnsembleSolution{T, N, S} <: AbstractEnsembleSolution{T, N, S}
u::S
elapsedTime::Float64
converged::Bool
- stats
+ stats::Any
end
function EnsembleSolution(sim, dims::NTuple{N}, elapsedTime, converged, stats) where {N}
- EnsembleSolution{eltype(eltype(sim)), N, typeof(sim)}(sim, elapsedTime, converged, stats)
+ EnsembleSolution{eltype(eltype(sim)), N, typeof(sim)}(sim,
+ elapsedTime,
+ converged,
+ stats)
end
-function EnsembleSolution(sim, elapsedTime, converged, stats=nothing)
+function EnsembleSolution(sim, elapsedTime, converged, stats = nothing)
EnsembleSolution(sim, (length(sim),), elapsedTime, converged, stats)
end # Vector of some type which is not an array
-function EnsembleSolution(sim::T, elapsedTime,
- converged, stats=nothing) where {T <: AbstractVector{T2}
-} where {T2 <:
- AbstractArray}
- EnsembleSolution{eltype(eltype(sim)), ndims(sim[1]) + 1, typeof(sim)}(
- sim,
+function EnsembleSolution(sim::T,
+ elapsedTime,
+ converged,
+ stats = nothing) where {T <: AbstractVector{T2}} where {T2 <: AbstractArray}
+ EnsembleSolution{eltype(eltype(sim)), ndims(sim[1]) + 1, typeof(sim)}(sim,
elapsedTime,
converged,
stats)
@@ -77,13 +98,17 @@ struct EnsembleSummary{T, N, Tt, S, S2, S3, S4, S5} <: AbstractEnsembleSolution{
end
function calculate_ensemble_errors(sim::AbstractEnsembleSolution; kwargs...)
- calculate_ensemble_errors(sim.u; elapsedTime = sim.elapsedTime,
- converged = sim.converged, kwargs...)
+ calculate_ensemble_errors(sim.u;
+ elapsedTime = sim.elapsedTime,
+ converged = sim.converged,
+ kwargs...,)
end
-function calculate_ensemble_errors(u; elapsedTime = 0.0, converged = false,
- weak_timeseries_errors = false,
- weak_dense_errors = false)
+function calculate_ensemble_errors(u;
+ elapsedTime = 0.0,
+ converged = false,
+ weak_timeseries_errors = false,
+ weak_dense_errors = false,)
errors = Dict{Symbol, Vector{eltype(u[1].u[1])}}() #Should add type information
error_means = Dict{Symbol, eltype(u[1].u[1])}()
error_medians = Dict{Symbol, eltype(u[1].u[1])}()
@@ -100,8 +125,8 @@ function calculate_ensemble_errors(u; elapsedTime = 0.0, converged = false,
res = norm(m_final - m_final_analytic)
weak_errors[:weak_final] = res
if weak_timeseries_errors
- ts_weak_errors = [mean([u[j][i] - u[j].u_analytic[i] for j in 1:length(u)])
- for i in 1:length(u[1])]
+ ts_weak_errors = [mean([u[j][i] - u[j].u_analytic[i] for j in 1:length(u)]) for
+ i in 1:length(u[1])]
ts_l2_errors = [sqrt.(sum(abs2, err) / length(err)) for err in ts_weak_errors]
l2_tmp = sqrt(sum(abs2, ts_l2_errors) / length(ts_l2_errors))
max_tmp = maximum([maximum(abs.(err)) for err in ts_weak_errors])
@@ -109,27 +134,37 @@ function calculate_ensemble_errors(u; elapsedTime = 0.0, converged = false,
weak_errors[:weak_l∞] = max_tmp
end
if weak_dense_errors
- densetimes = collect(range(u[1].t[1], stop = u[1].t[end], length = 100))
- u_analytic = [[sol.prob.f.analytic(sol.prob.u0, sol.prob.p, densetimes[i],
- sol.W(densetimes[i])[1])
- for i in eachindex(densetimes)] for sol in u]
+ densetimes = collect(range(u[1].t[1]; stop = u[1].t[end], length = 100))
+ u_analytic = [[sol.prob.f.analytic(sol.prob.u0,
+ sol.prob.p,
+ densetimes[i],
+ sol.W(densetimes[i])[1]) for i in eachindex(densetimes)] for sol in u]
udense = [u[j](densetimes) for j in 1:length(u)]
dense_weak_errors = [mean([udense[j][i] - u_analytic[j][i] for j in 1:length(u)])
- for i in eachindex(densetimes)]
+ for
+ i in eachindex(densetimes)]
dense_L2_errors = [sqrt.(sum(abs2, err) / length(err)) for err in dense_weak_errors]
L2_tmp = sqrt(sum(abs2, dense_L2_errors) / length(dense_L2_errors))
max_tmp = maximum([maximum(abs.(err)) for err in dense_weak_errors])
weak_errors[:weak_L2] = L2_tmp
weak_errors[:weak_L∞] = max_tmp
end
- return EnsembleTestSolution(u, errors, weak_errors, error_means, error_medians,
- elapsedTime, converged)
+ return EnsembleTestSolution(u,
+ errors,
+ weak_errors,
+ error_means,
+ error_medians,
+ elapsedTime,
+ converged)
end
### Displays
function Base.summary(io::IO, A::AbstractEnsembleSolution)
- print(io, "EnsembleSolution Solution of length ", length(A.u), " with uType:\n",
+ print(io,
+ "EnsembleSolution Solution of length ",
+ length(A.u),
+ " with uType:\n",
eltype(A.u))
end
function Base.show(io::IO, m::MIME"text/plain", A::AbstractEnsembleSolution)
@@ -139,9 +174,8 @@ end
### Plot Recipes
@recipe function f(sim::AbstractEnsembleSolution;
- zcolors = sim.u isa AbstractArray ? fill(nothing, length(sim.u)) :
- nothing,
- trajectories = eachindex(sim))
+ zcolors = sim.u isa AbstractArray ? fill(nothing, length(sim.u)) : nothing,
+ trajectories = eachindex(sim),)
for i in trajectories
size(sim[i].u, 1) == 0 && continue
@series begin
@@ -156,9 +190,9 @@ end
end
@recipe function f(sim::EnsembleSummary;
- trajectories = sim.u[1] isa AbstractArray ? eachindex(sim.u[1]) :
- 1,
- error_style = :ribbon, ci_type = :quantile)
+ trajectories = sim.u[1] isa AbstractArray ? eachindex(sim.u[1]) : 1,
+ error_style = :ribbon,
+ ci_type = :quantile,)
if ci_type == :SEM
if sim.u[1] isa AbstractArray
u = vecarr_to_vectors(sim.u)
@@ -214,8 +248,8 @@ Base.@propagate_inbounds function Base.getindex(x::AbstractEnsembleSolution, s,
end
Base.@propagate_inbounds function Base.getindex(x::AbstractEnsembleSolution,
- ::Colon,
- args::Colon...)
+ ::Colon,
+ args::Colon...)
return invoke(getindex,
Tuple{RecursiveArrayTools.AbstractVectorOfArray, Colon, typeof.(args)...},
x,
@@ -228,5 +262,5 @@ function (sol::AbstractEnsembleSolution)(args...; kwargs...)
end
Base.@propagate_inbounds function Base.getindex(sol::WeightedEnsembleSolution, S)
- return [sum(stack(sol.weights .* sol.ensol[:, S]), dims = 2)]
+ return [sum(stack(sol.weights .* sol.ensol[:, S]); dims = 2)]
end
diff --git a/src/function_wrappers.jl b/src/function_wrappers.jl
index 93f5394c43..1e72621803 100644
--- a/src/function_wrappers.jl
+++ b/src/function_wrappers.jl
@@ -11,7 +11,9 @@ function TimeGradientWrapper(f::F, uprev, p) where {F}
return TimeGradientWrapper{isinplace(f, 4)}(f, uprev, p)
end
-(ff::TimeGradientWrapper{true})(t) = (du2 = similar(ff.uprev); ff.f(du2, ff.uprev, ff.p, t); du2)
+function (ff::TimeGradientWrapper{true})(t)
+ (du2 = similar(ff.uprev); ff.f(du2, ff.uprev, ff.p, t); du2)
+end
(ff::TimeGradientWrapper{true})(du2, t) = ff.f(du2, ff.uprev, ff.p, t)
(ff::TimeGradientWrapper{false})(t) = ff.f(ff.uprev, ff.p, t)
@@ -28,9 +30,13 @@ end
UJacobianWrapper(f::F, t, p) where {F} = UJacobianWrapper{isinplace(f, 4)}(f, t, p)
(ff::UJacobianWrapper{true})(du1, uprev) = ff.f(du1, uprev, ff.p, ff.t)
-(ff::UJacobianWrapper{true})(uprev) = (du1 = similar(uprev); ff.f(du1, uprev, ff.p, ff.t); du1)
+function (ff::UJacobianWrapper{true})(uprev)
+ (du1 = similar(uprev); ff.f(du1, uprev, ff.p, ff.t); du1)
+end
(ff::UJacobianWrapper{true})(du1, uprev, p, t) = ff.f(du1, uprev, p, t)
-(ff::UJacobianWrapper{true})(uprev, p, t) = (du1 = similar(uprev); ff.f(du1, uprev, p, t); du1)
+function (ff::UJacobianWrapper{true})(uprev, p, t)
+ (du1 = similar(uprev); ff.f(du1, uprev, p, t); du1)
+end
(ff::UJacobianWrapper{false})(uprev) = ff.f(uprev, ff.p, ff.t)
(ff::UJacobianWrapper{false})(uprev, p, t) = ff.f(uprev, p, t)
diff --git a/src/integrator_interface.jl b/src/integrator_interface.jl
index 7046398a7b..81e77b23b9 100644
--- a/src/integrator_interface.jl
+++ b/src/integrator_interface.jl
@@ -1,5 +1,6 @@
"""
step!(integ::DEIntegrator [, dt [, stop_at_tdt]])
+
Perform one (successful) step on the integrator.
Alternative, if a `dt` is given, then `step!` the integrator until
@@ -76,6 +77,7 @@ end
Resizes the non-user facing caches to be compatible with a DE of size `k`. This includes resizing Jacobian caches.
!!! note
+
In many cases, [`resize!`](@ref) simply resizes [`full_cache`](@ref) variables and then
calls this function. This finer control is required for some `AbstractArray`
operations.
@@ -90,6 +92,7 @@ end
[`deleteat!`](@ref)s the non-user facing caches at indices `idxs`. This includes resizing Jacobian caches.
!!! note
+
In many cases, `deleteat!` simply `deleteat!`s [`full_cache`](@ref) variables and then
calls this function. This finer control is required for some `AbstractArray`
operations.
@@ -102,7 +105,9 @@ end
addat_non_user_cache!(i::DEIntegrator,idxs)
[`addat!`](@ref)s the non-user facing caches at indices `idxs`. This includes resizing Jacobian caches.
+
!!! note
+
In many cases, `addat!` simply `addat!`s [`full_cache`](@ref) variables and then
calls this function. This finer control is required for some `AbstractArray`
operations.
@@ -174,10 +179,12 @@ and if `savevalues!` saved at the current time point, then `savedexactly` is
true.
The saving priority/order is as follows:
+
- `save_on`
- - `saveat`
- - `force_save`
- - `save_everystep`
+
+ + `saveat`
+ + `force_save`
+ + `save_everystep`
"""
function savevalues!(i::DEIntegrator)
error("savevalues!: method has not been implemented for the integrator")
@@ -255,18 +262,19 @@ The reinit function lets you restart the integration at a new value.
# Arguments
-- `u0`: Value of `u` to start at. Default value is `integrator.sol.prob.u0`
+ - `u0`: Value of `u` to start at. Default value is `integrator.sol.prob.u0`
# Keyword Arguments
-- `t0`: Starting timepoint. Default value is `integrator.sol.prob.tspan[1]`
-- `tf`: Ending timepoint. Default value is `integrator.sol.prob.tspan[2]`
-- `erase_sol=true`: Whether to start with no other values in the solution, or keep the previous solution.
-- `tstops`, `d_discontinuities`, & `saveat`: Cache where these are stored. Default is the original cache.
-- `reset_dt`: Set whether to reset the current value of `dt` using the automatic `dt` determination algorithm. Default is
- `(integrator.dtcache == zero(integrator.dt)) && integrator.opts.adaptive`
-- `reinit_callbacks`: Set whether to run the callback initializations again (and `initialize_save` is for that). Default is `true`.
-- `reinit_cache`: Set whether to re-run the cache initialization function (i.e. resetting FSAL, not allocating vectors)
- which should usually be true for correctness. Default is `true`.
+
+ - `t0`: Starting timepoint. Default value is `integrator.sol.prob.tspan[1]`
+ - `tf`: Ending timepoint. Default value is `integrator.sol.prob.tspan[2]`
+ - `erase_sol=true`: Whether to start with no other values in the solution, or keep the previous solution.
+ - `tstops`, `d_discontinuities`, & `saveat`: Cache where these are stored. Default is the original cache.
+ - `reset_dt`: Set whether to reset the current value of `dt` using the automatic `dt` determination algorithm. Default is
+ `(integrator.dtcache == zero(integrator.dt)) && integrator.opts.adaptive`
+ - `reinit_callbacks`: Set whether to run the callback initializations again (and `initialize_save` is for that). Default is `true`.
+ - `reinit_cache`: Set whether to re-run the cache initialization function (i.e. resetting FSAL, not allocating vectors)
+ which should usually be true for correctness. Default is `true`.
Additionally, once can access [`auto_dt_reset!`](@ref) which will run the auto `dt` initialization algorithm.
"""
@@ -425,9 +433,14 @@ function sym_to_index(sym, integrator::DEIntegrator)
end
Base.@propagate_inbounds function Base.getindex(A::DEIntegrator,
- I::Union{Int, AbstractArray{Int},
- CartesianIndex, Colon, BitArray,
- AbstractArray{Bool}}...)
+ I::Union{
+ Int,
+ AbstractArray{Int},
+ CartesianIndex,
+ Colon,
+ BitArray,
+ AbstractArray{Bool},
+ }...)
RecursiveArrayTools.VectorOfArray(A.u)[I...]
end
@@ -445,8 +458,7 @@ Base.@propagate_inbounds function Base.getindex(A::DEIntegrator, sym)
if i === nothing
if issymbollike(sym)
- if has_sys(A.f) && is_indep_sym(A.f.sys, sym) ||
- Symbol(sym) == getindepsym(A)
+ if has_sys(A.f) && is_indep_sym(A.f.sys, sym) || Symbol(sym) == getindepsym(A)
return A.t
elseif has_sys(A.f) && is_param_sym(A.f.sys, sym)
return A.p[param_sym_to_index(A.f.sys, sym)]
@@ -454,7 +466,8 @@ Base.@propagate_inbounds function Base.getindex(A::DEIntegrator, sym)
return A.p[findfirst(x -> isequal(x, Symbol(sym)), getparamsyms(A))]
elseif (sym isa Symbol) && has_sys(A.f) && hasproperty(A.f.sys, sym) # Handles input like :X (where X is a state).
return observed(A, getproperty(A.f.sys, sym))
- elseif has_sys(A.f) && (count('₊', String(Symbol(sym))) == 1) &&
+ elseif has_sys(A.f) &&
+ (count('₊', String(Symbol(sym))) == 1) &&
(count(isequal(Symbol(sym)),
Symbol.(A.f.sys.name, :₊, getparamsyms(A))) == 1) # Handles input like sys.X (where X is a parameter).
return A.p[findfirst(isequal(Symbol(sym)),
@@ -535,11 +548,16 @@ has_reinit(i::DEIntegrator) = false
function Base.summary(io::IO, I::DEIntegrator)
type_color, no_color = get_colorizers(io)
print(io,
- type_color, nameof(typeof(I)),
- no_color, " with uType ",
- type_color, typeof(I.u),
- no_color, " and tType ",
- type_color, typeof(I.t),
+ type_color,
+ nameof(typeof(I)),
+ no_color,
+ " with uType ",
+ type_color,
+ typeof(I.u),
+ no_color,
+ " and tType ",
+ type_color,
+ typeof(I.t),
no_color)
end
function Base.show(io::IO, A::DEIntegrator)
@@ -585,11 +603,13 @@ function check_error(integrator::DEIntegrator)
# The last part:
# If you are close to the end, don't exit: let the user hit the end!
# However, if we try that and the step fails, exit instead of infinite loop
- if !integrator.opts.force_dtmin && integrator.opts.adaptive &&
+ if !integrator.opts.force_dtmin &&
+ integrator.opts.adaptive &&
abs(integrator.dt) <= abs(integrator.opts.dtmin) &&
(((hasproperty(integrator, :opts) && hasproperty(integrator.opts, :tstops)) ?
- integrator.t + integrator.dt < integrator.tdir * first(integrator.opts.tstops) :
- true) || (hasproperty(integrator, :accept_step) && !integrator.accept_step))
+ integrator.t + integrator.dt <
+ integrator.tdir * first(integrator.opts.tstops) : true) ||
+ (hasproperty(integrator, :accept_step) && !integrator.accept_step))
if integrator.opts.verbose
if isdefined(integrator, :EEst)
EEst = ", and step error estimate = $(integrator.EEst)"
@@ -600,7 +620,9 @@ function check_error(integrator::DEIntegrator)
end
return ReturnCode.DtLessThanMin
end
- if integrator.opts.unstable_check(integrator.dt, integrator.u, integrator.p,
+ if integrator.opts.unstable_check(integrator.dt,
+ integrator.u,
+ integrator.p,
integrator.t)
if integrator.opts.verbose
@warn("Instability detected. Aborting")
@@ -675,10 +697,8 @@ function Base.iterate(tup::IntegratorTuples, state = 0)
end
function Base.eltype(::Type{
- IntegratorTuples{I},
-}) where {U, T,
- I <:
- DEIntegrator{<:Any, <:Any, U, T}}
+ IntegratorTuples{I},
+ }) where {U, T, I <: DEIntegrator{<:Any, <:Any, U, T}}
Tuple{U, T}
end
Base.IteratorSize(::Type{<:IntegratorTuples}) = Base.SizeUnknown()
@@ -702,11 +722,8 @@ function Base.iterate(tup::IntegratorIntervals, state = 0)
end
function Base.eltype(::Type{
- IntegratorIntervals{I},
-}) where {U, T,
- I <:
- DEIntegrator{<:Any, <:Any, U, T
- }}
+ IntegratorIntervals{I},
+ }) where {U, T, I <: DEIntegrator{<:Any, <:Any, U, T}}
Tuple{U, T, U, T}
end
Base.IteratorSize(::Type{<:IntegratorIntervals}) = Base.SizeUnknown()
@@ -749,14 +766,16 @@ end
Base.length(iter::TimeChoiceIterator) = length(iter.ts)
@recipe function f(integrator::DEIntegrator;
- denseplot = (integrator.opts.calck ||
- integrator isa AbstractSDEIntegrator) &&
- integrator.iter > 0,
- plotdensity = 10,
- plot_analytic = false, vars = nothing, idxs = nothing)
+ denseplot = (integrator.opts.calck || integrator isa AbstractSDEIntegrator) &&
+ integrator.iter > 0,
+ plotdensity = 10,
+ plot_analytic = false,
+ vars = nothing,
+ idxs = nothing,)
if vars !== nothing
Base.depwarn("To maintain consistency with solution indexing, keyword argument vars will be removed in a future version. Please use keyword argument idxs instead.",
- :f; force = true)
+ :f;
+ force = true,)
(idxs !== nothing) &&
error("Simultaneously using keywords vars and idxs is not supported. Please only use idxs.")
idxs = vars
@@ -818,11 +837,13 @@ Base.length(iter::TimeChoiceIterator) = length(iter.ts)
push!(plot_vecs[j], integrator.t)
elseif x[j] == 1 && !(integrator.u isa AbstractArray)
push!(plot_vecs[j],
- integrator.sol.prob.f(Val{:analytic}, integrator.t,
+ integrator.sol.prob.f(Val{:analytic},
+ integrator.t,
integrator.sol[1]))
else
push!(plot_vecs[j],
- integrator.sol.prob.f(Val{:analytic}, integrator.t,
+ integrator.sol.prob.f(Val{:analytic},
+ integrator.t,
integrator.sol[1])[x[j]])
end
end
diff --git a/src/interpolation.jl b/src/interpolation.jl
index d212ac82f6..b6c99a71ef 100644
--- a/src/interpolation.jl
+++ b/src/interpolation.jl
@@ -72,20 +72,29 @@ end
function (id::SensitivityInterpolation)(tvals, idxs, deriv, p, continuity::Symbol = :left)
interpolation(tvals, id, idxs, deriv, p, continuity)
end
-function (id::SensitivityInterpolation)(val, tvals, idxs, deriv, p,
- continuity::Symbol = :left)
+function (id::SensitivityInterpolation)(val,
+ tvals,
+ idxs,
+ deriv,
+ p,
+ continuity::Symbol = :left)
interpolation!(val, tvals, id, idxs, deriv, p, continuity)
end
-@inline function interpolation(tvals, id::I, idxs, deriv::D, p,
- continuity::Symbol = :left) where {I, D}
+@inline function interpolation(tvals,
+ id::I,
+ idxs,
+ deriv::D,
+ p,
+ continuity::Symbol = :left) where {I, D}
t = id.t
u = id.u
id isa HermiteInterpolation && (du = id.du)
tdir = sign(t[end] - t[1])
- idx = sortperm(tvals, rev = tdir < 0)
+ idx = sortperm(tvals; rev = tdir < 0)
i = 2 # Start the search thinking it's between t[1] and t[2]
- t[end] == t[1] && (tvals[idx[1]] != t[1] || tvals[idx[end]] != t[1]) &&
+ t[end] == t[1] &&
+ (tvals[idx[1]] != t[1] || tvals[idx[end]] != t[1]) &&
error("Solution interpolation cannot extrapolate from a single timepoint. Either solve on a longer timespan or use the local extrapolation from the integrator interface.")
tdir * tvals[idx[end]] > tdir * t[end] &&
error("Solution interpolation cannot extrapolate past the final timepoint. Either solve on a longer timespan or use the local extrapolation from the integrator interface.")
@@ -100,7 +109,7 @@ end
end
for j in idx
tval = tvals[j]
- i = searchsortedfirst(@view(t[i:end]), tval, rev = tdir < 0) + i - 1 # It's in the interval t[i-1] to t[i]
+ i = searchsortedfirst(@view(t[i:end]), tval; rev = tdir < 0) + i - 1 # It's in the interval t[i-1] to t[i]
avoid_constant_ends = deriv != Val{0} #|| tval isa ForwardDiff.Dual
avoid_constant_ends && i == 1 && (i += 1)
if !avoid_constant_ends && t[i - 1] == tval # Can happen if it's the first value!
@@ -123,8 +132,15 @@ end
Θ = (tval - t[i - 1]) / dt
idxs_internal = idxs
if id isa HermiteInterpolation
- vals[j] = interpolant(Θ, id, dt, u[i - 1], u[i], du[i - 1], du[i],
- idxs_internal, deriv)
+ vals[j] = interpolant(Θ,
+ id,
+ dt,
+ u[i - 1],
+ u[i],
+ du[i - 1],
+ du[i],
+ idxs_internal,
+ deriv)
else
vals[j] = interpolant(Θ, id, dt, u[i - 1], u[i], idxs_internal, deriv)
end
@@ -139,15 +155,21 @@ $(SIGNATURES)
Get the value at tvals where the solution is known at the
times t (sorted), with values u and derivatives ks
"""
-@inline function interpolation!(vals, tvals, id::I, idxs, deriv::D, p,
- continuity::Symbol = :left) where {I, D}
+@inline function interpolation!(vals,
+ tvals,
+ id::I,
+ idxs,
+ deriv::D,
+ p,
+ continuity::Symbol = :left) where {I, D}
t = id.t
u = id.u
id isa HermiteInterpolation && (du = id.du)
tdir = sign(t[end] - t[1])
- idx = sortperm(tvals, rev = tdir < 0)
+ idx = sortperm(tvals; rev = tdir < 0)
i = 2 # Start the search thinking it's between t[1] and t[2]
- t[end] == t[1] && (tvals[idx[1]] != t[1] || tvals[idx[end]] != t[1]) &&
+ t[end] == t[1] &&
+ (tvals[idx[1]] != t[1] || tvals[idx[end]] != t[1]) &&
error("Solution interpolation cannot extrapolate from a single timepoint. Either solve on a longer timespan or use the local extrapolation from the integrator interface.")
tdir * tvals[idx[end]] > tdir * t[end] &&
error("Solution interpolation cannot extrapolate past the final timepoint. Either solve on a longer timespan or use the local extrapolation from the integrator interface.")
@@ -155,7 +177,7 @@ times t (sorted), with values u and derivatives ks
error("Solution interpolation cannot extrapolate before the first timepoint. Either start solving earlier or use the local extrapolation from the integrator interface.")
for j in idx
tval = tvals[j]
- i = searchsortedfirst(@view(t[i:end]), tval, rev = tdir < 0) + i - 1 # It's in the interval t[i-1] to t[i]
+ i = searchsortedfirst(@view(t[i:end]), tval; rev = tdir < 0) + i - 1 # It's in the interval t[i-1] to t[i]
avoid_constant_ends = deriv != Val{0} #|| tval isa ForwardDiff.Dual
avoid_constant_ends && i == 1 && (i += 1)
if !avoid_constant_ends && t[i - 1] == tval # Can happen if it's the first value!
@@ -179,15 +201,30 @@ times t (sorted), with values u and derivatives ks
idxs_internal = idxs
if eltype(u) <: Union{AbstractArray, ArrayPartition}
if id isa HermiteInterpolation
- interpolant!(vals[j], Θ, id, dt, u[i - 1], u[i], du[i - 1], du[i],
- idxs_internal, deriv)
+ interpolant!(vals[j],
+ Θ,
+ id,
+ dt,
+ u[i - 1],
+ u[i],
+ du[i - 1],
+ du[i],
+ idxs_internal,
+ deriv)
else
interpolant!(vals[j], Θ, id, dt, u[i - 1], u[i], idxs_internal, deriv)
end
else
if id isa HermiteInterpolation
- vals[j] = interpolant(Θ, id, dt, u[i - 1], u[i], du[i - 1], du[i],
- idxs_internal, deriv)
+ vals[j] = interpolant(Θ,
+ id,
+ dt,
+ u[i - 1],
+ u[i],
+ du[i - 1],
+ du[i],
+ idxs_internal,
+ deriv)
else
vals[j] = interpolant(Θ, id, dt, u[i - 1], u[i], idxs_internal, deriv)
end
@@ -202,19 +239,24 @@ $(SIGNATURES)
Get the value at tval where the solution is known at the
times t (sorted), with values u and derivatives ks
"""
-@inline function interpolation(tval::Number, id::I, idxs, deriv::D, p,
- continuity::Symbol = :left) where {I, D}
+@inline function interpolation(tval::Number,
+ id::I,
+ idxs,
+ deriv::D,
+ p,
+ continuity::Symbol = :left) where {I, D}
t = id.t
u = id.u
id isa HermiteInterpolation && (du = id.du)
tdir = sign(t[end] - t[1])
- t[end] == t[1] && tval != t[end] &&
+ t[end] == t[1] &&
+ tval != t[end] &&
error("Solution interpolation cannot extrapolate from a single timepoint. Either solve on a longer timespan or use the local extrapolation from the integrator interface.")
tdir * tval > tdir * t[end] &&
error("Solution interpolation cannot extrapolate past the final timepoint. Either solve on a longer timespan or use the local extrapolation from the integrator interface.")
tdir * tval < tdir * t[1] &&
error("Solution interpolation cannot extrapolate before the first timepoint. Either start solving earlier or use the local extrapolation from the integrator interface.")
- @inbounds i = searchsortedfirst(t, tval, rev = tdir < 0) # It's in the interval t[i-1] to t[i]
+ @inbounds i = searchsortedfirst(t, tval; rev = tdir < 0) # It's in the interval t[i-1] to t[i]
avoid_constant_ends = deriv != Val{0} #|| tval isa ForwardDiff.Dual
avoid_constant_ends && i == 1 && (i += 1)
if !avoid_constant_ends && t[i] == tval
@@ -237,7 +279,14 @@ times t (sorted), with values u and derivatives ks
Θ = (tval - t[i - 1]) / dt
idxs_internal = idxs
if id isa HermiteInterpolation
- val = interpolant(Θ, id, dt, u[i - 1], u[i], du[i - 1], du[i], idxs_internal,
+ val = interpolant(Θ,
+ id,
+ dt,
+ u[i - 1],
+ u[i],
+ du[i - 1],
+ du[i],
+ idxs_internal,
deriv)
else
val = interpolant(Θ, id, dt, u[i - 1], u[i], idxs_internal, deriv)
@@ -252,19 +301,25 @@ $(SIGNATURES)
Get the value at tval where the solution is known at the
times t (sorted), with values u and derivatives ks
"""
-@inline function interpolation!(out, tval::Number, id::I, idxs, deriv::D, p,
- continuity::Symbol = :left) where {I, D}
+@inline function interpolation!(out,
+ tval::Number,
+ id::I,
+ idxs,
+ deriv::D,
+ p,
+ continuity::Symbol = :left) where {I, D}
t = id.t
u = id.u
id isa HermiteInterpolation && (du = id.du)
tdir = sign(t[end] - t[1])
- t[end] == t[1] && tval != t[end] &&
+ t[end] == t[1] &&
+ tval != t[end] &&
error("Solution interpolation cannot extrapolate from a single timepoint. Either solve on a longer timespan or use the local extrapolation from the integrator interface.")
tdir * tval > tdir * t[end] &&
error("Solution interpolation cannot extrapolate past the final timepoint. Either solve on a longer timespan or use the local extrapolation from the integrator interface.")
tdir * tval < tdir * t[1] &&
error("Solution interpolation cannot extrapolate before the first timepoint. Either start solving earlier or use the local extrapolation from the integrator interface.")
- @inbounds i = searchsortedfirst(t, tval, rev = tdir < 0) # It's in the interval t[i-1] to t[i]
+ @inbounds i = searchsortedfirst(t, tval; rev = tdir < 0) # It's in the interval t[i-1] to t[i]
avoid_constant_ends = deriv != Val{0} #|| tval isa ForwardDiff.Dual
avoid_constant_ends && i == 1 && (i += 1)
if !avoid_constant_ends && t[i] == tval
@@ -287,7 +342,15 @@ times t (sorted), with values u and derivatives ks
Θ = (tval - t[i - 1]) / dt
idxs_internal = idxs
if id isa HermiteInterpolation
- interpolant!(out, Θ, id, dt, u[i - 1], u[i], du[i - 1], du[i], idxs_internal,
+ interpolant!(out,
+ Θ,
+ id,
+ dt,
+ u[i - 1],
+ u[i],
+ du[i - 1],
+ du[i],
+ idxs_internal,
deriv)
else
interpolant!(out, Θ, id, dt, u[i - 1], u[i], idxs_internal, deriv)
@@ -295,8 +358,15 @@ times t (sorted), with values u and derivatives ks
end
end
-@inline function interpolant(Θ, id::AbstractDiffEqInterpolation, dt, y₀, y₁, dy₀, dy₁, idxs,
- ::Type{Val{D}}) where {D}
+@inline function interpolant(Θ,
+ id::AbstractDiffEqInterpolation,
+ dt,
+ y₀,
+ y₁,
+ dy₀,
+ dy₁,
+ idxs,
+ ::Type{Val{D}}) where {D}
error("$(string(typeof(id))) for $(D)th order not implemented")
end
##################### Hermite Interpolants
@@ -306,22 +376,36 @@ Hairer Norsett Wanner Solving Ordinary Differential Equations I - Nonstiff Probl
Hermite Interpolation
"""
-@inline function interpolant(Θ, id::HermiteInterpolation, dt, y₀, y₁, dy₀, dy₁, idxs,
- T::Type{Val{0}})
+@inline function interpolant(Θ,
+ id::HermiteInterpolation,
+ dt,
+ y₀,
+ y₁,
+ dy₀,
+ dy₁,
+ idxs,
+ T::Type{Val{0}})
if idxs === nothing
- out = @. (1 - Θ) * y₀ + Θ * y₁ +
+ out = @. (1 - Θ) * y₀ +
+ Θ * y₁ +
Θ * (Θ - 1) * ((1 - 2Θ) * (y₁ - y₀) + (Θ - 1) * dt * dy₀ + Θ * dt * dy₁)
elseif idxs isa Number
- out = (1 - Θ) * y₀[idxs] + Θ * y₁[idxs] +
- Θ * (Θ - 1) *
+ out = (1 - Θ) * y₀[idxs] +
+ Θ * y₁[idxs] +
+ Θ *
+ (Θ - 1) *
((1 - 2Θ) * (y₁[idxs] - y₀[idxs]) +
- (Θ - 1) * dt * dy₀[idxs] + Θ * dt * dy₁[idxs])
+ (Θ - 1) * dt * dy₀[idxs] +
+ Θ * dt * dy₁[idxs])
else
out = similar(y₀, axes(idxs))
- @views @. out = (1 - Θ) * y₀[idxs] + Θ * y₁[idxs] +
- Θ * (Θ - 1) *
+ @views @. out = (1 - Θ) * y₀[idxs] +
+ Θ * y₁[idxs] +
+ Θ *
+ (Θ - 1) *
((1 - 2Θ) * (y₁[idxs] - y₀[idxs]) +
- (Θ - 1) * dt * dy₀[idxs] + Θ * dt * dy₁[idxs])
+ (Θ - 1) * dt * dy₀[idxs] +
+ Θ * dt * dy₁[idxs])
end
out
end
@@ -329,25 +413,34 @@ end
"""
Hermite Interpolation
"""
-@inline function interpolant(Θ, id::HermiteInterpolation, dt, y₀, y₁, dy₀, dy₁, idxs,
- T::Type{Val{1}})
+@inline function interpolant(Θ,
+ id::HermiteInterpolation,
+ dt,
+ y₀,
+ y₁,
+ dy₀,
+ dy₁,
+ idxs,
+ T::Type{Val{1}})
if idxs === nothing
out = @. dy₀ +
Θ * (-4 * dt * dy₀ - 2 * dt * dy₁ - 6 * y₀ +
- Θ * (3 * dt * dy₀ + 3 * dt * dy₁ + 6 * y₀ - 6 * y₁) + 6 * y₁) / dt
+ Θ * (3 * dt * dy₀ + 3 * dt * dy₁ + 6 * y₀ - 6 * y₁) +
+ 6 * y₁) / dt
elseif idxs isa Number
out = dy₀[idxs] +
- Θ * (-4 * dt * dy₀[idxs] -
- 2 * dt * dy₁[idxs] - 6 * y₀[idxs] +
- Θ * (3 * dt * dy₀[idxs] + 3 * dt * dy₁[idxs] +
- 6 * y₀[idxs] - 6 * y₁[idxs]) + 6 * y₁[idxs]) / dt
+ Θ * (-4 * dt * dy₀[idxs] - 2 * dt * dy₁[idxs] - 6 * y₀[idxs] +
+ Θ *
+ (3 * dt * dy₀[idxs] + 3 * dt * dy₁[idxs] + 6 * y₀[idxs] - 6 * y₁[idxs]) +
+ 6 * y₁[idxs]) / dt
else
out = similar(y₀, axes(idxs))
@views @. out = dy₀[idxs] +
- Θ * (-4 * dt * dy₀[idxs] -
- 2 * dt * dy₁[idxs] - 6 * y₀[idxs] +
- Θ * (3 * dt * dy₀[idxs] + 3 * dt * dy₁[idxs] +
- 6 * y₀[idxs] - 6 * y₁[idxs]) + 6 * y₁[idxs]) / dt
+ Θ * (-4 * dt * dy₀[idxs] - 2 * dt * dy₁[idxs] - 6 * y₀[idxs] +
+ Θ *
+ (3 * dt * dy₀[idxs] + 3 * dt * dy₁[idxs] + 6 * y₀[idxs] -
+ 6 * y₁[idxs]) +
+ 6 * y₁[idxs]) / dt
end
out
end
@@ -355,21 +448,31 @@ end
"""
Hermite Interpolation
"""
-@inline function interpolant(Θ, id::HermiteInterpolation, dt, y₀, y₁, dy₀, dy₁, idxs,
- T::Type{Val{2}})
+@inline function interpolant(Θ,
+ id::HermiteInterpolation,
+ dt,
+ y₀,
+ y₁,
+ dy₀,
+ dy₁,
+ idxs,
+ T::Type{Val{2}})
if idxs === nothing
out = @. (-4 * dt * dy₀ - 2 * dt * dy₁ - 6 * y₀ +
- Θ * (6 * dt * dy₀ + 6 * dt * dy₁ + 12 * y₀ - 12 * y₁) + 6 * y₁) /
- (dt * dt)
+ Θ * (6 * dt * dy₀ + 6 * dt * dy₁ + 12 * y₀ - 12 * y₁) +
+ 6 * y₁) / (dt * dt)
elseif idxs isa Number
out = (-4 * dt * dy₀[idxs] - 2 * dt * dy₁[idxs] - 6 * y₀[idxs] +
- Θ * (6 * dt * dy₀[idxs] + 6 * dt * dy₁[idxs] + 12 * y₀[idxs] -
- 12 * y₁[idxs]) + 6 * y₁[idxs]) / (dt * dt)
+ Θ *
+ (6 * dt * dy₀[idxs] + 6 * dt * dy₁[idxs] + 12 * y₀[idxs] - 12 * y₁[idxs]) +
+ 6 * y₁[idxs]) / (dt * dt)
else
out = similar(y₀, axes(idxs))
@views @. out = (-4 * dt * dy₀[idxs] - 2 * dt * dy₁[idxs] - 6 * y₀[idxs] +
- Θ * (6 * dt * dy₀[idxs] + 6 * dt * dy₁[idxs] + 12 * y₀[idxs] -
- 12 * y₁[idxs]) + 6 * y₁[idxs]) / (dt * dt)
+ Θ *
+ (6 * dt * dy₀[idxs] + 6 * dt * dy₁[idxs] + 12 * y₀[idxs] -
+ 12 * y₁[idxs]) +
+ 6 * y₁[idxs]) / (dt * dt)
end
out
end
@@ -377,17 +480,25 @@ end
"""
Hermite Interpolation
"""
-@inline function interpolant(Θ, id::HermiteInterpolation, dt, y₀, y₁, dy₀, dy₁, idxs,
- T::Type{Val{3}})
+@inline function interpolant(Θ,
+ id::HermiteInterpolation,
+ dt,
+ y₀,
+ y₁,
+ dy₀,
+ dy₁,
+ idxs,
+ T::Type{Val{3}})
if idxs === nothing
out = @. (6 * dt * dy₀ + 6 * dt * dy₁ + 12 * y₀ - 12 * y₁) / (dt * dt * dt)
elseif idxs isa Number
- out = (6 * dt * dy₀[idxs] + 6 * dt * dy₁[idxs] +
- 12 * y₀[idxs] - 12 * y₁[idxs]) / (dt * dt * dt)
+ out = (6 * dt * dy₀[idxs] + 6 * dt * dy₁[idxs] + 12 * y₀[idxs] - 12 * y₁[idxs]) /
+ (dt * dt * dt)
else
out = similar(y₀, axes(idxs))
- @views @. out = (6 * dt * dy₀[idxs] + 6 * dt * dy₁[idxs] +
- 12 * y₀[idxs] - 12 * y₁[idxs]) / (dt * dt * dt)
+ @views @. out = (6 * dt * dy₀[idxs] + 6 * dt * dy₁[idxs] + 12 * y₀[idxs] -
+ 12 * y₁[idxs]) /
+ (dt * dt * dt)
end
out
end
@@ -397,20 +508,35 @@ Hairer Norsett Wanner Solving Ordinary Differential Equations I - Nonstiff Proble
Hermite Interpolation
"""
-@inline function interpolant!(out, Θ, id::HermiteInterpolation, dt, y₀, y₁, dy₀, dy₁, idxs,
- T::Type{Val{0}})
+@inline function interpolant!(out,
+ Θ,
+ id::HermiteInterpolation,
+ dt,
+ y₀,
+ y₁,
+ dy₀,
+ dy₁,
+ idxs,
+ T::Type{Val{0}})
if out === nothing
- return (1 - Θ) * y₀[idxs] + Θ * y₁[idxs] +
- Θ * (Θ - 1) *
- ((1 - 2Θ) * (y₁[idxs] - y₀[idxs]) + (Θ - 1) * dt * dy₀[idxs] +
+ return (1 - Θ) * y₀[idxs] +
+ Θ * y₁[idxs] +
+ Θ *
+ (Θ - 1) *
+ ((1 - 2Θ) * (y₁[idxs] - y₀[idxs]) +
+ (Θ - 1) * dt * dy₀[idxs] +
Θ * dt * dy₁[idxs])
elseif idxs === nothing
- @. out = (1 - Θ) * y₀ + Θ * y₁ +
+ @. out = (1 - Θ) * y₀ +
+ Θ * y₁ +
Θ * (Θ - 1) * ((1 - 2Θ) * (y₁ - y₀) + (Θ - 1) * dt * dy₀ + Θ * dt * dy₁)
else
- @views @. out = (1 - Θ) * y₀[idxs] + Θ * y₁[idxs] +
- Θ * (Θ - 1) *
- ((1 - 2Θ) * (y₁[idxs] - y₀[idxs]) + (Θ - 1) * dt * dy₀[idxs] +
+ @views @. out = (1 - Θ) * y₀[idxs] +
+ Θ * y₁[idxs] +
+ Θ *
+ (Θ - 1) *
+ ((1 - 2Θ) * (y₁[idxs] - y₀[idxs]) +
+ (Θ - 1) * dt * dy₀[idxs] +
Θ * dt * dy₁[idxs])
end
end
@@ -418,8 +544,16 @@ end
"""
Hermite Interpolation
"""
-@inline function interpolant!(out, Θ, id::HermiteInterpolation, dt, y₀, y₁, dy₀, dy₁, idxs,
- T::Type{Val{1}})
+@inline function interpolant!(out,
+ Θ,
+ id::HermiteInterpolation,
+ dt,
+ y₀,
+ y₁,
+ dy₀,
+ dy₁,
+ idxs,
+ T::Type{Val{1}})
if out === nothing
return dy₀[idxs] +
Θ * (-4 * dt * dy₀[idxs] - 2 * dt * dy₁[idxs] - 6 * y₀[idxs] +
@@ -429,20 +563,31 @@ Hermite Interpolation
elseif idxs === nothing
@. out = dy₀ +
Θ * (-4 * dt * dy₀ - 2 * dt * dy₁ - 6 * y₀ +
- Θ * (3 * dt * dy₀ + 3 * dt * dy₁ + 6 * y₀ - 6 * y₁) + 6 * y₁) / dt
+ Θ * (3 * dt * dy₀ + 3 * dt * dy₁ + 6 * y₀ - 6 * y₁) +
+ 6 * y₁) / dt
else
@views @. out = dy₀[idxs] +
Θ * (-4 * dt * dy₀[idxs] - 2 * dt * dy₁[idxs] - 6 * y₀[idxs] +
- Θ * (3 * dt * dy₀[idxs] + 3 * dt * dy₁[idxs] + 6 * y₀[idxs] -
- 6 * y₁[idxs]) + 6 * y₁[idxs]) / dt
+ Θ *
+ (3 * dt * dy₀[idxs] + 3 * dt * dy₁[idxs] + 6 * y₀[idxs] -
+ 6 * y₁[idxs]) +
+ 6 * y₁[idxs]) / dt
end
end
"""
Hermite Interpolation
"""
-@inline function interpolant!(out, Θ, id::HermiteInterpolation, dt, y₀, y₁, dy₀, dy₁, idxs,
- T::Type{Val{2}})
+@inline function interpolant!(out,
+ Θ,
+ id::HermiteInterpolation,
+ dt,
+ y₀,
+ y₁,
+ dy₀,
+ dy₁,
+ idxs,
+ T::Type{Val{2}})
if out === nothing
return (-4 * dt * dy₀[idxs] - 2 * dt * dy₁[idxs] - 6 * y₀[idxs] +
Θ *
@@ -450,20 +595,30 @@ Hermite Interpolation
6 * y₁[idxs]) / (dt * dt)
elseif idxs === nothing
@. out = (-4 * dt * dy₀ - 2 * dt * dy₁ - 6 * y₀ +
- Θ * (6 * dt * dy₀ + 6 * dt * dy₁ + 12 * y₀ - 12 * y₁) + 6 * y₁) /
- (dt * dt)
+ Θ * (6 * dt * dy₀ + 6 * dt * dy₁ + 12 * y₀ - 12 * y₁) +
+ 6 * y₁) / (dt * dt)
else
@views @. out = (-4 * dt * dy₀[idxs] - 2 * dt * dy₁[idxs] - 6 * y₀[idxs] +
- Θ * (6 * dt * dy₀[idxs] + 6 * dt * dy₁[idxs] + 12 * y₀[idxs] -
- 12 * y₁[idxs]) + 6 * y₁[idxs]) / (dt * dt)
+ Θ *
+ (6 * dt * dy₀[idxs] + 6 * dt * dy₁[idxs] + 12 * y₀[idxs] -
+ 12 * y₁[idxs]) +
+ 6 * y₁[idxs]) / (dt * dt)
end
end
"""
Hermite Interpolation
"""
-@inline function interpolant!(out, Θ, id::HermiteInterpolation, dt, y₀, y₁, dy₀, dy₁, idxs,
- T::Type{Val{3}})
+@inline function interpolant!(out,
+ Θ,
+ id::HermiteInterpolation,
+ dt,
+ y₀,
+ y₁,
+ dy₀,
+ dy₁,
+ idxs,
+ T::Type{Val{3}})
if out === nothing
return (6 * dt * dy₀[idxs] + 6 * dt * dy₁[idxs] + 12 * y₀[idxs] - 12 * y₁[idxs]) /
(dt * dt * dt)
@@ -471,7 +626,8 @@ Hermite Interpolation
@. out = (6 * dt * dy₀ + 6 * dt * dy₁ + 12 * y₀ - 12 * y₁) / (dt * dt * dt)
else
@views @. out = (6 * dt * dy₀[idxs] + 6 * dt * dy₁[idxs] + 12 * y₀[idxs] -
- 12 * y₁[idxs]) / (dt * dt * dt)
+ 12 * y₁[idxs]) /
+ (dt * dt * dt)
end
end
@@ -508,8 +664,14 @@ end
"""
Linear Interpolation
"""
-@inline function interpolant!(out, Θ, id::LinearInterpolation, dt, y₀, y₁, idxs,
- T::Type{Val{0}})
+@inline function interpolant!(out,
+ Θ,
+ id::LinearInterpolation,
+ dt,
+ y₀,
+ y₁,
+ idxs,
+ T::Type{Val{0}})
Θm1 = (1 - Θ)
if out === nothing
return Θm1 * y₀[idxs] + Θ * y₁[idxs]
@@ -523,8 +685,14 @@ end
"""
Linear Interpolation
"""
-@inline function interpolant!(out, Θ, id::LinearInterpolation, dt, y₀, y₁, idxs,
- T::Type{Val{1}})
+@inline function interpolant!(out,
+ Θ,
+ id::LinearInterpolation,
+ dt,
+ y₀,
+ y₁,
+ idxs,
+ T::Type{Val{1}})
if out === nothing
return (y₁[idxs] - y₀[idxs]) / dt
elseif idxs === nothing
@@ -539,8 +707,13 @@ end
"""
Constant Interpolation
"""
-@inline function interpolant(Θ, id::ConstantInterpolation, dt, y₀, y₁, idxs,
- T::Type{Val{0}})
+@inline function interpolant(Θ,
+ id::ConstantInterpolation,
+ dt,
+ y₀,
+ y₁,
+ idxs,
+ T::Type{Val{0}})
if idxs === nothing
out = @. y₀
elseif idxs isa Number
@@ -552,8 +725,13 @@ Constant Interpolation
out
end
-@inline function interpolant(Θ, id::ConstantInterpolation, dt, y₀, y₁, idxs,
- T::Type{Val{1}})
+@inline function interpolant(Θ,
+ id::ConstantInterpolation,
+ dt,
+ y₀,
+ y₁,
+ idxs,
+ T::Type{Val{1}})
if idxs === nothing
out = zeros(eltype(y₀), length(y₀))
elseif idxs isa Number
@@ -568,8 +746,14 @@ end
"""
Constant Interpolation
"""
-@inline function interpolant!(out, Θ, id::ConstantInterpolation, dt, y₀, y₁, idxs,
- T::Type{Val{0}})
+@inline function interpolant!(out,
+ Θ,
+ id::ConstantInterpolation,
+ dt,
+ y₀,
+ y₁,
+ idxs,
+ T::Type{Val{0}})
if out === nothing
return y₀[idxs]
elseif idxs === nothing
@@ -582,8 +766,14 @@ end
"""
Constant Interpolation
"""
-@inline function interpolant!(out, Θ, id::ConstantInterpolation, dt, y₀, y₁, idxs,
- T::Type{Val{1}})
+@inline function interpolant!(out,
+ Θ,
+ id::ConstantInterpolation,
+ dt,
+ y₀,
+ y₁,
+ idxs,
+ T::Type{Val{1}})
if out === nothing
return zeros(eltype(y₀), length(idxs))
else
diff --git a/src/operators/basic_operators.jl b/src/operators/basic_operators.jl
index 3faab1451d..ee4d2bafe5 100644
--- a/src/operators/basic_operators.jl
+++ b/src/operators/basic_operators.jl
@@ -110,8 +110,9 @@ end
# propagate_inbounds here for the getindex fallback
Base.@propagate_inbounds Base.convert(::Type{AbstractMatrix}, L::DiffEqArrayOperator) = L.A
Base.@propagate_inbounds Base.setindex!(L::DiffEqArrayOperator, v, i::Int) = (L.A[i] = v)
-Base.@propagate_inbounds function Base.setindex!(L::DiffEqArrayOperator, v,
- I::Vararg{Int, N}) where {N}
+Base.@propagate_inbounds function Base.setindex!(L::DiffEqArrayOperator,
+ v,
+ I::Vararg{Int, N}) where {N}
(L.A[I...] = v)
end
@@ -133,7 +134,8 @@ function Base.copy(L::DiffEqArrayOperator)
end
const AdjointFact = isdefined(LinearAlgebra, :AdjointFactorization) ?
- LinearAlgebra.AdjointFactorization : Adjoint
+ LinearAlgebra.AdjointFactorization :
+ Adjoint
const TransposeFact = isdefined(LinearAlgebra, :TransposeFactorization) ?
LinearAlgebra.TransposeFactorization : Transpose
@@ -144,9 +146,12 @@ Like DiffEqArrayOperator, but stores a Factorization instead.
Supports left division and `ldiv!` when applied to an array.
"""
-struct FactorizedDiffEqArrayOperator{T <: Number,
+struct FactorizedDiffEqArrayOperator{
+ T <: Number,
FType <: Union{
- Factorization{T}, Diagonal{T}, Bidiagonal{T},
+ Factorization{T},
+ Diagonal{T},
+ Bidiagonal{T},
AdjointFact{T, <:Factorization{T}},
TransposeFact{T, <:Factorization{T}},
},
@@ -155,39 +160,39 @@ struct FactorizedDiffEqArrayOperator{T <: Number,
end
function Base.convert(::Type{AbstractMatrix},
- L::FactorizedDiffEqArrayOperator{<:Any,
- <:Union{Factorization, AbstractMatrix
- }})
+ L::FactorizedDiffEqArrayOperator{<:Any, <:Union{Factorization, AbstractMatrix}})
convert(AbstractMatrix, L.F)
end
function Base.convert(::Type{AbstractMatrix},
- L::FactorizedDiffEqArrayOperator{<:Any, <:Union{Adjoint, AdjointFact}
- })
+ L::FactorizedDiffEqArrayOperator{<:Any, <:Union{Adjoint, AdjointFact}})
adjoint(convert(AbstractMatrix, adjoint(L.F)))
end
function Base.convert(::Type{AbstractMatrix},
- L::FactorizedDiffEqArrayOperator{<:Any,
- <:Union{Transpose, TransposeFact}})
+ L::FactorizedDiffEqArrayOperator{<:Any, <:Union{Transpose, TransposeFact}})
transpose(convert(AbstractMatrix, transpose(L.F)))
end
-function Base.Matrix(L::FactorizedDiffEqArrayOperator{<:Any,
- <:Union{Factorization, AbstractMatrix
- }})
+function Base.Matrix(L::FactorizedDiffEqArrayOperator{
+ <:Any,
+ <:Union{Factorization, AbstractMatrix},
+ })
Matrix(L.F)
end
function Base.Matrix(L::FactorizedDiffEqArrayOperator{<:Any, <:Union{Adjoint, AdjointFact}})
adjoint(Matrix(adjoint(L.F)))
end
-function Base.Matrix(L::FactorizedDiffEqArrayOperator{<:Any,
- <:Union{Transpose, TransposeFact}})
+function Base.Matrix(L::FactorizedDiffEqArrayOperator{
+ <:Any,
+ <:Union{Transpose, TransposeFact},
+ })
transpose(Matrix(transpose(L.F)))
end
Base.adjoint(L::FactorizedDiffEqArrayOperator) = FactorizedDiffEqArrayOperator(L.F')
Base.size(L::FactorizedDiffEqArrayOperator, args...) = size(L.F, args...)
-function LinearAlgebra.ldiv!(Y::AbstractVecOrMat, L::FactorizedDiffEqArrayOperator,
- B::AbstractVecOrMat)
+function LinearAlgebra.ldiv!(Y::AbstractVecOrMat,
+ L::FactorizedDiffEqArrayOperator,
+ B::AbstractVecOrMat)
ldiv!(Y, L.F, B)
end
LinearAlgebra.ldiv!(L::FactorizedDiffEqArrayOperator, B::AbstractVecOrMat) = ldiv!(L.F, B)
diff --git a/src/operators/common_defaults.jl b/src/operators/common_defaults.jl
index d3cd298414..e243025e89 100644
--- a/src/operators/common_defaults.jl
+++ b/src/operators/common_defaults.jl
@@ -14,7 +14,7 @@ function LinearAlgebra.opnorm(L::AbstractDiffEqLinearOperator, p::Real = 2)
opnorm(convert(AbstractMatrix, L), p)
end
Base.@propagate_inbounds function Base.getindex(L::AbstractDiffEqLinearOperator,
- I::Vararg{Any, N}) where {N}
+ I::Vararg{Any, N}) where {N}
convert(AbstractMatrix, L)[I...]
end
function Base.getindex(L::AbstractDiffEqLinearOperator, I::Vararg{Int, N}) where {N}
@@ -40,26 +40,34 @@ for op in (:*, :/, :\)
end
### added in https://github.com/SciML/SciMLBase.jl/pull/377
-function LinearAlgebra.mul!(Y::AbstractVecOrMat, L::AbstractDiffEqLinearOperator,
- B::AbstractVecOrMat)
+function LinearAlgebra.mul!(Y::AbstractVecOrMat,
+ L::AbstractDiffEqLinearOperator,
+ B::AbstractVecOrMat)
mul!(Y, convert(AbstractMatrix, L), B)
end
###
-function LinearAlgebra.mul!(Y::AbstractArray, L::AbstractDiffEqLinearOperator,
- B::AbstractArray)
+function LinearAlgebra.mul!(Y::AbstractArray,
+ L::AbstractDiffEqLinearOperator,
+ B::AbstractArray)
mul!(Y, convert(AbstractMatrix, L), B)
end
### added in https://github.com/SciML/SciMLBase.jl/pull/377
-function LinearAlgebra.mul!(Y::AbstractVecOrMat, L::AbstractDiffEqLinearOperator,
- B::AbstractVecOrMat, α::Number, β::Number)
+function LinearAlgebra.mul!(Y::AbstractVecOrMat,
+ L::AbstractDiffEqLinearOperator,
+ B::AbstractVecOrMat,
+ α::Number,
+ β::Number)
mul!(Y, convert(AbstractMatrix, L), B, α, β)
end
###
-function LinearAlgebra.mul!(Y::AbstractArray, L::AbstractDiffEqLinearOperator,
- B::AbstractArray, α::Number, β::Number)
+function LinearAlgebra.mul!(Y::AbstractArray,
+ L::AbstractDiffEqLinearOperator,
+ B::AbstractArray,
+ α::Number,
+ β::Number)
mul!(Y, convert(AbstractMatrix, L), B, α, β)
end
@@ -76,8 +84,20 @@ end
function LinearAlgebra.factorize(L::AbstractDiffEqLinearOperator)
FactorizedDiffEqArrayOperator(factorize(convert(AbstractMatrix, L)))
end
-for fact in (:lu, :lu!, :qr, :qr!, :cholesky, :cholesky!, :ldlt, :ldlt!,
- :bunchkaufman, :bunchkaufman!, :lq, :lq!, :svd, :svd!)
+for fact in (:lu,
+ :lu!,
+ :qr,
+ :qr!,
+ :cholesky,
+ :cholesky!,
+ :ldlt,
+ :ldlt!,
+ :bunchkaufman,
+ :bunchkaufman!,
+ :lq,
+ :lq!,
+ :svd,
+ :svd!)
@eval function LinearAlgebra.$fact(L::AbstractDiffEqLinearOperator, args...)
FactorizedDiffEqArrayOperator($fact(convert(AbstractMatrix, L), args...))
end
diff --git a/src/operators/diffeq_operator.jl b/src/operators/diffeq_operator.jl
index a97063b830..7e6b47e067 100644
--- a/src/operators/diffeq_operator.jl
+++ b/src/operators/diffeq_operator.jl
@@ -7,14 +7,14 @@ AffineDiffEqOperator{T}(As,Bs,du_cache=nothing)
Takes in two tuples for split Affine DiffEqs
-1. update_coefficients! works by updating the coefficients of the component
- operators.
-2. Function calls L(u, p, t) and L(du, u, p, t) are fallbacks interpreted in this form.
- This will allow them to work directly in the nonlinear ODE solvers without
- modification.
-3. f(du, u, p, t) is only allowed if a du_cache is given
-4. B(t) can be Union{Number,AbstractArray}, in which case they are constants.
- Otherwise they are interpreted they are functions v=B(t) and B(v,t)
+ 1. update_coefficients! works by updating the coefficients of the component
+ operators.
+ 2. Function calls L(u, p, t) and L(du, u, p, t) are fallbacks interpreted in this form.
+ This will allow them to work directly in the nonlinear ODE solvers without
+ modification.
+ 3. f(du, u, p, t) is only allowed if a du_cache is given
+ 4. B(t) can be Union{Number,AbstractArray}, in which case they are constants.
+ Otherwise they are interpreted they are functions v=B(t) and B(v,t)
Solvers will see this operator from integrator.f and can interpret it by
checking the internals of As and Bs. For example, it can check isconstant(As[1])
@@ -25,8 +25,7 @@ struct AffineDiffEqOperator{T, T1, T2, U} <: AbstractDiffEqOperator{T}
Bs::T2
du_cache::U
function AffineDiffEqOperator{T}(As, Bs, du_cache = nothing) where {T}
- all([size(a) == size(As[1])
- for a in As]) || error("Operator sizes do not agree")
+ all([size(a) == size(As[1]) for a in As]) || error("Operator sizes do not agree")
new{T, typeof(As), typeof(Bs), typeof(du_cache)}(As, Bs, du_cache)
end
end
@@ -119,8 +118,9 @@ Base.:*(L::DiffEqScaledOperator, x::AbstractArray) = L.coeff * (L.op * x)
Base.:*(x::AbstractVecOrMat, L::DiffEqScaledOperator) = (x * L.op) * L.coeff
Base.:*(x::AbstractArray, L::DiffEqScaledOperator) = (x * L.op) * L.coeff
-function LinearAlgebra.mul!(r::AbstractVecOrMat, L::DiffEqScaledOperator,
- x::AbstractVecOrMat)
+function LinearAlgebra.mul!(r::AbstractVecOrMat,
+ L::DiffEqScaledOperator,
+ x::AbstractVecOrMat)
mul!(r, L.op, x)
r .= r * L.coeff
end
@@ -129,8 +129,9 @@ function LinearAlgebra.mul!(r::AbstractArray, L::DiffEqScaledOperator, x::Abstra
r .= r * L.coeff
end
-function LinearAlgebra.mul!(r::AbstractVecOrMat, x::AbstractVecOrMat,
- L::DiffEqScaledOperator)
+function LinearAlgebra.mul!(r::AbstractVecOrMat,
+ x::AbstractVecOrMat,
+ L::DiffEqScaledOperator)
mul!(r, x, L.op)
r .= r * L.coeff
end
@@ -154,15 +155,16 @@ Base.:\(x::AbstractArray, L::DiffEqScaledOperator) = L.coeff * (x \ L)
for N in (2, 3)
@eval begin
function LinearAlgebra.mul!(Y::AbstractArray{T, $N},
- L::DiffEqScaledOperator{T},
- B::AbstractArray{T, $N}) where {T}
+ L::DiffEqScaledOperator{T},
+ B::AbstractArray{T, $N}) where {T}
LinearAlgebra.lmul!(Y, L.coeff, mul!(Y, L.op, B))
end
end
end
-function LinearAlgebra.ldiv!(Y::AbstractVecOrMat, L::DiffEqScaledOperator,
- B::AbstractVecOrMat)
+function LinearAlgebra.ldiv!(Y::AbstractVecOrMat,
+ L::DiffEqScaledOperator,
+ B::AbstractVecOrMat)
lmul!(1 / L.coeff, ldiv!(Y, L.op, B))
end
function LinearAlgebra.ldiv!(Y::AbstractArray, L::DiffEqScaledOperator, B::AbstractArray)
@@ -170,8 +172,20 @@ function LinearAlgebra.ldiv!(Y::AbstractArray, L::DiffEqScaledOperator, B::Abstr
end
LinearAlgebra.factorize(L::DiffEqScaledOperator) = L.coeff * factorize(L.op)
-for fact in (:lu, :lu!, :qr, :qr!, :cholesky, :cholesky!, :ldlt, :ldlt!,
- :bunchkaufman, :bunchkaufman!, :lq, :lq!, :svd, :svd!)
+for fact in (:lu,
+ :lu!,
+ :qr,
+ :qr!,
+ :cholesky,
+ :cholesky!,
+ :ldlt,
+ :ldlt!,
+ :bunchkaufman,
+ :bunchkaufman!,
+ :lq,
+ :lq!,
+ :svd,
+ :svd!)
@eval function LinearAlgebra.$fact(L::DiffEqScaledOperator, args...)
L.coeff * fact(L.op, args...)
end
diff --git a/src/performance_warnings.jl b/src/performance_warnings.jl
index 28bf92e423..6a4046e7ea 100644
--- a/src/performance_warnings.jl
+++ b/src/performance_warnings.jl
@@ -24,7 +24,10 @@ To turn it off globally within the active project you can execute the following
```julia
using Preferences, UUIDs
-set_preferences!(UUID("1dea7af3-3e70-54e6-95c3-0bf5283fa5ed"), "PerformanceWarnings" => false)
+set_preferences!(
+ UUID("1dea7af3-3e70-54e6-95c3-0bf5283fa5ed"),
+ "PerformanceWarnings" => false,
+)
```
"""
function warn_paramtype(p, warn_performance = PERFORMANCE_WARNINGS)
diff --git a/src/problems/analytical_problems.jl b/src/problems/analytical_problems.jl
index d6b744885a..93766377c1 100644
--- a/src/problems/analytical_problems.jl
+++ b/src/problems/analytical_problems.jl
@@ -8,13 +8,15 @@ struct AnalyticalProblem{uType, tType, isinplace, P, F, K} <:
tspan::tType
p::P
kwargs::K
- @add_kwonly function AnalyticalProblem{iip}(f, u0, tspan, p = NullParameters();
- kwargs...) where {iip}
+ @add_kwonly function AnalyticalProblem{iip}(f,
+ u0,
+ tspan,
+ p = NullParameters();
+ kwargs...,) where {iip}
_u0 = prepare_initial_state(u0)
_tspan = promote_tspan(tspan)
warn_paramtype(p)
- new{typeof(_u0), typeof(_tspan), iip, typeof(p),
- typeof(f), typeof(kwargs)}(f,
+ new{typeof(_u0), typeof(_tspan), iip, typeof(p), typeof(f), typeof(kwargs)}(f,
_u0,
_tspan,
p,
diff --git a/src/problems/basic_problems.jl b/src/problems/basic_problems.jl
index f9df45b143..7f17f86d4a 100644
--- a/src/problems/basic_problems.jl
+++ b/src/problems/basic_problems.jl
@@ -59,10 +59,16 @@ struct LinearProblem{uType, isinplace, F, bType, P, K} <:
u0::uType
p::P
kwargs::K
- @add_kwonly function LinearProblem{iip}(A, b, p = NullParameters(); u0 = nothing,
- kwargs...) where {iip}
+ @add_kwonly function LinearProblem{iip}(A,
+ b,
+ p = NullParameters();
+ u0 = nothing,
+ kwargs...,) where {iip}
warn_paramtype(p)
- new{typeof(u0), iip, typeof(A), typeof(b), typeof(p), typeof(kwargs)}(A, b, u0, p,
+ new{typeof(u0), iip, typeof(A), typeof(b), typeof(p), typeof(kwargs)}(A,
+ b,
+ u0,
+ p,
kwargs)
end
end
@@ -147,10 +153,9 @@ struct IntervalNonlinearProblem{isinplace, tType, P, F, K, PT <: AbstractProblem
tspan,
p = NullParameters(),
problem_type = StandardNonlinearProblem();
- kwargs...) where {iip}
+ kwargs...,) where {iip}
warn_paramtype(p)
- new{iip, typeof(tspan), typeof(p), typeof(f),
- typeof(kwargs), typeof(problem_type)}(f,
+ new{iip, typeof(tspan), typeof(p), typeof(f), typeof(kwargs), typeof(problem_type)}(f,
tspan,
p,
problem_type,
@@ -176,8 +181,10 @@ $(SIGNATURES)
Define a nonlinear problem using an instance of
[`IntervalNonlinearFunction`](@ref IntervalNonlinearFunction).
"""
-function IntervalNonlinearProblem(f::AbstractIntervalNonlinearFunction, tspan,
- p = NullParameters(); kwargs...)
+function IntervalNonlinearProblem(f::AbstractIntervalNonlinearFunction,
+ tspan,
+ p = NullParameters();
+ kwargs...,)
IntervalNonlinearProblem{isinplace(f)}(f, tspan, p; kwargs...)
end
@@ -240,13 +247,13 @@ struct NonlinearProblem{uType, isinplace, P, F, K, PT <: AbstractProblemType} <:
p::P
problem_type::PT
kwargs::K
- @add_kwonly function NonlinearProblem{iip}(f::AbstractNonlinearFunction{iip}, u0,
+ @add_kwonly function NonlinearProblem{iip}(f::AbstractNonlinearFunction{iip},
+ u0,
p = NullParameters(),
problem_type = StandardNonlinearProblem();
- kwargs...) where {iip}
+ kwargs...,) where {iip}
warn_paramtype(p)
- new{typeof(u0), iip, typeof(p), typeof(f),
- typeof(kwargs), typeof(problem_type)}(f,
+ new{typeof(u0), iip, typeof(p), typeof(f), typeof(kwargs), typeof(problem_type)}(f,
u0,
p,
problem_type,
@@ -364,7 +371,11 @@ struct NonlinearLeastSquaresProblem{uType, isinplace, P, F, K} <:
kwargs::K
@add_kwonly function NonlinearLeastSquaresProblem{iip}(f::AbstractNonlinearFunction{
- iip}, u0, p = NullParameters(); kwargs...) where {iip}
+ iip,
+ },
+ u0,
+ p = NullParameters();
+ kwargs...,) where {iip}
warn_paramtype(p)
return new{typeof(u0), iip, typeof(p), typeof(f), typeof(kwargs)}(f, u0, p, kwargs)
end
@@ -382,8 +393,10 @@ $(SIGNATURES)
Define a nonlinear least squares problem using an instance of
[`AbstractNonlinearFunction`](@ref AbstractNonlinearFunction).
"""
-function NonlinearLeastSquaresProblem(f::AbstractNonlinearFunction, u0,
- p = NullParameters(); kwargs...)
+function NonlinearLeastSquaresProblem(f::AbstractNonlinearFunction,
+ u0,
+ p = NullParameters();
+ kwargs...,)
return NonlinearLeastSquaresProblem{isinplace(f)}(f, u0, p; kwargs...)
end
@@ -444,12 +457,12 @@ struct IntegralProblem{isinplace, P, F, T, K} <: AbstractIntegralProblem{isinpla
domain::T
p::P
kwargs::K
- @add_kwonly function IntegralProblem{iip}(f::AbstractIntegralFunction{iip}, domain,
+ @add_kwonly function IntegralProblem{iip}(f::AbstractIntegralFunction{iip},
+ domain,
p = NullParameters();
- kwargs...) where {iip}
+ kwargs...,) where {iip}
warn_paramtype(p)
- new{iip, typeof(p), typeof(f), typeof(domain), typeof(kwargs)}(f,
- domain, p, kwargs)
+ new{iip, typeof(p), typeof(f), typeof(domain), typeof(kwargs)}(f, domain, p, kwargs)
end
end
@@ -458,7 +471,7 @@ TruncatedStacktraces.@truncate_stacktrace IntegralProblem 1 4
function IntegralProblem(f::AbstractIntegralFunction,
domain,
p = NullParameters();
- kwargs...)
+ kwargs...,)
IntegralProblem{isinplace(f)}(f, domain, p; kwargs...)
end
@@ -466,7 +479,7 @@ function IntegralProblem(f::AbstractIntegralFunction,
lb::B,
ub::B,
p = NullParameters();
- kwargs...) where {B}
+ kwargs...,) where {B}
IntegralProblem{isinplace(f)}(f, (lb, ub), p; kwargs...)
end
@@ -483,13 +496,13 @@ function IntegralProblem(f, args...; nout = nothing, batch = nothing, kwargs...)
else
output_prototype = nout === nothing ? Float64[] :
Matrix{Float64}(undef, nout, 0)
- BatchIntegralFunction(f, output_prototype, max_batch = batch)
+ BatchIntegralFunction(f, output_prototype; max_batch = batch)
end
else
if batch === nothing
IntegralFunction(f)
else
- BatchIntegralFunction(f, max_batch = batch)
+ BatchIntegralFunction(f; max_batch = batch)
end
end
IntegralProblem(g, args...; kwargs...)
@@ -549,9 +562,10 @@ struct SampledIntegralProblem{Y, X, K} <: AbstractIntegralProblem{false}
x::X
dim::Int
kwargs::K
- @add_kwonly function SampledIntegralProblem(y::AbstractArray, x::AbstractVector;
+ @add_kwonly function SampledIntegralProblem(y::AbstractArray,
+ x::AbstractVector;
dim = ndims(y),
- kwargs...)
+ kwargs...,)
@assert dim<=ndims(y) "The integration dimension `dim` is larger than the number of dimensions of the integrand `y`"
@assert length(x)==size(y, dim) "The integrand `y` must have the same length as the sampling points `x` along the integrated dimension."
@assert axes(x, 1)==axes(y, dim) "The integrand `y` must obey the same indexing as the sampling points `x` along the integrated dimension."
@@ -664,18 +678,41 @@ struct OptimizationProblem{iip, F, uType, P, LB, UB, I, LC, UC, S, K} <:
ucons::UC
sense::S
kwargs::K
- @add_kwonly function OptimizationProblem{iip}(f::OptimizationFunction{iip}, u0,
+ @add_kwonly function OptimizationProblem{iip}(f::OptimizationFunction{iip},
+ u0,
p = NullParameters();
- lb = nothing, ub = nothing, int = nothing,
- lcons = nothing, ucons = nothing,
- sense = nothing, kwargs...) where {iip}
+ lb = nothing,
+ ub = nothing,
+ int = nothing,
+ lcons = nothing,
+ ucons = nothing,
+ sense = nothing,
+ kwargs...,) where {iip}
if xor(lb === nothing, ub === nothing)
error("If any of `lb` or `ub` is provided, both must be provided.")
end
warn_paramtype(p)
- new{iip, typeof(f), typeof(u0), typeof(p),
- typeof(lb), typeof(ub), typeof(int), typeof(lcons), typeof(ucons),
- typeof(sense), typeof(kwargs)}(f, u0, p, lb, ub, int, lcons, ucons, sense,
+ new{
+ iip,
+ typeof(f),
+ typeof(u0),
+ typeof(p),
+ typeof(lb),
+ typeof(ub),
+ typeof(int),
+ typeof(lcons),
+ typeof(ucons),
+ typeof(sense),
+ typeof(kwargs),
+ }(f,
+ u0,
+ p,
+ lb,
+ ub,
+ int,
+ lcons,
+ ucons,
+ sense,
kwargs)
end
end
@@ -686,13 +723,13 @@ function OptimizationProblem(f::OptimizationFunction, args...; kwargs...)
OptimizationProblem{isinplace(f)}(f, args...; kwargs...)
end
function OptimizationProblem(f, args...; kwargs...)
- isinplace(f, 2, has_two_dispatches = false)
+ isinplace(f, 2; has_two_dispatches = false)
OptimizationProblem{true}(OptimizationFunction{true}(f), args...; kwargs...)
end
function OptimizationFunction(f::NonlinearFunction,
adtype::AbstractADType = NoAD();
- kwargs...)
+ kwargs...,)
if isinplace(f)
throw(ArgumentError("Converting NonlinearFunction to OptimizationFunction is not supported with in-place functions yet."))
end
@@ -701,7 +738,7 @@ end
function OptimizationProblem(prob::NonlinearLeastSquaresProblem,
adtype::AbstractADType = NoAD();
- kwargs...)
+ kwargs...,)
if isinplace(prob)
throw(ArgumentError("Converting NonlinearLeastSquaresProblem to OptimizationProblem is not supported with in-place functions yet."))
end
diff --git a/src/problems/bvp_problems.jl b/src/problems/bvp_problems.jl
index 7d1ff6d53d..85157ccaa0 100644
--- a/src/problems/bvp_problems.jl
+++ b/src/problems/bvp_problems.jl
@@ -114,8 +114,12 @@ struct BVProblem{uType, tType, isinplace, P, F, PT <: AbstractProblemType, K} <:
problem_type::PT
kwargs::K
- @add_kwonly function BVProblem{iip}(f::AbstractBVPFunction{iip, TP}, u0, tspan,
- p = NullParameters(); problem_type = nothing, kwargs...) where {iip, TP}
+ @add_kwonly function BVProblem{iip}(f::AbstractBVPFunction{iip, TP},
+ u0,
+ tspan,
+ p = NullParameters();
+ problem_type = nothing,
+ kwargs...,) where {iip, TP}
_u0 = prepare_initial_state(u0)
_tspan = promote_tspan(tspan)
warn_paramtype(p)
@@ -126,8 +130,15 @@ struct BVProblem{uType, tType, isinplace, P, F, PT <: AbstractProblemType, K} <:
else
@assert prob_type===problem_type "This indicates incorrect problem type specification! Users should never pass in `problem_type` kwarg, this exists exclusively for internal use."
end
- return new{typeof(_u0), typeof(_tspan), iip, typeof(p), typeof(f),
- typeof(problem_type), typeof(kwargs)}(f,
+ return new{
+ typeof(_u0),
+ typeof(_tspan),
+ iip,
+ typeof(p),
+ typeof(f),
+ typeof(problem_type),
+ typeof(kwargs),
+ }(f,
_u0,
_tspan,
p,
@@ -162,35 +173,65 @@ end
return BVPFunction{iip}(args...; kwargs..., twopoint = Val(true))
end
-function TwoPointBVProblem{iip}(f, bc, u0, tspan, p = NullParameters();
- bcresid_prototype = nothing, kwargs...) where {iip}
- return TwoPointBVProblem(TwoPointBVPFunction{iip}(f, bc; bcresid_prototype), u0, tspan,
- p; kwargs...)
+function TwoPointBVProblem{iip}(f,
+ bc,
+ u0,
+ tspan,
+ p = NullParameters();
+ bcresid_prototype = nothing,
+ kwargs...,) where {iip}
+ return TwoPointBVProblem(TwoPointBVPFunction{iip}(f, bc; bcresid_prototype),
+ u0,
+ tspan,
+ p;
+ kwargs...,)
end
-function TwoPointBVProblem(f, bc, u0, tspan, p = NullParameters();
- bcresid_prototype = nothing, kwargs...)
- return TwoPointBVProblem(TwoPointBVPFunction(f, bc; bcresid_prototype), u0, tspan, p;
- kwargs...)
+function TwoPointBVProblem(f,
+ bc,
+ u0,
+ tspan,
+ p = NullParameters();
+ bcresid_prototype = nothing,
+ kwargs...,)
+ return TwoPointBVProblem(TwoPointBVPFunction(f, bc; bcresid_prototype),
+ u0,
+ tspan,
+ p;
+ kwargs...,)
end
-function TwoPointBVProblem{iip}(f::AbstractBVPFunction{iip, twopoint}, u0, tspan,
- p = NullParameters(); kwargs...) where {iip, twopoint}
+function TwoPointBVProblem{iip}(f::AbstractBVPFunction{iip, twopoint},
+ u0,
+ tspan,
+ p = NullParameters();
+ kwargs...,) where {iip, twopoint}
@assert twopoint "`TwoPointBVProblem` can only be used with a `TwoPointBVPFunction`. Instead of using `BVPFunction`, use `TwoPointBVPFunction` or pass a kwarg `twopoint=Val(true)` during the construction of the `BVPFunction`."
return BVProblem{iip}(f, u0, tspan, p; kwargs...)
end
-function TwoPointBVProblem(f::AbstractBVPFunction{iip, twopoint}, u0, tspan,
- p = NullParameters(); kwargs...) where {iip, twopoint}
+function TwoPointBVProblem(f::AbstractBVPFunction{iip, twopoint},
+ u0,
+ tspan,
+ p = NullParameters();
+ kwargs...,) where {iip, twopoint}
@assert twopoint "`TwoPointBVProblem` can only be used with a `TwoPointBVPFunction`. Instead of using `BVPFunction`, use `TwoPointBVPFunction` or pass a kwarg `twopoint=Val(true)` during the construction of the `BVPFunction`."
return BVProblem{iip}(f, u0, tspan, p; kwargs...)
end
# Allow previous timeseries solution
-function TwoPointBVProblem(f::AbstractODEFunction, bc, sol::T, tspan::Tuple,
- p = NullParameters(); kwargs...) where {T <: AbstractTimeseriesSolution}
+function TwoPointBVProblem(f::AbstractODEFunction,
+ bc,
+ sol::T,
+ tspan::Tuple,
+ p = NullParameters();
+ kwargs...,) where {T <: AbstractTimeseriesSolution}
return TwoPointBVProblem(f, bc, sol.u, tspan, p; kwargs...)
end
# Allow initial guess function for the initial guess
-function TwoPointBVProblem(f::AbstractODEFunction, bc, initialGuess, tspan::AbstractVector,
- p = NullParameters(); kwargs...)
+function TwoPointBVProblem(f::AbstractODEFunction,
+ bc,
+ initialGuess,
+ tspan::AbstractVector,
+ p = NullParameters();
+ kwargs...,)
u0 = [initialGuess(i) for i in tspan]
return TwoPointBVProblem(f, bc, u0, (tspan[1], tspan[end]), p; kwargs...)
end
diff --git a/src/problems/dae_problems.jl b/src/problems/dae_problems.jl
index e5e532aac3..aa169a228b 100644
--- a/src/problems/dae_problems.jl
+++ b/src/problems/dae_problems.jl
@@ -77,9 +77,12 @@ struct DAEProblem{uType, duType, tType, isinplace, P, F, K, D} <:
kwargs::K
differential_vars::D
@add_kwonly function DAEProblem{iip}(f::AbstractDAEFunction{iip},
- du0, u0, tspan, p = NullParameters();
- differential_vars = nothing,
- kwargs...) where {iip}
+ du0,
+ u0,
+ tspan,
+ p = NullParameters();
+ differential_vars = nothing,
+ kwargs...,) where {iip}
_u0 = prepare_initial_state(u0)
_du0 = prepare_initial_state(du0)
if !isnothing(_u0)
@@ -93,11 +96,22 @@ struct DAEProblem{uType, duType, tType, isinplace, P, F, K, D} <:
end
_tspan = promote_tspan(tspan)
warn_paramtype(p)
- new{typeof(_u0), typeof(_du0), typeof(_tspan),
- isinplace(f), typeof(p),
- typeof(f), typeof(kwargs),
- typeof(differential_vars)}(f, _du0, _u0, _tspan, p,
- kwargs, differential_vars)
+ new{
+ typeof(_u0),
+ typeof(_du0),
+ typeof(_tspan),
+ isinplace(f),
+ typeof(p),
+ typeof(f),
+ typeof(kwargs),
+ typeof(differential_vars),
+ }(f,
+ _du0,
+ _u0,
+ _tspan,
+ p,
+ kwargs,
+ differential_vars)
end
function DAEProblem{iip}(f, du0, u0, tspan, p = NullParameters(); kwargs...) where {iip}
diff --git a/src/problems/dde_problems.jl b/src/problems/dde_problems.jl
index 509e8e8d82..43b765df8e 100644
--- a/src/problems/dde_problems.jl
+++ b/src/problems/dde_problems.jl
@@ -212,8 +212,7 @@ struct DDEProblem{
H,
K,
PT <: AbstractProblemType,
-} <:
- AbstractDDEProblem{uType, tType, lType, isinplace}
+} <: AbstractDDEProblem{uType, tType, lType, isinplace}
f::F
u0::uType
h::H
@@ -226,21 +225,33 @@ struct DDEProblem{
order_discontinuity_t0::Int
problem_type::PT
- @add_kwonly function DDEProblem{iip}(f::AbstractDDEFunction{iip}, u0, h, tspan,
+ @add_kwonly function DDEProblem{iip}(f::AbstractDDEFunction{iip},
+ u0,
+ h,
+ tspan,
p = NullParameters();
constant_lags = (),
dependent_lags = (),
- neutral = f.mass_matrix !== I &&
- det(f.mass_matrix) != 1,
+ neutral = f.mass_matrix !== I && det(f.mass_matrix) != 1,
order_discontinuity_t0 = 0,
problem_type = StandardDDEProblem(),
- kwargs...) where {iip}
+ kwargs...,) where {iip}
_u0 = prepare_initial_state(u0)
_tspan = promote_tspan(tspan)
warn_paramtype(p)
- new{typeof(_u0), typeof(_tspan), typeof(constant_lags), typeof(dependent_lags),
+ new{
+ typeof(_u0),
+ typeof(_tspan),
+ typeof(constant_lags),
+ typeof(dependent_lags),
isinplace(f),
- typeof(p), typeof(f), typeof(h), typeof(kwargs), typeof(problem_type)}(f, _u0,
+ typeof(p),
+ typeof(f),
+ typeof(h),
+ typeof(kwargs),
+ typeof(problem_type),
+ }(f,
+ _u0,
h,
_tspan,
p,
@@ -252,11 +263,19 @@ struct DDEProblem{
problem_type)
end
- function DDEProblem{iip}(f::AbstractDDEFunction{iip}, h, tspan::Tuple,
+ function DDEProblem{iip}(f::AbstractDDEFunction{iip},
+ h,
+ tspan::Tuple,
p = NullParameters();
- order_discontinuity_t0 = 1, kwargs...) where {iip}
- DDEProblem{iip}(f, h(p, first(tspan)), h, tspan, p;
- order_discontinuity_t0 = max(1, order_discontinuity_t0), kwargs...)
+ order_discontinuity_t0 = 1,
+ kwargs...,) where {iip}
+ DDEProblem{iip}(f,
+ h(p, first(tspan)),
+ h,
+ tspan,
+ p;
+ order_discontinuity_t0 = max(1, order_discontinuity_t0),
+ kwargs...,)
end
function DDEProblem{iip}(f, args...; kwargs...) where {iip}
@@ -289,16 +308,29 @@ struct DynamicalDDEProblem{iip} <: AbstractDynamicalDDEProblem end
Define a dynamical DDE problem from a [`DynamicalDDEFunction`](@ref).
"""
-function DynamicalDDEProblem(f::DynamicalDDEFunction, v0, u0, h, tspan,
- p = NullParameters(); dependent_lags = (), kwargs...)
- DDEProblem(f, ArrayPartition(v0, u0), h, tspan, p;
+function DynamicalDDEProblem(f::DynamicalDDEFunction,
+ v0,
+ u0,
+ h,
+ tspan,
+ p = NullParameters();
+ dependent_lags = (),
+ kwargs...,)
+ DDEProblem(f,
+ ArrayPartition(v0, u0),
+ h,
+ tspan,
+ p;
problem_type = DynamicalDDEProblem{isinplace(f)}(),
dependent_lags = ntuple(i -> (u, p, t) -> dependent_lags[i](u[1], u[2], p, t),
length(dependent_lags)),
- kwargs...)
+ kwargs...,)
end
-function DynamicalDDEProblem(f::DynamicalDDEFunction, h, tspan, p = NullParameters();
- kwargs...)
+function DynamicalDDEProblem(f::DynamicalDDEFunction,
+ h,
+ tspan,
+ p = NullParameters();
+ kwargs...,)
DynamicalDDEProblem(f, h(p, first(tspan))..., h, tspan, p; kwargs...)
end
function DynamicalDDEProblem(f1, f2, args...; kwargs...)
@@ -311,12 +343,13 @@ end
Define a dynamical DDE problem from the two functions `f1` and `f2`.
# Arguments
-* `f1` and `f2`: The functions in the DDE.
-* `v0` and `u0`: The initial conditions.
-* `h`: The initial history function.
-* `tspan`: The timespan for the problem.
-* `p`: Parameter values for `f1` and `f2`.
-* `callback`: A callback to be applied to every solver which uses the problem. Defaults to nothing.
+
+ - `f1` and `f2`: The functions in the DDE.
+ - `v0` and `u0`: The initial conditions.
+ - `h`: The initial history function.
+ - `tspan`: The timespan for the problem.
+ - `p`: Parameter values for `f1` and `f2`.
+ - `callback`: A callback to be applied to every solver which uses the problem. Defaults to nothing.
`isinplace` optionally sets whether the function is inplace or not.
This is determined automatically, but not inferred.
@@ -341,13 +374,14 @@ end
Define a second order DDE problem with the specified function.
# Arguments
-* `f`: The function for the second derivative.
-* `du0`: The initial derivative.
-* `u0`: The initial condition.
-* `h`: The initial history function.
-* `tspan`: The timespan for the problem.
-* `p`: Parameter values for `f`.
-* `callback`: A callback to be applied to every solver which uses the problem. Defaults to nothing.
+
+ - `f`: The function for the second derivative.
+ - `du0`: The initial derivative.
+ - `u0`: The initial condition.
+ - `h`: The initial history function.
+ - `tspan`: The timespan for the problem.
+ - `p`: Parameter values for `f`.
+ - `callback`: A callback to be applied to every solver which uses the problem. Defaults to nothing.
`isinplace` optionally sets whether the function is inplace or not.
This is determined automatically, but not inferred.
@@ -362,8 +396,11 @@ function SecondOrderDDEProblem{iip}(f, args...; kwargs...) where {iip}
v
end
end
- DynamicalDDEProblem{iip}(f, f2, args...; problem_type = SecondOrderDDEProblem{iip}(),
- kwargs...)
+ DynamicalDDEProblem{iip}(f,
+ f2,
+ args...;
+ problem_type = SecondOrderDDEProblem{iip}(),
+ kwargs...,)
end
function SecondOrderDDEProblem(f::DynamicalDDEFunction, args...; kwargs...)
iip = isinplace(f.f1, 6)
@@ -377,16 +414,20 @@ function SecondOrderDDEProblem(f::DynamicalDDEFunction, args...; kwargs...)
v
end
end
- return DynamicalDDEProblem(DynamicalDDEFunction{iip}(f.f1, f2;
+ return DynamicalDDEProblem(DynamicalDDEFunction{iip}(f.f1,
+ f2;
mass_matrix = f.mass_matrix,
- analytic = f.analytic),
- args...; problem_type = SecondOrderDDEProblem{iip}(),
- kwargs...)
+ analytic = f.analytic,),
+ args...;
+ problem_type = SecondOrderDDEProblem{iip}(),
+ kwargs...,)
else
- return DynamicalDDEProblem(DynamicalDDEFunction{iip}(f.f1, f.f2;
+ return DynamicalDDEProblem(DynamicalDDEFunction{iip}(f.f1,
+ f.f2;
mass_matrix = f.mass_matrix,
- analytic = f.analytic),
- args...; problem_type = SecondOrderDDEProblem{iip}(),
- kwargs...)
+ analytic = f.analytic,),
+ args...;
+ problem_type = SecondOrderDDEProblem{iip}(),
+ kwargs...,)
end
end
diff --git a/src/problems/discrete_problems.jl b/src/problems/discrete_problems.jl
index 4b06bdfa59..b1c3413f05 100644
--- a/src/problems/discrete_problems.jl
+++ b/src/problems/discrete_problems.jl
@@ -88,29 +88,37 @@ struct DiscreteProblem{uType, tType, isinplace, P, F, K} <:
""" A callback to be applied to every solver which uses the problem."""
kwargs::K
@add_kwonly function DiscreteProblem{iip}(f::AbstractDiscreteFunction{iip},
- u0, tspan::Tuple, p = NullParameters();
- kwargs...) where {iip}
+ u0,
+ tspan::Tuple,
+ p = NullParameters();
+ kwargs...,) where {iip}
_u0 = prepare_initial_state(u0)
_tspan = promote_tspan(tspan)
warn_paramtype(p)
- new{typeof(_u0), typeof(_tspan), isinplace(f, 4),
+ new{
+ typeof(_u0),
+ typeof(_tspan),
+ isinplace(f, 4),
typeof(p),
- typeof(f), typeof(kwargs)}(f,
+ typeof(f),
+ typeof(kwargs),
+ }(f,
_u0,
_tspan,
p,
kwargs)
end
- function DiscreteProblem{iip}(u0::Nothing, tspan::Nothing, p = NullParameters();
- callback = nothing) where {iip}
+ function DiscreteProblem{iip}(u0::Nothing,
+ tspan::Nothing,
+ p = NullParameters();
+ callback = nothing,) where {iip}
if iip
f = DISCRETE_INPLACE_DEFAULT
else
f = DISCRETE_OUTOFPLACE_DEFAULT
end
- new{Nothing, Nothing, iip, typeof(p),
- typeof(f), typeof(callback)}(f,
+ new{Nothing, Nothing, iip, typeof(p), typeof(f), typeof(callback)}(f,
nothing,
nothing,
p,
@@ -129,13 +137,19 @@ TruncatedStacktraces.@truncate_stacktrace DiscreteProblem 3 1 2
Defines a discrete problem with the specified functions.
"""
-function DiscreteProblem(f::AbstractDiscreteFunction, u0, tspan::Tuple,
- p = NullParameters(); kwargs...)
+function DiscreteProblem(f::AbstractDiscreteFunction,
+ u0,
+ tspan::Tuple,
+ p = NullParameters();
+ kwargs...,)
DiscreteProblem{isinplace(f)}(f, u0, tspan, p; kwargs...)
end
-function DiscreteProblem(f::Base.Callable, u0, tspan::Tuple, p = NullParameters();
- kwargs...)
+function DiscreteProblem(f::Base.Callable,
+ u0,
+ tspan::Tuple,
+ p = NullParameters();
+ kwargs...,)
iip = isinplace(f, 4)
DiscreteProblem(DiscreteFunction{iip}(f), u0, tspan, p; kwargs...)
end
@@ -145,8 +159,10 @@ $(SIGNATURES)
Define a discrete problem with the identity map.
"""
-function DiscreteProblem(u0::Union{AbstractArray, Number}, tspan::Tuple,
- p = NullParameters(); kwargs...)
+function DiscreteProblem(u0::Union{AbstractArray, Number},
+ tspan::Tuple,
+ p = NullParameters();
+ kwargs...,)
iip = u0 isa AbstractArray
if iip
f = DISCRETE_INPLACE_DEFAULT
diff --git a/src/problems/implicit_discrete_problems.jl b/src/problems/implicit_discrete_problems.jl
index b6463fa6be..417a6b3a4c 100644
--- a/src/problems/implicit_discrete_problems.jl
+++ b/src/problems/implicit_discrete_problems.jl
@@ -80,26 +80,33 @@ struct ImplicitDiscreteProblem{uType, tType, isinplace, P, F, K} <:
p::P
""" A callback to be applied to every solver which uses the problem."""
kwargs::K
- @add_kwonly function ImplicitDiscreteProblem{iip}(f::ImplicitDiscreteFunction{
- iip,
- },
- u0, tspan::Tuple,
- p = NullParameters();
- kwargs...) where {iip}
+ @add_kwonly function ImplicitDiscreteProblem{iip}(f::ImplicitDiscreteFunction{iip},
+ u0,
+ tspan::Tuple,
+ p = NullParameters();
+ kwargs...,) where {iip}
_u0 = prepare_initial_state(u0)
_tspan = promote_tspan(tspan)
warn_paramtype(p)
- new{typeof(_u0), typeof(_tspan), isinplace(f, 6),
+ new{
+ typeof(_u0),
+ typeof(_tspan),
+ isinplace(f, 6),
typeof(p),
- typeof(f), typeof(kwargs)}(f,
+ typeof(f),
+ typeof(kwargs),
+ }(f,
_u0,
_tspan,
p,
kwargs)
end
- function ImplicitDiscreteProblem{iip}(f, u0, tspan, p = NullParameters();
- kwargs...) where {iip}
+ function ImplicitDiscreteProblem{iip}(f,
+ u0,
+ tspan,
+ p = NullParameters();
+ kwargs...,) where {iip}
ImplicitDiscreteProblem(ImplicitDiscreteFunction{iip}(f), u0, tspan, p; kwargs...)
end
end
@@ -111,13 +118,15 @@ TruncatedStacktraces.@truncate_stacktrace ImplicitDiscreteProblem 3 1 2
Defines a discrete problem with the specified functions.
"""
-function ImplicitDiscreteProblem(f::ImplicitDiscreteFunction, u0, tspan::Tuple,
- p = NullParameters(); kwargs...)
+function ImplicitDiscreteProblem(f::ImplicitDiscreteFunction,
+ u0,
+ tspan::Tuple,
+ p = NullParameters();
+ kwargs...,)
ImplicitDiscreteProblem{isinplace(f, 6)}(f, u0, tspan, p; kwargs...)
end
-function ImplicitDiscreteProblem(f, u0, tspan, p = NullParameters();
- kwargs...)
+function ImplicitDiscreteProblem(f, u0, tspan, p = NullParameters(); kwargs...)
iip = isinplace(f, 6)
ImplicitDiscreteProblem(ImplicitDiscreteFunction{iip}(f), u0, tspan, p; kwargs...)
end
diff --git a/src/problems/ode_problems.jl b/src/problems/ode_problems.jl
index dbe8210286..33ab14a995 100644
--- a/src/problems/ode_problems.jl
+++ b/src/problems/ode_problems.jl
@@ -109,16 +109,23 @@ mutable struct ODEProblem{uType, tType, isinplace, P, F, K, PT <: AbstractProble
"""An internal argument for storing traits about the solving process."""
problem_type::PT
@add_kwonly function ODEProblem{iip}(f::AbstractODEFunction{iip},
- u0, tspan, p = NullParameters(),
+ u0,
+ tspan,
+ p = NullParameters(),
problem_type = StandardODEProblem();
- kwargs...) where {iip}
+ kwargs...,) where {iip}
_u0 = prepare_initial_state(u0)
_tspan = promote_tspan(tspan)
warn_paramtype(p)
- new{typeof(_u0), typeof(_tspan),
- isinplace(f), typeof(p), typeof(f),
+ new{
+ typeof(_u0),
+ typeof(_tspan),
+ isinplace(f),
+ typeof(p),
+ typeof(f),
typeof(kwargs),
- typeof(problem_type)}(f,
+ typeof(problem_type),
+ }(f,
_u0,
_tspan,
p,
@@ -140,24 +147,28 @@ mutable struct ODEProblem{uType, tType, isinplace, P, F, K, PT <: AbstractProble
ODEProblem(_f, _u0, _tspan, p; kwargs...)
end
- @add_kwonly function ODEProblem{iip, recompile}(f, u0, tspan, p = NullParameters();
- kwargs...) where {iip, recompile}
+ @add_kwonly function ODEProblem{iip, recompile}(f,
+ u0,
+ tspan,
+ p = NullParameters();
+ kwargs...,) where {iip, recompile}
ODEProblem{iip}(ODEFunction{iip, recompile}(f), u0, tspan, p; kwargs...)
end
- function ODEProblem{iip, FunctionWrapperSpecialize}(f, u0, tspan, p = NullParameters();
- kwargs...) where {iip}
+ function ODEProblem{iip, FunctionWrapperSpecialize}(f,
+ u0,
+ tspan,
+ p = NullParameters();
+ kwargs...,) where {iip}
_u0 = prepare_initial_state(u0)
_tspan = promote_tspan(tspan)
if !(f isa FunctionWrappersWrappers.FunctionWrappersWrapper)
if iip
ff = ODEFunction{iip, FunctionWrapperSpecialize}(wrapfun_iip(f,
- (_u0, _u0, p,
- _tspan[1])))
+ (_u0, _u0, p, _tspan[1])))
else
ff = ODEFunction{iip, FunctionWrapperSpecialize}(wrapfun_oop(f,
- (_u0, p,
- _tspan[1])))
+ (_u0, p, _tspan[1])))
end
end
ODEProblem{iip}(ff, _u0, _tspan, p; kwargs...)
@@ -261,18 +272,31 @@ struct DynamicalODEProblem{iip} <: AbstractDynamicalODEProblem end
Define a dynamical ODE function from a [`DynamicalODEFunction`](@ref).
"""
-function DynamicalODEProblem(f::DynamicalODEFunction, du0, u0, tspan, p = NullParameters();
- kwargs...)
+function DynamicalODEProblem(f::DynamicalODEFunction,
+ du0,
+ u0,
+ tspan,
+ p = NullParameters();
+ kwargs...,)
ODEProblem(f, ArrayPartition(du0, u0), tspan, p; kwargs...)
end
function DynamicalODEProblem(f1, f2, du0, u0, tspan, p = NullParameters(); kwargs...)
ODEProblem(DynamicalODEFunction(f1, f2), ArrayPartition(du0, u0), tspan, p; kwargs...)
end
-function DynamicalODEProblem{iip}(f1, f2, du0, u0, tspan, p = NullParameters();
- kwargs...) where {iip}
- ODEProblem(DynamicalODEFunction{iip}(f1, f2), ArrayPartition(du0, u0), tspan, p,
- DynamicalODEProblem{iip}(); kwargs...)
+function DynamicalODEProblem{iip}(f1,
+ f2,
+ du0,
+ u0,
+ tspan,
+ p = NullParameters();
+ kwargs...,) where {iip}
+ ODEProblem(DynamicalODEFunction{iip}(f1, f2),
+ ArrayPartition(du0, u0),
+ tspan,
+ p,
+ DynamicalODEProblem{iip}();
+ kwargs...,)
end
@doc doc"""
@@ -327,8 +351,12 @@ function SecondOrderODEProblem(f, du0, u0, tspan, p = NullParameters(); kwargs..
SecondOrderODEProblem{iip}(f, du0, u0, tspan, p; kwargs...)
end
-function SecondOrderODEProblem{iip}(f, du0, u0, tspan, p = NullParameters();
- kwargs...) where {iip}
+function SecondOrderODEProblem{iip}(f,
+ du0,
+ u0,
+ tspan,
+ p = NullParameters();
+ kwargs...,) where {iip}
if iip
f2 = function (du, v, u, p, t)
du .= v
@@ -339,11 +367,19 @@ function SecondOrderODEProblem{iip}(f, du0, u0, tspan, p = NullParameters();
end
end
_u0 = ArrayPartition((du0, u0))
- ODEProblem(DynamicalODEFunction{iip}(f, f2), _u0, tspan, p,
- SecondOrderODEProblem{iip}(); kwargs...)
+ ODEProblem(DynamicalODEFunction{iip}(f, f2),
+ _u0,
+ tspan,
+ p,
+ SecondOrderODEProblem{iip}();
+ kwargs...,)
end
-function SecondOrderODEProblem(f::DynamicalODEFunction, du0, u0, tspan,
- p = NullParameters(); kwargs...)
+function SecondOrderODEProblem(f::DynamicalODEFunction,
+ du0,
+ u0,
+ tspan,
+ p = NullParameters();
+ kwargs...,)
iip = isinplace(f.f1, 5)
_u0 = ArrayPartition((du0, u0))
if f.f2.f === nothing
@@ -356,13 +392,25 @@ function SecondOrderODEProblem(f::DynamicalODEFunction, du0, u0, tspan,
v
end
end
- return ODEProblem(DynamicalODEFunction{iip}(f.f1, f2; mass_matrix = f.mass_matrix,
- analytic = f.analytic), _u0, tspan, p,
- SecondOrderODEProblem{iip}(); kwargs...)
+ return ODEProblem(DynamicalODEFunction{iip}(f.f1,
+ f2;
+ mass_matrix = f.mass_matrix,
+ analytic = f.analytic,),
+ _u0,
+ tspan,
+ p,
+ SecondOrderODEProblem{iip}();
+ kwargs...,)
else
- return ODEProblem(DynamicalODEFunction{iip}(f.f1, f.f2; mass_matrix = f.mass_matrix,
- analytic = f.analytic), _u0, tspan, p,
- SecondOrderODEProblem{iip}(); kwargs...)
+ return ODEProblem(DynamicalODEFunction{iip}(f.f1,
+ f.f2;
+ mass_matrix = f.mass_matrix,
+ analytic = f.analytic,),
+ _u0,
+ tspan,
+ p,
+ SecondOrderODEProblem{iip}();
+ kwargs...,)
end
end
@@ -439,8 +487,12 @@ function SplitODEProblem(f1, f2, u0, tspan, p = NullParameters(); kwargs...)
SplitODEProblem(f, u0, tspan, p; kwargs...)
end
-function SplitODEProblem{iip}(f1, f2, u0, tspan, p = NullParameters();
- kwargs...) where {iip}
+function SplitODEProblem{iip}(f1,
+ f2,
+ u0,
+ tspan,
+ p = NullParameters();
+ kwargs...,) where {iip}
f = SplitFunction{iip}(f1, f2)
SplitODEProblem(f, u0, tspan, p; kwargs...)
end
@@ -453,12 +505,18 @@ Define a split ODE problem from a [`SplitFunction`](@ref).
function SplitODEProblem(f::SplitFunction, u0, tspan, p = NullParameters(); kwargs...)
SplitODEProblem{isinplace(f)}(f, u0, tspan, p; kwargs...)
end
-function SplitODEProblem{iip}(f::SplitFunction, u0, tspan, p = NullParameters();
- kwargs...) where {iip}
+function SplitODEProblem{iip}(f::SplitFunction,
+ u0,
+ tspan,
+ p = NullParameters();
+ kwargs...,) where {iip}
if f.cache === nothing && iip
cache = similar(u0)
- f = SplitFunction{iip}(f.f1, f.f2; mass_matrix = f.mass_matrix,
- _func_cache = cache, analytic = f.analytic)
+ f = SplitFunction{iip}(f.f1,
+ f.f2;
+ mass_matrix = f.mass_matrix,
+ _func_cache = cache,
+ analytic = f.analytic,)
end
ODEProblem(f, u0, tspan, p, SplitODEProblem{iip}(); kwargs...)
end
@@ -477,18 +535,27 @@ function IncrementingODEProblem(f, u0, tspan, p = NullParameters(); kwargs...)
IncrementingODEProblem(f, u0, tspan, p; kwargs...)
end
-function IncrementingODEProblem{iip}(f, u0, tspan, p = NullParameters();
- kwargs...) where {iip}
+function IncrementingODEProblem{iip}(f,
+ u0,
+ tspan,
+ p = NullParameters();
+ kwargs...,) where {iip}
f = IncrementingODEFunction{iip}(f)
IncrementingODEProblem(f, u0, tspan, p; kwargs...)
end
-function IncrementingODEProblem(f::IncrementingODEFunction, u0, tspan, p = NullParameters();
- kwargs...)
+function IncrementingODEProblem(f::IncrementingODEFunction,
+ u0,
+ tspan,
+ p = NullParameters();
+ kwargs...,)
IncrementingODEProblem{isinplace(f)}(f, u0, tspan, p; kwargs...)
end
-function IncrementingODEProblem{iip}(f::IncrementingODEFunction, u0, tspan,
- p = NullParameters(); kwargs...) where {iip}
+function IncrementingODEProblem{iip}(f::IncrementingODEFunction,
+ u0,
+ tspan,
+ p = NullParameters();
+ kwargs...,) where {iip}
ODEProblem(f, u0, tspan, p, IncrementingODEProblem{iip}(); kwargs...)
end
diff --git a/src/problems/problem_traits.jl b/src/problems/problem_traits.jl
index dbafebd1f8..1314d1f98e 100644
--- a/src/problems/problem_traits.jl
+++ b/src/problems/problem_traits.jl
@@ -3,29 +3,20 @@
"""
is_diagonal_noise(prob::AbstractSciMLProblem) = false
function is_diagonal_noise(prob::AbstractRODEProblem{
- uType,
- tType,
- iip,
- Nothing,
-}) where {
- uType,
- tType,
- iip,
-}
+ uType,
+ tType,
+ iip,
+ Nothing,
+ }) where {uType, tType, iip}
true
end
function is_diagonal_noise(prob::AbstractSDDEProblem{
- uType,
- tType,
- lType,
- iip,
- Nothing,
-}) where {
- uType,
- tType,
- lType,
- iip,
-}
+ uType,
+ tType,
+ lType,
+ iip,
+ Nothing,
+ }) where {uType, tType, lType, iip}
true
end
@@ -40,44 +31,37 @@ isinplace(prob::AbstractNonlinearProblem{uType, iip}) where {uType, iip} = iip
isinplace(prob::AbstractIntegralProblem{iip}) where {iip} = iip
isinplace(prob::AbstractODEProblem{uType, tType, iip}) where {uType, tType, iip} = iip
function isinplace(prob::AbstractRODEProblem{
- uType,
- tType,
- iip,
- ND,
-}) where {uType, tType,
- iip, ND}
+ uType,
+ tType,
+ iip,
+ ND,
+ }) where {uType, tType, iip, ND}
iip
end
function isinplace(prob::AbstractDDEProblem{
- uType,
- tType,
- lType,
- iip,
-}) where {uType, tType,
- lType, iip}
+ uType,
+ tType,
+ lType,
+ iip,
+ }) where {uType, tType, lType, iip}
iip
end
function isinplace(prob::AbstractDAEProblem{
- uType,
- duType,
- tType,
- iip,
-}) where {uType,
- duType,
- tType, iip}
+ uType,
+ duType,
+ tType,
+ iip,
+ }) where {uType, duType, tType, iip}
iip
end
isinplace(prob::AbstractNoiseProblem) = isinplace(prob.noise)
isinplace(::SplitFunction{iip}) where {iip} = iip
function isinplace(prob::AbstractSDDEProblem{
- uType,
- tType,
- lType,
- iip,
- ND,
-}) where {uType,
- tType,
- lType,
- iip, ND}
+ uType,
+ tType,
+ lType,
+ iip,
+ ND,
+ }) where {uType, tType, lType, iip, ND}
iip
end
diff --git a/src/problems/problem_utils.jl b/src/problems/problem_utils.jl
index c72a451f35..55eb498924 100644
--- a/src/problems/problem_utils.jl
+++ b/src/problems/problem_utils.jl
@@ -18,25 +18,33 @@ end
function Base.summary(io::IO, prob::AbstractDEProblem)
type_color, no_color = get_colorizers(io)
print(io,
- type_color, nameof(typeof(prob)),
- no_color, " with uType ",
- type_color, typeof(prob.u0),
- no_color, " and tType ",
type_color,
- prob.tspan isa Function ?
- "Unknown" : (prob.tspan === nothing ?
- "Nothing" : typeof(prob.tspan[1])),
- no_color, ". In-place: ",
- type_color, isinplace(prob),
+ nameof(typeof(prob)),
+ no_color,
+ " with uType ",
+ type_color,
+ typeof(prob.u0),
+ no_color,
+ " and tType ",
+ type_color,
+ prob.tspan isa Function ? "Unknown" :
+ (prob.tspan === nothing ? "Nothing" : typeof(prob.tspan[1])),
+ no_color,
+ ". In-place: ",
+ type_color,
+ isinplace(prob),
no_color)
end
function Base.summary(io::IO, prob::AbstractLinearProblem)
type_color, no_color = get_colorizers(io)
print(io,
- type_color, nameof(typeof(prob)),
- no_color, ". In-place: ",
- type_color, isinplace(prob),
+ type_color,
+ nameof(typeof(prob)),
+ no_color,
+ ". In-place: ",
+ type_color,
+ isinplace(prob),
no_color)
end
function Base.show(io::IO, mime::MIME"text/plain", A::AbstractLinearProblem)
@@ -49,11 +57,16 @@ end
function Base.summary(io::IO, prob::AbstractNonlinearProblem{uType, iip}) where {uType, iip}
type_color, no_color = get_colorizers(io)
print(io,
- type_color, nameof(typeof(prob)),
- no_color, " with uType ",
- type_color, uType,
- no_color, ". In-place: ",
- type_color, isinplace(prob),
+ type_color,
+ nameof(typeof(prob)),
+ no_color,
+ " with uType ",
+ type_color,
+ uType,
+ no_color,
+ ". In-place: ",
+ type_color,
+ isinplace(prob),
no_color)
end
function Base.show(io::IO, mime::MIME"text/plain", A::AbstractNonlinearProblem)
@@ -73,9 +86,12 @@ end
function Base.summary(io::IO, prob::AbstractOptimizationProblem)
type_color, no_color = get_colorizers(io)
print(io,
- type_color, nameof(typeof(prob)),
- no_color, ". In-place: ",
- type_color, isinplace(prob),
+ type_color,
+ nameof(typeof(prob)),
+ no_color,
+ ". In-place: ",
+ type_color,
+ isinplace(prob),
no_color)
end
function Base.show(io::IO, mime::MIME"text/plain", A::AbstractOptimizationProblem)
@@ -88,9 +104,12 @@ end
function Base.summary(io::IO, prob::AbstractIntegralProblem)
type_color, no_color = get_colorizers(io)
print(io,
- type_color, nameof(typeof(prob)),
- no_color, ". In-place: ",
- type_color, isinplace(prob),
+ type_color,
+ nameof(typeof(prob)),
+ no_color,
+ ". In-place: ",
+ type_color,
+ isinplace(prob),
no_color)
end
function Base.show(io::IO, mime::MIME"text/plain", A::AbstractIntegralProblem)
@@ -100,8 +119,13 @@ end
function Base.summary(io::IO, prob::AbstractNoiseProblem)
print(io,
- nameof(typeof(prob)), " with WType ", typeof(prob.noise.curW), " and tType ",
- typeof(prob.tspan[1]), ". In-place: ", isinplace(prob))
+ nameof(typeof(prob)),
+ " with WType ",
+ typeof(prob.noise.curW),
+ " and tType ",
+ typeof(prob.tspan[1]),
+ ". In-place: ",
+ isinplace(prob))
end
function Base.show(io::IO, mime::MIME"text/plain", A::AbstractDEProblem)
summary(io, A)
@@ -134,10 +158,7 @@ end
function Base.summary(io::IO, prob::AbstractEnsembleProblem)
type_color, no_color = get_colorizers(io)
- print(io,
- nameof(typeof(prob)),
- " with problem ",
- nameof(typeof(prob.prob)))
+ print(io, nameof(typeof(prob)), " with problem ", nameof(typeof(prob.prob)))
end
Base.show(io::IO, mime::MIME"text/plain", A::AbstractEnsembleProblem) = summary(io, A)
@@ -174,9 +195,7 @@ function Base.show(io::IO, mime::MIME"text/plain", A::AbstractPDEProblem)
println(io)
end
function Base.summary(io::IO, prob::AbstractPDEProblem)
- print(io,
- type_color, nameof(typeof(prob)),
- no_color)
+ print(io, type_color, nameof(typeof(prob)), no_color)
end
Base.copy(p::SciMLBase.NullParameters) = p
diff --git a/src/problems/rode_problems.jl b/src/problems/rode_problems.jl
index 18c7a587eb..55daf22af6 100644
--- a/src/problems/rode_problems.jl
+++ b/src/problems/rode_problems.jl
@@ -64,19 +64,34 @@ mutable struct RODEProblem{uType, tType, isinplace, P, NP, F, K, ND} <:
kwargs::K
rand_prototype::ND
seed::UInt64
- @add_kwonly function RODEProblem{iip}(f::RODEFunction{iip}, u0, tspan,
- p = NullParameters();
- rand_prototype = nothing,
- noise = nothing, seed = UInt64(0),
- kwargs...) where {iip}
+ @add_kwonly function RODEProblem{iip}(f::RODEFunction{iip},
+ u0,
+ tspan,
+ p = NullParameters();
+ rand_prototype = nothing,
+ noise = nothing,
+ seed = UInt64(0),
+ kwargs...,) where {iip}
_u0 = prepare_initial_state(u0)
_tspan = promote_tspan(tspan)
warn_paramtype(p)
- new{typeof(_u0), typeof(_tspan),
- isinplace(f), typeof(p),
- typeof(noise), typeof(f), typeof(kwargs),
- typeof(rand_prototype)}(f, _u0, _tspan, p, noise, kwargs,
- rand_prototype, seed)
+ new{
+ typeof(_u0),
+ typeof(_tspan),
+ isinplace(f),
+ typeof(p),
+ typeof(noise),
+ typeof(f),
+ typeof(kwargs),
+ typeof(rand_prototype),
+ }(f,
+ _u0,
+ _tspan,
+ p,
+ noise,
+ kwargs,
+ rand_prototype,
+ seed)
end
function RODEProblem{iip}(f, u0, tspan, p = NullParameters(); kwargs...) where {iip}
RODEProblem(RODEFunction{iip}(f), u0, tspan, p; kwargs...)
diff --git a/src/problems/sdde_problems.jl b/src/problems/sdde_problems.jl
index 76d4b8dc4a..49e978fcad 100644
--- a/src/problems/sdde_problems.jl
+++ b/src/problems/sdde_problems.jl
@@ -117,32 +117,67 @@ struct SDDEProblem{uType, tType, lType, lType2, isinplace, P, NP, F, G, H, K, ND
neutral::Bool
order_discontinuity_t0::Rational{Int}
- @add_kwonly function SDDEProblem{iip}(f::AbstractSDDEFunction{iip}, g, u0, h, tspan,
- p = NullParameters();
- noise_rate_prototype = nothing, noise = nothing,
- seed = UInt64(0),
- constant_lags = (), dependent_lags = (),
- neutral = f.mass_matrix !== I &&
- det(f.mass_matrix) != 1,
- order_discontinuity_t0 = 0 // 1,
- kwargs...) where {iip}
+ @add_kwonly function SDDEProblem{iip}(f::AbstractSDDEFunction{iip},
+ g,
+ u0,
+ h,
+ tspan,
+ p = NullParameters();
+ noise_rate_prototype = nothing,
+ noise = nothing,
+ seed = UInt64(0),
+ constant_lags = (),
+ dependent_lags = (),
+ neutral = f.mass_matrix !== I && det(f.mass_matrix) != 1,
+ order_discontinuity_t0 = 0 // 1,
+ kwargs...,) where {iip}
_u0 = prepare_initial_state(u0)
_tspan = promote_tspan(tspan)
warn_paramtype(p)
- new{typeof(_u0), typeof(_tspan), typeof(constant_lags), typeof(dependent_lags),
+ new{
+ typeof(_u0),
+ typeof(_tspan),
+ typeof(constant_lags),
+ typeof(dependent_lags),
isinplace(f),
- typeof(p), typeof(noise), typeof(f), typeof(g), typeof(h), typeof(kwargs),
- typeof(noise_rate_prototype)}(f, g, _u0, h, _tspan, p, noise, constant_lags,
- dependent_lags, kwargs, noise_rate_prototype,
- seed, neutral, order_discontinuity_t0)
+ typeof(p),
+ typeof(noise),
+ typeof(f),
+ typeof(g),
+ typeof(h),
+ typeof(kwargs),
+ typeof(noise_rate_prototype),
+ }(f,
+ g,
+ _u0,
+ h,
+ _tspan,
+ p,
+ noise,
+ constant_lags,
+ dependent_lags,
+ kwargs,
+ noise_rate_prototype,
+ seed,
+ neutral,
+ order_discontinuity_t0)
end
- function SDDEProblem{iip}(f::AbstractSDDEFunction{iip}, g, h, tspan::Tuple,
- p = NullParameters();
- order_discontinuity_t0 = 1 // 1, kwargs...) where {iip}
- SDDEProblem{iip}(f, g, h(p, first(tspan)), h, tspan, p;
+ function SDDEProblem{iip}(f::AbstractSDDEFunction{iip},
+ g,
+ h,
+ tspan::Tuple,
+ p = NullParameters();
+ order_discontinuity_t0 = 1 // 1,
+ kwargs...,) where {iip}
+ SDDEProblem{iip}(f,
+ g,
+ h(p, first(tspan)),
+ h,
+ tspan,
+ p;
order_discontinuity_t0 = max(1 // 1, order_discontinuity_t0),
- kwargs...)
+ kwargs...,)
end
function SDDEProblem{iip}(f, g, args...; kwargs...) where {iip}
diff --git a/src/problems/sde_problems.jl b/src/problems/sde_problems.jl
index c14d1cb997..0d56ea5e0e 100644
--- a/src/problems/sde_problems.jl
+++ b/src/problems/sde_problems.jl
@@ -94,21 +94,36 @@ struct SDEProblem{uType, tType, isinplace, P, NP, F, G, K, ND} <:
kwargs::K
noise_rate_prototype::ND
seed::UInt64
- @add_kwonly function SDEProblem{iip}(f::AbstractSDEFunction{iip}, u0,
- tspan, p = NullParameters();
+ @add_kwonly function SDEProblem{iip}(f::AbstractSDEFunction{iip},
+ u0,
+ tspan,
+ p = NullParameters();
noise_rate_prototype = nothing,
- noise = nothing, seed = UInt64(0),
- kwargs...) where {iip}
+ noise = nothing,
+ seed = UInt64(0),
+ kwargs...,) where {iip}
_u0 = prepare_initial_state(u0)
_tspan = promote_tspan(tspan)
warn_paramtype(p)
- new{typeof(_u0), typeof(_tspan),
- isinplace(f), typeof(p),
- typeof(noise), typeof(f), typeof(f.g),
+ new{
+ typeof(_u0),
+ typeof(_tspan),
+ isinplace(f),
+ typeof(p),
+ typeof(noise),
+ typeof(f),
+ typeof(f.g),
typeof(kwargs),
- typeof(noise_rate_prototype)}(f, f.g, _u0, _tspan, p,
- noise, kwargs,
- noise_rate_prototype, seed)
+ typeof(noise_rate_prototype),
+ }(f,
+ f.g,
+ _u0,
+ _tspan,
+ p,
+ noise,
+ kwargs,
+ noise_rate_prototype,
+ seed)
end
function SDEProblem{iip}(f, g, u0, tspan, p = NullParameters(); kwargs...) where {iip}
@@ -148,7 +163,7 @@ function SplitSDEProblem{iip}(f1,
u0,
tspan,
p = NullParameters();
- kwargs...) where {iip}
+ kwargs...,) where {iip}
SplitSDEProblem{iip}(SplitSDEFunction(f1, f2, g), u0, tspan, p; kwargs...)
end
@@ -161,12 +176,20 @@ function SplitSDEProblem(f::SplitSDEFunction, u0, tspan, p = NullParameters(); k
SplitSDEProblem{isinplace(f)}(f, u0, tspan, p; kwargs...)
end
-function SplitSDEProblem{iip}(f::SplitSDEFunction, u0, tspan, p = NullParameters();
- func_cache = nothing, kwargs...) where {iip}
+function SplitSDEProblem{iip}(f::SplitSDEFunction,
+ u0,
+ tspan,
+ p = NullParameters();
+ func_cache = nothing,
+ kwargs...,) where {iip}
if f.cache === nothing && iip
cache = similar(u0)
- _f = SplitSDEFunction{iip}(f.f1, f.f2, f.g; mass_matrix = f.mass_matrix,
- _func_cache = cache, analytic = f.analytic)
+ _f = SplitSDEFunction{iip}(f.f1,
+ f.f2,
+ f.g;
+ mass_matrix = f.mass_matrix,
+ _func_cache = cache,
+ analytic = f.analytic,)
else
_f = f
end
@@ -195,23 +218,35 @@ function DynamicalSDEProblem{iip}(f1,
u0,
tspan,
p = NullParameters();
- kwargs...) where {iip}
+ kwargs...,) where {iip}
ff = DynamicalSDEFunction(f1, f2, g)
DynamicalSDEProblem{iip}(ff, v0, u0, tspan, p; kwargs...)
end
-function DynamicalSDEProblem(f::DynamicalSDEFunction, v0, u0, tspan,
- p = NullParameters(); kwargs...)
+function DynamicalSDEProblem(f::DynamicalSDEFunction,
+ v0,
+ u0,
+ tspan,
+ p = NullParameters();
+ kwargs...,)
DynamicalSDEProblem{isinplace(f)}(f, v0, u0, tspan, p; kwargs...)
end
-function DynamicalSDEProblem{iip}(f::DynamicalSDEFunction, v0, u0, tspan,
+function DynamicalSDEProblem{iip}(f::DynamicalSDEFunction,
+ v0,
+ u0,
+ tspan,
p = NullParameters();
- func_cache = nothing, kwargs...) where {iip}
+ func_cache = nothing,
+ kwargs...,) where {iip}
if f.cache === nothing && iip
cache = similar(u0)
- _f = DynamicalSDEFunction{iip}(f.f1, f.f2, f.g; mass_matrix = f.mass_matrix,
- _func_cache = cache, analytic = f.analytic)
+ _f = DynamicalSDEFunction{iip}(f.f1,
+ f.f2,
+ f.g;
+ mass_matrix = f.mass_matrix,
+ _func_cache = cache,
+ analytic = f.analytic,)
else
_f = f
end
diff --git a/src/problems/steady_state_problems.jl b/src/problems/steady_state_problems.jl
index f25f15e274..5835577185 100644
--- a/src/problems/steady_state_problems.jl
+++ b/src/problems/steady_state_problems.jl
@@ -81,11 +81,14 @@ struct SteadyStateProblem{uType, isinplace, P, F, K} <:
p::P
kwargs::K
@add_kwonly function SteadyStateProblem{iip}(f::AbstractODEFunction{iip},
- u0, p = NullParameters();
- kwargs...) where {iip}
+ u0,
+ p = NullParameters();
+ kwargs...,) where {iip}
_u0 = prepare_initial_state(u0)
warn_paramtype(p)
- new{typeof(_u0), isinplace(f), typeof(p), typeof(f), typeof(kwargs)}(f, _u0, p,
+ new{typeof(_u0), isinplace(f), typeof(p), typeof(f), typeof(kwargs)}(f,
+ _u0,
+ p,
kwargs)
end
diff --git a/src/remake.jl b/src/remake.jl
index a03c11ec52..41a8e33cf7 100644
--- a/src/remake.jl
+++ b/src/remake.jl
@@ -49,12 +49,13 @@ end
Remake the given `ODEProblem`.
If `u0` or `p` are given as symbolic maps `ModelingToolkit.jl` has to be loaded.
"""
-function remake(prob::ODEProblem; f = missing,
- u0 = missing,
- tspan = missing,
- p = missing,
- kwargs = missing,
- _kwargs...)
+function remake(prob::ODEProblem;
+ f = missing,
+ u0 = missing,
+ tspan = missing,
+ p = missing,
+ kwargs = missing,
+ _kwargs...,)
if tspan === missing
tspan = prob.tspan
end
@@ -86,12 +87,10 @@ function remake(prob::ODEProblem; f = missing,
ptspan = promote_tspan(tspan)
if iip
_f = ODEFunction{iip, FunctionWrapperSpecialize}(wrapfun_iip(unwrapped_f(prob.f.f),
- (u0, u0, p,
- ptspan[1])))
+ (u0, u0, p, ptspan[1])))
else
_f = ODEFunction{iip, FunctionWrapperSpecialize}(wrapfun_oop(unwrapped_f(prob.f.f),
- (u0, p,
- ptspan[1])))
+ (u0, p, ptspan[1])))
end
else
_f = prob.f
@@ -102,8 +101,7 @@ function remake(prob::ODEProblem; f = missing,
ptspan = promote_tspan(tspan)
if iip
_f = ODEFunction{iip, FunctionWrapperSpecialize}(wrapfun_iip(f,
- (u0, u0, p,
- ptspan[1])))
+ (u0, u0, p, ptspan[1])))
else
_f = ODEFunction{iip, FunctionWrapperSpecialize}(wrapfun_oop(f,
(u0, p, ptspan[1])))
@@ -113,8 +111,13 @@ function remake(prob::ODEProblem; f = missing,
end
if kwargs === missing
- ODEProblem{isinplace(prob)}(_f, u0, tspan, p, prob.problem_type; prob.kwargs...,
- _kwargs...)
+ ODEProblem{isinplace(prob)}(_f,
+ u0,
+ tspan,
+ p,
+ prob.problem_type;
+ prob.kwargs...,
+ _kwargs...,)
else
ODEProblem{isinplace(prob)}(_f, u0, tspan, p, prob.problem_type; kwargs...)
end
@@ -126,8 +129,15 @@ end
Remake the given `BVProblem`.
"""
-function remake(prob::BVProblem; f = missing, bc = missing, u0 = missing, tspan = missing,
- p = missing, kwargs = missing, problem_type = missing, _kwargs...)
+function remake(prob::BVProblem;
+ f = missing,
+ bc = missing,
+ u0 = missing,
+ tspan = missing,
+ p = missing,
+ kwargs = missing,
+ problem_type = missing,
+ _kwargs...,)
if tspan === missing
tspan = prob.tspan
end
@@ -164,14 +174,19 @@ function remake(prob::BVProblem; f = missing, bc = missing, u0 = missing, tspan
ptspan = promote_tspan(tspan)
if iip
_f = BVPFunction{iip, FunctionWrapperSpecialize, twopoint}(wrapfun_iip(f,
- (u0, u0, p, ptspan[1])), bc; prob.f.bcresid_prototype)
+ (u0, u0, p, ptspan[1])),
+ bc;
+ prob.f.bcresid_prototype,)
else
_f = BVPFunction{iip, FunctionWrapperSpecialize, twopoint}(wrapfun_oop(f,
- (u0, p, ptspan[1])), bc; prob.f.bcresid_prototype)
+ (u0, p, ptspan[1])),
+ bc;
+ prob.f.bcresid_prototype,)
end
else
- _f = BVPFunction{isinplace(prob), specialization(prob.f), twopoint}(f, bc;
- prob.f.bcresid_prototype)
+ _f = BVPFunction{isinplace(prob), specialization(prob.f), twopoint}(f,
+ bc;
+ prob.f.bcresid_prototype,)
end
if kwargs === missing
@@ -189,15 +204,15 @@ end
Remake the given `SDEProblem`.
"""
function remake(prob::SDEProblem;
- f = missing,
- u0 = missing,
- tspan = missing,
- p = missing,
- noise = missing,
- noise_rate_prototype = missing,
- seed = missing,
- kwargs = missing,
- _kwargs...)
+ f = missing,
+ u0 = missing,
+ tspan = missing,
+ p = missing,
+ noise = missing,
+ noise_rate_prototype = missing,
+ seed = missing,
+ kwargs = missing,
+ _kwargs...,)
if tspan === missing
tspan = prob.tspan
end
@@ -237,7 +252,7 @@ function remake(prob::SDEProblem;
noise_rate_prototype,
seed,
prob.kwargs...,
- _kwargs...)
+ _kwargs...,)
else
SDEProblem{iip}(f, u0, tspan, p; noise, noise_rate_prototype, seed, kwargs...)
end
@@ -252,17 +267,17 @@ Remake the given `OptimizationProblem`.
If `u0` or `p` are given as symbolic maps `ModelingToolkit.jl` has to be loaded.
"""
function remake(prob::OptimizationProblem;
- f = missing,
- u0 = missing,
- p = missing,
- lb = missing,
- ub = missing,
- int = missing,
- lcons = missing,
- ucons = missing,
- sense = missing,
- kwargs = missing,
- _kwargs...)
+ f = missing,
+ u0 = missing,
+ p = missing,
+ lb = missing,
+ ub = missing,
+ int = missing,
+ lcons = missing,
+ ucons = missing,
+ sense = missing,
+ kwargs = missing,
+ _kwargs...,)
if p === missing && u0 === missing
p, u0 = prob.p, prob.u0
else # at least one of them has a value
@@ -306,15 +321,30 @@ function remake(prob::OptimizationProblem;
end
if kwargs === missing
- OptimizationProblem{isinplace(prob)}(f = f, u0 = u0, p = p, lb = lb,
- ub = ub, int = int,
- lcons = lcons, ucons = ucons,
- sense = sense; prob.kwargs..., _kwargs...)
+ OptimizationProblem{isinplace(prob)}(;
+ f = f,
+ u0 = u0,
+ p = p,
+ lb = lb,
+ ub = ub,
+ int = int,
+ lcons = lcons,
+ ucons = ucons,
+ sense = sense,
+ prob.kwargs...,
+ _kwargs...,)
else
- OptimizationProblem{isinplace(prob)}(f = f, u0 = u0, p = p, lb = lb,
- ub = ub, int = int,
- lcons = lcons, ucons = ucons,
- sense = sense; kwargs...)
+ OptimizationProblem{isinplace(prob)}(;
+ f = f,
+ u0 = u0,
+ p = p,
+ lb = lb,
+ ub = ub,
+ int = int,
+ lcons = lcons,
+ ucons = ucons,
+ sense = sense,
+ kwargs...,)
end
end
@@ -326,12 +356,12 @@ Remake the given `NonlinearProblem`.
If `u0` or `p` are given as symbolic maps `ModelingToolkit.jl` has to be loaded.
"""
function remake(prob::NonlinearProblem;
- f = missing,
- u0 = missing,
- p = missing,
- problem_type = missing,
- kwargs = missing,
- _kwargs...)
+ f = missing,
+ u0 = missing,
+ p = missing,
+ problem_type = missing,
+ kwargs = missing,
+ _kwargs...,)
if p === missing && u0 === missing
p, u0 = prob.p, prob.u0
else # at least one of them has a value
@@ -360,24 +390,35 @@ function remake(prob::NonlinearProblem;
end
if kwargs === missing
- NonlinearProblem{isinplace(prob)}(f = f, u0 = u0, p = p,
- problem_type = problem_type; prob.kwargs...,
- _kwargs...)
+ NonlinearProblem{isinplace(prob)}(;
+ f = f,
+ u0 = u0,
+ p = p,
+ problem_type = problem_type,
+ prob.kwargs...,
+ _kwargs...,)
else
- NonlinearProblem{isinplace(prob)}(f = f, u0 = u0, p = p,
- problem_type = problem_type; kwargs...)
+ NonlinearProblem{isinplace(prob)}(;
+ f = f,
+ u0 = u0,
+ p = p,
+ problem_type = problem_type,
+ kwargs...,)
end
end
-
"""
remake(prob::NonlinearLeastSquaresProblem; f = missing, u0 = missing, p = missing,
kwargs = missing, _kwargs...)
Remake the given `NonlinearLeastSquaresProblem`.
"""
-function remake(prob::NonlinearLeastSquaresProblem; f = missing, u0 = missing, p = missing,
- kwargs = missing, _kwargs...)
+function remake(prob::NonlinearLeastSquaresProblem;
+ f = missing,
+ u0 = missing,
+ p = missing,
+ kwargs = missing,
+ _kwargs...,)
if p === missing && u0 === missing
p, u0 = prob.p, prob.u0
else # at least one of them has a value
@@ -394,8 +435,12 @@ function remake(prob::NonlinearLeastSquaresProblem; f = missing, u0 = missing, p
end
if kwargs === missing
- return NonlinearLeastSquaresProblem{isinplace(prob)}(; f, u0, p, prob.kwargs...,
- _kwargs...)
+ return NonlinearLeastSquaresProblem{isinplace(prob)}(;
+ f,
+ u0,
+ p,
+ prob.kwargs...,
+ _kwargs...,)
else
return NonlinearLeastSquaresProblem{isinplace(prob)}(; f, u0, p, kwargs...)
end
diff --git a/src/retcodes.jl b/src/retcodes.jl
index f9ab26663c..173c6789c5 100644
--- a/src/retcodes.jl
+++ b/src/retcodes.jl
@@ -16,8 +16,7 @@ referred to via `getproperty`, i.e. `SciML.ReturnCode.Success`.
## Note About Success Checking
Previous iterations of the interface suggested using `sol.retcode == :Success`, however,
-that is now not advised instead should be replaced with `
-SciMLBase.successful_retcode(sol)`. The reason is that there are many different
+that is now not advised; instead it should be replaced with `SciMLBase.successful_retcode(sol)`. The reason is that there are many different
codes that can be interpreted as successful, such as `ReturnCode.Terminated` which means
successfully used `terminate!(integrator)` to end an integration at a user-specified
condition. As such, `successful_retcode` is the most general way to query for if the solver
@@ -25,10 +24,10 @@ did not error.
## Properties
-* `successful_retcode(retcode::ReturnCode.T)`: Determines whether the output enum is
- considered a success state of the solver, i.e. the solver successfully solved the
- equations. `ReturnCode.Success` is the most basic form, simply declaring that it was
- successful, but many more informative success return codes exist as well.
+ - `successful_retcode(retcode::ReturnCode.T)`: Determines whether the output enum is
+ considered a success state of the solver, i.e. the solver successfully solved the
+ equations. `ReturnCode.Success` is the most basic form, simply declaring that it was
+ successful, but many more informative success return codes exist as well.
"""
EnumX.@enumx ReturnCode begin
"""
@@ -40,17 +39,17 @@ EnumX.@enumx ReturnCode begin
## Common Reasons for Seeing this Return Code
- * A common reason for `Default` return codes is that a solver is a non-SciML solver
- which does not fully conform to the interface. Please open an issue if this is seen
- and it will be improved.
- * Another common reason for a `Default` return code is if the solver is probed
- internally before the solving process is done, such as through the callback interface.
- Return codes are set to `Default` to start and are changed to `Success` and other
- return codes upon finishing the solving process or hitting a numerical difficulty.
+ - A common reason for `Default` return codes is that a solver is a non-SciML solver
+ which does not fully conform to the interface. Please open an issue if this is seen
+ and it will be improved.
+ - Another common reason for a `Default` return code is if the solver is probed
+ internally before the solving process is done, such as through the callback interface.
+ Return codes are set to `Default` to start and are changed to `Success` and other
+ return codes upon finishing the solving process or hitting a numerical difficulty.
## Properties
- * successful_retcode = false
+ - successful_retcode = false
"""
Default
@@ -62,12 +61,12 @@ EnumX.@enumx ReturnCode begin
## Common Reasons for Seeing this Return Code
- * This is the most common return code and most solvers will give this return code if
- the solving process went as expected without any errors or detected numerical issues.
+ - This is the most common return code and most solvers will give this return code if
+ the solving process went as expected without any errors or detected numerical issues.
## Properties
- * successful_retcode = true
+ - successful_retcode = true
"""
Success
@@ -80,18 +79,18 @@ EnumX.@enumx ReturnCode begin
## Common Reasons for Seeing this Return Code
- * The most common reason for seeing this return code is if a user calls a callback which
- uses `terminate!(integrator)` to halt the integration at a user-chosen stopping point.
- * Another common reason for this return code is due to implicit `terminate!` statements
- in some library callbacks. For example, `SteadyStateCallback` uses `terminate!`
- internally, so solutions which reach steady state will have a `ReturnCode.Terminated`
- state instead of a `ReturnCode.Success` state. Similarly, problems solved via
- SteadyStateDiffEq.jl will have this `ReturnCode.Terminated` state if a timestepping
- method is used to solve to steady state.
+ - The most common reason for seeing this return code is if a user calls a callback which
+ uses `terminate!(integrator)` to halt the integration at a user-chosen stopping point.
+ - Another common reason for this return code is due to implicit `terminate!` statements
+ in some library callbacks. For example, `SteadyStateCallback` uses `terminate!`
+ internally, so solutions which reach steady state will have a `ReturnCode.Terminated`
+ state instead of a `ReturnCode.Success` state. Similarly, problems solved via
+ SteadyStateDiffEq.jl will have this `ReturnCode.Terminated` state if a timestepping
+ method is used to solve to steady state.
## Properties
- * successful_retcode = true
+ - successful_retcode = true
"""
Terminated
@@ -104,17 +103,17 @@ EnumX.@enumx ReturnCode begin
## Common Reasons for Seeing this Return Code
- * The most common reason for seeing this return code is because the automatic `dt`
- selection algorithm is used but the starting derivative has a `NaN` or `Inf`
- derivative term. Double check that the `f(u0,p,t0)` term is well-defined without
- `NaN` or `Inf` values.
- * Another common reason for this return code is because of a user set `dt` which is
- calculated to be a `NaN`. If `solve(prob,alg,dt=x)`, double check that `x` is not
- `NaN`.
+ - The most common reason for seeing this return code is because the automatic `dt`
+ selection algorithm is used but the starting derivative has a `NaN` or `Inf`
+ derivative term. Double check that the `f(u0,p,t0)` term is well-defined without
+ `NaN` or `Inf` values.
+ - Another common reason for this return code is because of a user set `dt` which is
+ calculated to be a `NaN`. If `solve(prob,alg,dt=x)`, double check that `x` is not
+ `NaN`.
## Properties
- * successful_retcode = false
+ - successful_retcode = false
"""
DtNaN
@@ -137,16 +136,16 @@ EnumX.@enumx ReturnCode begin
## Common Reasons for Seeing this Return Code
- * This commonly occurs in ODE solving if a non-stiff method (e.g. `Tsit5`) is used in
- an algorithm choice for a stiff ODE. It is recommended that in such cases, one tries a
- stiff ODE solver.
- * This commonly occurs in optimization and nonlinear solvers if the tolerance on `solve`
- to too low and cannot be achieved due to floating point error or the condition number
- of the solver matrix. Double check that the chosen tolerance is numerically possible.
+ - This commonly occurs in ODE solving if a non-stiff method (e.g. `Tsit5`) is used in
+ an algorithm choice for a stiff ODE. It is recommended that in such cases, one tries a
+ stiff ODE solver.
+ - This commonly occurs in optimization and nonlinear solvers if the tolerance on `solve`
+ is too low and cannot be achieved due to floating point error or the condition number
+ of the solver matrix. Double check that the chosen tolerance is numerically possible.
## Properties
- * successful_retcode = false
+ - successful_retcode = false
"""
MaxIters
@@ -159,24 +158,24 @@ EnumX.@enumx ReturnCode begin
## Common Reasons for Seeing this Return Code
- * The most common reason for seeing this return code is because the integration
- is going unstable. As `f(u,p,t) -> infinity`, the time steps required by the solver
- to accurately handle the dynamics decreases. When it gets sufficiently small, `dtmin`,
- an exit is thrown as the solution is likely unstable. `dtmin` is also chosen to be
- around the value where floating point issues cause `t + dt == t`, and thus a `dt`
- of that size is impossible at floating point precision.
- * Another common reason for this return code is if domain constraints are set, such as
- by using `isoutofdomain`, but the domain constraint is incorrect. For example, if
- one is solving the ODE `f(u,p,t) = -u - 1`, one may think "but I want a solution with
- `u > 0` and thus I will set `isoutofdomain(u,p,t) = u < 0`. However, the true solution
- of this ODE is not positive, and thus what will occur is that the solver will try to
- decrease `dt` until it can give an accurate solution that is positive. As this is
- impossible, it will continue to shrink the `dt` until `dt < dtmin` and then exit with
- this return code.
+ - The most common reason for seeing this return code is because the integration
+ is going unstable. As `f(u,p,t) -> infinity`, the time steps required by the solver
+ to accurately handle the dynamics decreases. When it gets sufficiently small, `dtmin`,
+ an exit is thrown as the solution is likely unstable. `dtmin` is also chosen to be
+ around the value where floating point issues cause `t + dt == t`, and thus a `dt`
+ of that size is impossible at floating point precision.
+ - Another common reason for this return code is if domain constraints are set, such as
+ by using `isoutofdomain`, but the domain constraint is incorrect. For example, if
+ one is solving the ODE `f(u,p,t) = -u - 1`, one may think "but I want a solution with
+ `u > 0` and thus I will set `isoutofdomain(u,p,t) = u < 0`. However, the true solution
+ of this ODE is not positive, and thus what will occur is that the solver will try to
+ decrease `dt` until it can give an accurate solution that is positive. As this is
+ impossible, it will continue to shrink the `dt` until `dt < dtmin` and then exit with
+ this return code.
## Properties
- * successful_retcode = false
+ - successful_retcode = false
"""
DtLessThanMin
@@ -190,12 +189,12 @@ EnumX.@enumx ReturnCode begin
## Common Reasons for Seeing this Return Code
- * The most common reason for seeing this return code is because `u` contains a `NaN`
- or `Inf` value. The default `unstable_check` only checks for these values.
+ - The most common reason for seeing this return code is because `u` contains a `NaN`
+ or `Inf` value. The default `unstable_check` only checks for these values.
## Properties
- * successful_retcode = false
+ - successful_retcode = false
"""
Unstable
@@ -207,20 +206,20 @@ EnumX.@enumx ReturnCode begin
## Common Reasons for Seeing this Return Code
- * The most common reason for seeing this return code is because the initialization
- process of a DAE solver failed to find consistent initial conditions, which can
- occur if the differentiation index of the DAE solver is too high. Most DAE solvers
- only allow for index-1 DAEs, and so an index-2 DAE will fail during this
- initialization. To solve this kind of problem, use `ModelingToolkit.jl` and its
- `structural_simplify` method to reduce the index of the DAE.
- * Another common reason for this return code is if the initial condition was not
- suitable for the numerical solve. For example, the initial point had a `NaN` or `Inf`.
- Or in optimization, this can occur if the initial point is outside of the bound
- constraints given by the user.
+ - The most common reason for seeing this return code is because the initialization
+ process of a DAE solver failed to find consistent initial conditions, which can
+ occur if the differentiation index of the DAE solver is too high. Most DAE solvers
+ only allow for index-1 DAEs, and so an index-2 DAE will fail during this
+ initialization. To solve this kind of problem, use `ModelingToolkit.jl` and its
+ `structural_simplify` method to reduce the index of the DAE.
+ - Another common reason for this return code is if the initial condition was not
+ suitable for the numerical solve. For example, the initial point had a `NaN` or `Inf`.
+ Or in optimization, this can occur if the initial point is outside of the bound
+ constraints given by the user.
## Properties
- * successful_retcode = false
+ - successful_retcode = false
"""
InitialFailure
@@ -233,14 +232,14 @@ EnumX.@enumx ReturnCode begin
## Common Reasons for Seeing this Return Code
- * The most common reason for seeing this return code is because an inappropriate
- nonlinear solver was chosen. If fixed point iteration is used on a stiff problem,
- it will be faster by avoiding the Jacobian but it will make a stiff ODE solver not
- stable for stiff problems!
+ - The most common reason for seeing this return code is because an inappropriate
+ nonlinear solver was chosen. If fixed point iteration is used on a stiff problem,
+ it will be faster by avoiding the Jacobian but it will make a stiff ODE solver not
+ stable for stiff problems!
## Properties
- * successful_retcode = false
+ - successful_retcode = false
"""
ConvergenceFailure
@@ -252,13 +251,13 @@ EnumX.@enumx ReturnCode begin
## Common Reasons for Seeing this Return Code
- * The most common reason for seeing this return code is because the solver is a wrapped
- solver (i.e. a Fortran code) which does not provide any extra information about its
- exit state. If this is from a Julia-based solver, please open an issue.
+ - The most common reason for seeing this return code is because the solver is a wrapped
+ solver (i.e. a Fortran code) which does not provide any extra information about its
+ exit state. If this is from a Julia-based solver, please open an issue.
## Properties
- * successful_retcode = false
+ - successful_retcode = false
"""
Failure
@@ -270,14 +269,14 @@ EnumX.@enumx ReturnCode begin
## Common Reasons for Seeing this Return Code
- * The most common reason for this return code is via a bracketing nonlinear solver,
- such as bisection, iterating to convergence is unable to give the exact `f(x)=0`
- solution due to floating point precision issues, and thus it gives the first floating
- point value to the left for `x`.
+ - The most common reason for this return code is via a bracketing nonlinear solver,
+ such as bisection, iterating to convergence is unable to give the exact `f(x)=0`
+ solution due to floating point precision issues, and thus it gives the first floating
+ point value to the left for `x`.
## Properties
- * successful_retcode = true
+ - successful_retcode = true
"""
ExactSolutionLeft
@@ -289,14 +288,14 @@ EnumX.@enumx ReturnCode begin
## Common Reasons for Seeing this Return Code
- * The most common reason for this return code is via a bracketing nonlinear solver,
- such as bisection, iterating to convergence is unable to give the exact `f(x)=0`
- solution due to floating point precision issues, and thus it gives the first floating
- point value to the right for `x`.
+ - The most common reason for this return code is via a bracketing nonlinear solver,
+ such as bisection, iterating to convergence is unable to give the exact `f(x)=0`
+ solution due to floating point precision issues, and thus it gives the first floating
+ point value to the right for `x`.
## Properties
- * successful_retcode = true
+ - successful_retcode = true
"""
ExactSolutionRight
@@ -308,14 +307,14 @@ EnumX.@enumx ReturnCode begin
## Common Reasons for Seeing this Return Code
- * The most common reason for this return code is via a nonlinear solver, such as Falsi,
- iterating to convergence is unable to give the exact `f(x)=0` solution due to floating
- point precision issues, and thus it gives the closest floating point value to the
- true solution for `x`.
+ - The most common reason for this return code is via a nonlinear solver, such as Falsi,
+ iterating to convergence is unable to give the exact `f(x)=0` solution due to floating
+ point precision issues, and thus it gives the closest floating point value to the
+ true solution for `x`.
## Properties
- * successful_retcode = true
+ - successful_retcode = true
"""
FloatingPointLimit
@@ -326,7 +325,7 @@ EnumX.@enumx ReturnCode begin
## Properties
- * successful_retcode = false
+ - successful_retcode = false
"""
Infeasible
@@ -340,7 +339,7 @@ EnumX.@enumx ReturnCode begin
## Properties
- * successful_retcode = false
+ - successful_retcode = false
"""
MaxTime
end
@@ -355,8 +354,11 @@ function Base.convert(::Type{ReturnCode.T}, retcode::Symbol)
if retcode == :Default || retcode == :DEFAULT
ReturnCode.Default
- elseif retcode == :Success || retcode == :EXACT_SOLUTION_LEFT ||
- retcode == :FLOATING_POINT_LIMIT || retcode == symtrue || retcode == :OPTIMAL ||
+ elseif retcode == :Success ||
+ retcode == :EXACT_SOLUTION_LEFT ||
+ retcode == :FLOATING_POINT_LIMIT ||
+ retcode == symtrue ||
+ retcode == :OPTIMAL ||
retcode == :LOCALLY_SOLVED
ReturnCode.Success
elseif retcode == :Terminated
@@ -375,8 +377,10 @@ function Base.convert(::Type{ReturnCode.T}, retcode::Symbol)
ReturnCode.ConvergenceFailure
elseif retcode == :Failure || retcode == symfalse
ReturnCode.Failure
- elseif retcode == :Infeasible || retcode == :INFEASIBLE ||
- retcode == :DUAL_INFEASIBLE || retcode == :LOCALLY_INFEASIBLE ||
+ elseif retcode == :Infeasible ||
+ retcode == :INFEASIBLE ||
+ retcode == :DUAL_INFEASIBLE ||
+ retcode == :LOCALLY_INFEASIBLE ||
retcode == :INFEASIBLE_OR_UNBOUNDED
ReturnCode.Infeasible
else
@@ -389,8 +393,11 @@ symbol_to_ReturnCode(retcode::ReturnCode.T) = retcode
function symbol_to_ReturnCode(retcode::Symbol)
if retcode == :Default || retcode == :DEFAULT
ReturnCode.Default
- elseif retcode == :Success || retcode == :EXACT_SOLUTION_LEFT ||
- retcode == :FLOATING_POINT_LIMIT || retcode == symtrue || retcode == :OPTIMAL ||
+ elseif retcode == :Success ||
+ retcode == :EXACT_SOLUTION_LEFT ||
+ retcode == :FLOATING_POINT_LIMIT ||
+ retcode == symtrue ||
+ retcode == :OPTIMAL ||
retcode == :LOCALLY_SOLVED
ReturnCode.Success
elseif retcode == :Terminated
@@ -409,8 +416,10 @@ function symbol_to_ReturnCode(retcode::Symbol)
ReturnCode.ConvergenceFailure
elseif retcode == :Failure || retcode == symfalse
ReturnCode.Failure
- elseif retcode == :Infeasible || retcode == :INFEASIBLE ||
- retcode == :DUAL_INFEASIBLE || retcode == :LOCALLY_INFEASIBLE ||
+ elseif retcode == :Infeasible ||
+ retcode == :INFEASIBLE ||
+ retcode == :DUAL_INFEASIBLE ||
+ retcode == :LOCALLY_INFEASIBLE ||
retcode == :INFEASIBLE_OR_UNBOUNDED
ReturnCode.Infeasible
else
@@ -431,7 +440,8 @@ Returns a boolean for whether a return code should be interpreted as a form of s
function successful_retcode end
function successful_retcode(retcode::ReturnCode.T)
- retcode == ReturnCode.Success || retcode == ReturnCode.Terminated ||
+ retcode == ReturnCode.Success ||
+ retcode == ReturnCode.Terminated ||
retcode == ReturnCode.ExactSolutionLeft ||
retcode == ReturnCode.ExactSolutionRight ||
retcode == ReturnCode.FloatingPointLimit
diff --git a/src/scimlfunctions.jl b/src/scimlfunctions.jl
index 55eae6b4b1..d18dbd8816 100644
--- a/src/scimlfunctions.jl
+++ b/src/scimlfunctions.jl
@@ -180,9 +180,12 @@ end
function Base.summary(io::IO, prob::AbstractSciMLFunction)
type_color, no_color = get_colorizers(io)
print(io,
- type_color, nameof(typeof(prob)),
- no_color, ". In-place: ",
- type_color, isinplace(prob),
+ type_color,
+ nameof(typeof(prob)),
+ no_color,
+ ". In-place: ",
+ type_color,
+ isinplace(prob),
no_color)
end
@@ -411,10 +414,29 @@ See the `modelingtoolkitize` function from
automatically symbolically generating the Jacobian and more from the
numerically-defined functions.
"""
-struct ODEFunction{iip, specialize, F, TMM, Ta, Tt, TJ, JVP, VJP, JP, SP, TW, TWt, WP, TPJ,
+struct ODEFunction{
+ iip,
+ specialize,
+ F,
+ TMM,
+ Ta,
+ Tt,
+ TJ,
+ JVP,
+ VJP,
+ JP,
+ SP,
+ TW,
+ TWt,
+ WP,
+ TPJ,
S,
- S2, S3, O, TCV,
- SYS} <: AbstractODEFunction{iip}
+ S2,
+ S3,
+ O,
+ TCV,
+ SYS,
+} <: AbstractODEFunction{iip}
f::F
mass_matrix::TMM
analytic::Ta
@@ -541,9 +563,30 @@ automatically symbolically generating the Jacobian and more from the
numerically-defined functions. See `ModelingToolkit.SplitODEProblem` for
information on generating the SplitFunction from this symbolic engine.
"""
-struct SplitFunction{iip, specialize, F1, F2, TMM, C, Ta, Tt, TJ, JVP, VJP, JP, SP, TW, TWt,
- TPJ, S, S2, S3, O,
- TCV, SYS} <: AbstractODEFunction{iip}
+struct SplitFunction{
+ iip,
+ specialize,
+ F1,
+ F2,
+ TMM,
+ C,
+ Ta,
+ Tt,
+ TJ,
+ JVP,
+ VJP,
+ JP,
+ SP,
+ TW,
+ TWt,
+ TPJ,
+ S,
+ S2,
+ S3,
+ O,
+ TCV,
+ SYS,
+} <: AbstractODEFunction{iip}
f1::F1
f2::F2
mass_matrix::TMM
@@ -661,9 +704,29 @@ For more details on this argument, see the ODEFunction documentation.
The fields of the DynamicalODEFunction type directly match the names of the inputs.
"""
-struct DynamicalODEFunction{iip, specialize, F1, F2, TMM, Ta, Tt, TJ, JVP, VJP, JP, SP, TW,
- TWt, TPJ, S, S2, S3,
- O, TCV, SYS} <: AbstractODEFunction{iip}
+struct DynamicalODEFunction{
+ iip,
+ specialize,
+ F1,
+ F2,
+ TMM,
+ Ta,
+ Tt,
+ TJ,
+ JVP,
+ VJP,
+ JP,
+ SP,
+ TW,
+ TWt,
+ TPJ,
+ S,
+ S2,
+ S3,
+ O,
+ TCV,
+ SYS,
+} <: AbstractODEFunction{iip}
f1::F1
f2::F2
mass_matrix::TMM
@@ -777,10 +840,28 @@ For more details on this argument, see the ODEFunction documentation.
The fields of the DDEFunction type directly match the names of the inputs.
"""
-struct DDEFunction{iip, specialize, F, TMM, Ta, Tt, TJ, JVP, VJP, JP, SP, TW, TWt, TPJ, S,
- S2, S3, O, TCV, SYS,
-} <:
- AbstractDDEFunction{iip}
+struct DDEFunction{
+ iip,
+ specialize,
+ F,
+ TMM,
+ Ta,
+ Tt,
+ TJ,
+ JVP,
+ VJP,
+ JP,
+ SP,
+ TW,
+ TWt,
+ TPJ,
+ S,
+ S2,
+ S3,
+ O,
+ TCV,
+ SYS,
+} <: AbstractDDEFunction{iip}
f::F
mass_matrix::TMM
analytic::Ta
@@ -898,9 +979,29 @@ For more details on this argument, see the ODEFunction documentation.
The fields of the DynamicalDDEFunction type directly match the names of the inputs.
"""
-struct DynamicalDDEFunction{iip, specialize, F1, F2, TMM, Ta, Tt, TJ, JVP, VJP, JP, SP, TW,
- TWt, TPJ, S, S2, S3,
- O, TCV, SYS} <: AbstractDDEFunction{iip}
+struct DynamicalDDEFunction{
+ iip,
+ specialize,
+ F1,
+ F2,
+ TMM,
+ Ta,
+ Tt,
+ TJ,
+ JVP,
+ VJP,
+ JP,
+ SP,
+ TW,
+ TWt,
+ TPJ,
+ S,
+ S2,
+ S3,
+ O,
+ TCV,
+ SYS,
+} <: AbstractDDEFunction{iip}
f1::F1
f2::F2
mass_matrix::TMM
@@ -926,8 +1027,7 @@ TruncatedStacktraces.@truncate_stacktrace DynamicalDDEFunction 1 2
"""
$(TYPEDEF)
"""
-abstract type AbstractDiscreteFunction{iip} <:
- AbstractDiffEqFunction{iip} end
+abstract type AbstractDiscreteFunction{iip} <: AbstractDiffEqFunction{iip} end
@doc doc"""
DiscreteFunction{iip,F,Ta,S,S2,S3,O} <: AbstractDiscreteFunction{iip,specialize}
@@ -1157,9 +1257,29 @@ For more details on this argument, see the ODEFunction documentation.
The fields of the ODEFunction type directly match the names of the inputs.
"""
-struct SDEFunction{iip, specialize, F, G, TMM, Ta, Tt, TJ, JVP, VJP, JP, SP, TW, TWt, TPJ,
- GG, S, S2, S3, O,
- TCV, SYS,
+struct SDEFunction{
+ iip,
+ specialize,
+ F,
+ G,
+ TMM,
+ Ta,
+ Tt,
+ TJ,
+ JVP,
+ VJP,
+ JP,
+ SP,
+ TW,
+ TWt,
+ TPJ,
+ GG,
+ S,
+ S2,
+ S3,
+ O,
+ TCV,
+ SYS,
} <: AbstractSDEFunction{iip}
f::F
g::G
@@ -1279,10 +1399,31 @@ For more details on this argument, see the ODEFunction documentation.
The fields of the SplitSDEFunction type directly match the names of the inputs.
"""
-struct SplitSDEFunction{iip, specialize, F1, F2, G, TMM, C, Ta, Tt, TJ, JVP, VJP, JP, SP,
+struct SplitSDEFunction{
+ iip,
+ specialize,
+ F1,
+ F2,
+ G,
+ TMM,
+ C,
+ Ta,
+ Tt,
+ TJ,
+ JVP,
+ VJP,
+ JP,
+ SP,
TW,
- TWt, TPJ,
- S, S2, S3, O, TCV, SYS} <: AbstractSDEFunction{iip}
+ TWt,
+ TPJ,
+ S,
+ S2,
+ S3,
+ O,
+ TCV,
+ SYS,
+} <: AbstractSDEFunction{iip}
f1::F1
f2::F2
g::G
@@ -1401,10 +1542,31 @@ For more details on this argument, see the ODEFunction documentation.
The fields of the DynamicalSDEFunction type directly match the names of the inputs.
"""
-struct DynamicalSDEFunction{iip, specialize, F1, F2, G, TMM, C, Ta, Tt, TJ, JVP, VJP, JP,
+struct DynamicalSDEFunction{
+ iip,
+ specialize,
+ F1,
+ F2,
+ G,
+ TMM,
+ C,
+ Ta,
+ Tt,
+ TJ,
+ JVP,
+ VJP,
+ JP,
SP,
- TW, TWt,
- TPJ, S, S2, S3, O, TCV, SYS} <: AbstractSDEFunction{iip}
+ TW,
+ TWt,
+ TPJ,
+ S,
+ S2,
+ S3,
+ O,
+ TCV,
+ SYS,
+} <: AbstractSDEFunction{iip}
# This is a direct copy of the SplitSDEFunction, maybe it's not necessary and the above can be used instead.
f1::F1
f2::F2
@@ -1527,10 +1689,28 @@ For more details on this argument, see the ODEFunction documentation.
The fields of the RODEFunction type directly match the names of the inputs.
"""
-struct RODEFunction{iip, specialize, F, TMM, Ta, Tt, TJ, JVP, VJP, JP, SP, TW, TWt, TPJ, S,
- S2, S3, O, TCV, SYS,
-} <:
- AbstractRODEFunction{iip}
+struct RODEFunction{
+ iip,
+ specialize,
+ F,
+ TMM,
+ Ta,
+ Tt,
+ TJ,
+ JVP,
+ VJP,
+ JP,
+ SP,
+ TW,
+ TWt,
+ TPJ,
+ S,
+ S2,
+ S3,
+ O,
+ TCV,
+ SYS,
+} <: AbstractRODEFunction{iip}
f::F
mass_matrix::TMM
analytic::Ta
@@ -1683,10 +1863,27 @@ See the `modelingtoolkitize` function from
automatically symbolically generating the Jacobian and more from the
numerically-defined functions.
"""
-struct DAEFunction{iip, specialize, F, Ta, Tt, TJ, JVP, VJP, JP, SP, TW, TWt, TPJ, S, S2,
- S3, O, TCV,
- SYS} <:
- AbstractDAEFunction{iip}
+struct DAEFunction{
+ iip,
+ specialize,
+ F,
+ Ta,
+ Tt,
+ TJ,
+ JVP,
+ VJP,
+ JP,
+ SP,
+ TW,
+ TWt,
+ TPJ,
+ S,
+ S2,
+ S3,
+ O,
+ TCV,
+ SYS,
+} <: AbstractDAEFunction{iip}
f::F
analytic::Ta
tgrad::Tt
@@ -1799,9 +1996,30 @@ For more details on this argument, see the ODEFunction documentation.
The fields of the DDEFunction type directly match the names of the inputs.
"""
-struct SDDEFunction{iip, specialize, F, G, TMM, Ta, Tt, TJ, JVP, VJP, JP, SP, TW, TWt, TPJ,
- GG, S, S2, S3, O,
- TCV, SYS} <: AbstractSDDEFunction{iip}
+struct SDDEFunction{
+ iip,
+ specialize,
+ F,
+ G,
+ TMM,
+ Ta,
+ Tt,
+ TJ,
+ JVP,
+ VJP,
+ JP,
+ SP,
+ TW,
+ TWt,
+ TPJ,
+ GG,
+ S,
+ S2,
+ S3,
+ O,
+ TCV,
+ SYS,
+} <: AbstractSDDEFunction{iip}
f::F
g::G
mass_matrix::TMM
@@ -1904,8 +2122,28 @@ For more details on this argument, see the ODEFunction documentation.
The fields of the NonlinearFunction type directly match the names of the inputs.
"""
-struct NonlinearFunction{iip, specialize, F, TMM, Ta, Tt, TJ, JVP, VJP, JP, SP, TW, TWt,
- TPJ, S, S2, O, TCV, SYS, RP} <: AbstractNonlinearFunction{iip}
+struct NonlinearFunction{
+ iip,
+ specialize,
+ F,
+ TMM,
+ Ta,
+ Tt,
+ TJ,
+ JVP,
+ VJP,
+ JP,
+ SP,
+ TW,
+ TWt,
+ TPJ,
+ S,
+ S2,
+ O,
+ TCV,
+ SYS,
+ RP,
+} <: AbstractNonlinearFunction{iip}
f::F
mass_matrix::TMM
analytic::Ta
@@ -1983,9 +2221,8 @@ For more details on this argument, see the ODEFunction documentation.
The fields of the IntervalNonlinearFunction type directly match the names of the inputs.
"""
-struct IntervalNonlinearFunction{iip, specialize, F, Ta,
- S, S2, O, SYS,
-} <: AbstractIntervalNonlinearFunction{iip}
+struct IntervalNonlinearFunction{iip, specialize, F, Ta, S, S2, O, SYS} <:
+ AbstractIntervalNonlinearFunction{iip}
f::F
analytic::Ta
syms::S
@@ -2011,87 +2248,94 @@ and more. For all cases, `u` is the state and `p` are the parameters.
## Constructor
```julia
-OptimizationFunction{iip}(f, adtype::AbstractADType = NoAD();
- grad = nothing, hess = nothing, hv = nothing,
- cons = nothing, cons_j = nothing, cons_h = nothing,
- hess_prototype = nothing,
- cons_jac_prototype = nothing,
- cons_hess_prototype = nothing,
- syms = __has_syms(f) ? f.syms : nothing,
- paramsyms = __has_paramsyms(f) ? f.paramsyms : nothing,
- observed = __has_observed(f) ? f.observed : DEFAULT_OBSERVED_NO_TIME,
- lag_h = nothing,
- hess_colorvec = __has_colorvec(f) ? f.colorvec : nothing,
- cons_jac_colorvec = __has_colorvec(f) ? f.colorvec : nothing,
- cons_hess_colorvec = __has_colorvec(f) ? f.colorvec : nothing,
- lag_hess_colorvec = nothing,
- sys = __has_sys(f) ? f.sys : nothing)
+OptimizationFunction{iip}(
+ f;
+ adtype::AbstractADType = NoAD(),
+ grad = nothing,
+ hess = nothing,
+ hv = nothing,
+ cons = nothing,
+ cons_j = nothing,
+ cons_h = nothing,
+ hess_prototype = nothing,
+ cons_jac_prototype = nothing,
+ cons_hess_prototype = nothing,
+ syms = __has_syms(f) ? f.syms : nothing,
+ paramsyms = __has_paramsyms(f) ? f.paramsyms : nothing,
+ observed = __has_observed(f) ? f.observed : DEFAULT_OBSERVED_NO_TIME,
+ lag_h = nothing,
+ hess_colorvec = __has_colorvec(f) ? f.colorvec : nothing,
+ cons_jac_colorvec = __has_colorvec(f) ? f.colorvec : nothing,
+ cons_hess_colorvec = __has_colorvec(f) ? f.colorvec : nothing,
+ lag_hess_colorvec = nothing,
+ sys = __has_sys(f) ? f.sys : nothing,
+)
```
## Positional Arguments
-- `f(u,p,args...)`: the function to optimize. `u` are the optimization variables and `p` are parameters used in definition of
-the objective, even if no such parameters are used in the objective it should be an argument in the function. This can also take
-any additonal arguments that are relevant to the objective function, for example minibatches used in machine learning,
-take a look at the minibatching tutorial [here](https://docs.sciml.ai/Optimization/stable/tutorials/minibatch/). This should return
-a scalar, the loss value, as the first return output and if any additional outputs are returned, they will be passed to the `callback`
-function described in [Callback Functions](@ref).
-- `adtype`: see the section [Defining Optimization Functions via AD](@ref)
+ - `f(u,p,args...)`: the function to optimize. `u` are the optimization variables and `p` are parameters used in definition of
+ the objective, even if no such parameters are used in the objective it should be an argument in the function. This can also take
+    any additional arguments that are relevant to the objective function, for example minibatches used in machine learning,
+ take a look at the minibatching tutorial [here](https://docs.sciml.ai/Optimization/stable/tutorials/minibatch/). This should return
+ a scalar, the loss value, as the first return output and if any additional outputs are returned, they will be passed to the `callback`
+ function described in [Callback Functions](@ref).
+ - `adtype`: see the section [Defining Optimization Functions via AD](@ref)
## Keyword Arguments
-- `grad(G,u,p)` or `G=grad(u,p)`: the gradient of `f` with respect to `u`. If `f` takes additional arguments
+ - `grad(G,u,p)` or `G=grad(u,p)`: the gradient of `f` with respect to `u`. If `f` takes additional arguments
then `grad(G,u,p,args...)` or `G=grad(u,p,args...)` should be used.
-- `hess(H,u,p)` or `H=hess(u,p)`: the Hessian of `f` with respect to `u`. If `f` takes additional arguments
+ - `hess(H,u,p)` or `H=hess(u,p)`: the Hessian of `f` with respect to `u`. If `f` takes additional arguments
then `hess(H,u,p,args...)` or `H=hess(u,p,args...)` should be used.
-- `hv(Hv,u,v,p)` or `Hv=hv(u,v,p)`: the Hessian-vector product ``(d^2 f / du^2) v``. If `f` takes additional arguments
+ - `hv(Hv,u,v,p)` or `Hv=hv(u,v,p)`: the Hessian-vector product ``(d^2 f / du^2) v``. If `f` takes additional arguments
then `hv(Hv,u,v,p,args...)` or `Hv=hv(u,v,p, args...)` should be used.
-- `cons(res,x,p)` or `res=cons(x,p)` : the constraints function, should mutate the passed `res` array
+ - `cons(res,x,p)` or `res=cons(x,p)` : the constraints function, should mutate the passed `res` array
with value of the `i`th constraint, evaluated at the current values of variables
inside the optimization routine. This takes just the function evaluations
and the equality or inequality assertion is applied by the solver based on the constraint
bounds passed as `lcons` and `ucons` to [`OptimizationProblem`](@ref), in case of equality
constraints `lcons` and `ucons` should be passed equal values.
-- `cons_j(J,x,p)` or `J=cons_j(x,p)`: the Jacobian of the constraints.
-- `cons_h(H,x,p)` or `H=cons_h(x,p)`: the Hessian of the constraints, provided as
- an array of Hessians with `res[i]` being the Hessian with respect to the `i`th output on `cons`.
-- `hess_prototype`: a prototype matrix matching the type that matches the Hessian. For example,
- if the Hessian is tridiagonal, then an appropriately sized `Hessian` matrix can be used
- as the prototype and optimization solvers will specialize on this structure where possible. Non-structured
- sparsity patterns should use a `SparseMatrixCSC` with a correct sparsity pattern for the Hessian.
- The default is `nothing`, which means a dense Hessian.
-- `cons_jac_prototype`: a prototype matrix matching the type that matches the constraint Jacobian.
- The default is `nothing`, which means a dense constraint Jacobian.
-- `cons_hess_prototype`: a prototype matrix matching the type that matches the constraint Hessian.
- This is defined as an array of matrices, where `hess[i]` is the Hessian w.r.t. the `i`th output.
- For example, if the Hessian is sparse, then `hess` is a `Vector{SparseMatrixCSC}`.
- The default is `nothing`, which means a dense constraint Hessian.
-- `lag_h(res,x,sigma,mu,p)` or `res=lag_h(x,sigma,mu,p)`: the Hessian of the Lagrangian,
- where `sigma` is a multiplier of the cost function and `mu` are the Lagrange multipliers
- multiplying the constraints. This can be provided instead of `hess` and `cons_h`
- to solvers that directly use the Hessian of the Lagrangian.
-- `hess_colorvec`: a color vector according to the SparseDiffTools.jl definition for the sparsity
- pattern of the `hess_prototype`. This specializes the Hessian construction when using
- finite differences and automatic differentiation to be computed in an accelerated manner
- based on the sparsity pattern. Defaults to `nothing`, which means a color vector will be
- internally computed on demand when required. The cost of this operation is highly dependent
- on the sparsity pattern.
-- `cons_jac_colorvec`: a color vector according to the SparseDiffTools.jl definition for the sparsity
- pattern of the `cons_jac_prototype`.
-- `cons_hess_colorvec`: an array of color vector according to the SparseDiffTools.jl definition for
- the sparsity pattern of the `cons_hess_prototype`.
+ - `cons_j(J,x,p)` or `J=cons_j(x,p)`: the Jacobian of the constraints.
+ - `cons_h(H,x,p)` or `H=cons_h(x,p)`: the Hessian of the constraints, provided as
+ an array of Hessians with `res[i]` being the Hessian with respect to the `i`th output on `cons`.
+ - `hess_prototype`: a prototype matrix matching the type that matches the Hessian. For example,
+ if the Hessian is tridiagonal, then an appropriately sized `Hessian` matrix can be used
+ as the prototype and optimization solvers will specialize on this structure where possible. Non-structured
+ sparsity patterns should use a `SparseMatrixCSC` with a correct sparsity pattern for the Hessian.
+ The default is `nothing`, which means a dense Hessian.
+ - `cons_jac_prototype`: a prototype matrix matching the type that matches the constraint Jacobian.
+ The default is `nothing`, which means a dense constraint Jacobian.
+ - `cons_hess_prototype`: a prototype matrix matching the type that matches the constraint Hessian.
+ This is defined as an array of matrices, where `hess[i]` is the Hessian w.r.t. the `i`th output.
+ For example, if the Hessian is sparse, then `hess` is a `Vector{SparseMatrixCSC}`.
+ The default is `nothing`, which means a dense constraint Hessian.
+ - `lag_h(res,x,sigma,mu,p)` or `res=lag_h(x,sigma,mu,p)`: the Hessian of the Lagrangian,
+ where `sigma` is a multiplier of the cost function and `mu` are the Lagrange multipliers
+ multiplying the constraints. This can be provided instead of `hess` and `cons_h`
+ to solvers that directly use the Hessian of the Lagrangian.
+ - `hess_colorvec`: a color vector according to the SparseDiffTools.jl definition for the sparsity
+ pattern of the `hess_prototype`. This specializes the Hessian construction when using
+ finite differences and automatic differentiation to be computed in an accelerated manner
+ based on the sparsity pattern. Defaults to `nothing`, which means a color vector will be
+ internally computed on demand when required. The cost of this operation is highly dependent
+ on the sparsity pattern.
+ - `cons_jac_colorvec`: a color vector according to the SparseDiffTools.jl definition for the sparsity
+ pattern of the `cons_jac_prototype`.
+  - `cons_hess_colorvec`: an array of color vectors according to the SparseDiffTools.jl definition for
+ the sparsity pattern of the `cons_hess_prototype`.
When [Symbolic Problem Building with ModelingToolkit](@ref) interface is used the following arguments are also relevant:
-- `syms`: the symbol names for the elements of the equation. This should match `u0` in size. For
- example, if `u = [0.0,1.0]` and `syms = [:x, :y]`, this will apply a canonical naming to the
- values, allowing `sol[:x]` in the solution and automatically naming values in plots.
-- `paramsyms`: the symbol names for the parameters of the equation. This should match `p` in
- size. For example, if `p = [0.0, 1.0]` and `paramsyms = [:a, :b]`, this will apply a canonical
- naming to the values, allowing `sol[:a]` in the solution.
-- `observed`: an algebraic combination of optimization variables that is of interest to the user
+ - `syms`: the symbol names for the elements of the equation. This should match `u0` in size. For
+ example, if `u = [0.0,1.0]` and `syms = [:x, :y]`, this will apply a canonical naming to the
+ values, allowing `sol[:x]` in the solution and automatically naming values in plots.
+ - `paramsyms`: the symbol names for the parameters of the equation. This should match `p` in
+ size. For example, if `p = [0.0, 1.0]` and `paramsyms = [:a, :b]`, this will apply a canonical
+ naming to the values, allowing `sol[:a]` in the solution.
+ - `observed`: an algebraic combination of optimization variables that is of interest to the user
which will be available in the solution. This can be single or multiple expressions.
-- `sys`: field that stores the `OptimizationSystem`.
+ - `sys`: field that stores the `OptimizationSystem`.
## Defining Optimization Functions via AD
@@ -2102,7 +2346,7 @@ Automatic Differentiation backend to use for automatically filling in all of the
For example,
```julia
-OptimizationFunction(f,AutoForwardDiff())
+OptimizationFunction(f, AutoForwardDiff())
```
will use [ForwardDiff.jl](https://github.com/JuliaDiff/ForwardDiff.jl) to define
@@ -2124,9 +2368,32 @@ For more details on this argument, see the ODEFunction documentation.
The fields of the OptimizationFunction type directly match the names of the inputs.
"""
-struct OptimizationFunction{iip, AD, F, G, H, HV, C, CJ, CH, HP, CJP, CHP, S, S2, O,
- EX, CEX, SYS, LH, LHP, HCV, CJCV, CHCV, LHCV} <:
- AbstractOptimizationFunction{iip}
+struct OptimizationFunction{
+ iip,
+ AD,
+ F,
+ G,
+ H,
+ HV,
+ C,
+ CJ,
+ CH,
+ HP,
+ CJP,
+ CHP,
+ S,
+ S2,
+ O,
+ EX,
+ CEX,
+ SYS,
+ LH,
+ LHP,
+ HCV,
+ CJCV,
+ CHCV,
+ LHCV,
+} <: AbstractOptimizationFunction{iip}
f::F
adtype::AD
grad::G
@@ -2262,9 +2529,34 @@ For more details on this argument, see the ODEFunction documentation.
The fields of the BVPFunction type directly match the names of the inputs.
"""
-struct BVPFunction{iip, specialize, twopoint, F, BF, TMM, Ta, Tt, TJ, BCTJ, JVP, VJP,
- JP, BCJP, BCRP, SP, TW, TWt, TPJ, S, S2, S3, O, TCV, BCTCV,
- SYS} <: AbstractBVPFunction{iip, twopoint}
+struct BVPFunction{
+ iip,
+ specialize,
+ twopoint,
+ F,
+ BF,
+ TMM,
+ Ta,
+ Tt,
+ TJ,
+ BCTJ,
+ JVP,
+ VJP,
+ JP,
+ BCJP,
+ BCRP,
+ SP,
+ TW,
+ TWt,
+ TPJ,
+ S,
+ S2,
+ S3,
+ O,
+ TCV,
+ BCTCV,
+ SYS,
+} <: AbstractBVPFunction{iip, twopoint}
f::F
bc::BF
mass_matrix::TMM
@@ -2330,8 +2622,7 @@ This field is currently unused
The fields of the IntegralFunction type directly match the names of the inputs.
"""
-struct IntegralFunction{iip, specialize, F, T} <:
- AbstractIntegralFunction{iip}
+struct IntegralFunction{iip, specialize, F, T} <: AbstractIntegralFunction{iip}
f::F
integrand_prototype::T
end
@@ -2393,8 +2684,7 @@ This field is currently unused
The fields of the BatchIntegralFunction type directly match the names of the inputs.
"""
-struct BatchIntegralFunction{iip, specialize, F, T} <:
- AbstractIntegralFunction{iip}
+struct BatchIntegralFunction{iip, specialize, F, T} <: AbstractIntegralFunction{iip}
f::F
integrand_prototype::T
max_batch::Int
@@ -2462,32 +2752,24 @@ end
######### Basic Constructor
function ODEFunction{iip, specialize}(f;
- mass_matrix = __has_mass_matrix(f) ? f.mass_matrix :
- I,
- analytic = __has_analytic(f) ? f.analytic : nothing,
- tgrad = __has_tgrad(f) ? f.tgrad : nothing,
- jac = __has_jac(f) ? f.jac : nothing,
- jvp = __has_jvp(f) ? f.jvp : nothing,
- vjp = __has_vjp(f) ? f.vjp : nothing,
- jac_prototype = __has_jac_prototype(f) ?
- f.jac_prototype :
- nothing,
- sparsity = __has_sparsity(f) ? f.sparsity :
- jac_prototype,
- Wfact = __has_Wfact(f) ? f.Wfact : nothing,
- Wfact_t = __has_Wfact_t(f) ? f.Wfact_t : nothing,
- W_prototype = __has_W_prototype(f) ? f.W_prototype : nothing,
- paramjac = __has_paramjac(f) ? f.paramjac : nothing,
- syms = __has_syms(f) ? f.syms : nothing,
- indepsym = __has_indepsym(f) ? f.indepsym : nothing,
- paramsyms = __has_paramsyms(f) ? f.paramsyms :
- nothing,
- observed = __has_observed(f) ? f.observed :
- DEFAULT_OBSERVED,
- colorvec = __has_colorvec(f) ? f.colorvec : nothing,
- sys = __has_sys(f) ? f.sys : nothing) where {iip,
- specialize,
-}
+ mass_matrix = __has_mass_matrix(f) ? f.mass_matrix : I,
+ analytic = __has_analytic(f) ? f.analytic : nothing,
+ tgrad = __has_tgrad(f) ? f.tgrad : nothing,
+ jac = __has_jac(f) ? f.jac : nothing,
+ jvp = __has_jvp(f) ? f.jvp : nothing,
+ vjp = __has_vjp(f) ? f.vjp : nothing,
+ jac_prototype = __has_jac_prototype(f) ? f.jac_prototype : nothing,
+ sparsity = __has_sparsity(f) ? f.sparsity : jac_prototype,
+ Wfact = __has_Wfact(f) ? f.Wfact : nothing,
+ Wfact_t = __has_Wfact_t(f) ? f.Wfact_t : nothing,
+ W_prototype = __has_W_prototype(f) ? f.W_prototype : nothing,
+ paramjac = __has_paramjac(f) ? f.paramjac : nothing,
+ syms = __has_syms(f) ? f.syms : nothing,
+ indepsym = __has_indepsym(f) ? f.indepsym : nothing,
+ paramsyms = __has_paramsyms(f) ? f.paramsyms : nothing,
+ observed = __has_observed(f) ? f.observed : DEFAULT_OBSERVED,
+ colorvec = __has_colorvec(f) ? f.colorvec : nothing,
+ sys = __has_sys(f) ? f.sys : nothing,) where {iip, specialize}
if mass_matrix === I && f isa Tuple
mass_matrix = ((I for i in 1:length(f))...,)
end
@@ -2505,7 +2787,8 @@ function ODEFunction{iip, specialize}(f;
end
end
- if jac_prototype !== nothing && colorvec === nothing &&
+ if jac_prototype !== nothing &&
+ colorvec === nothing &&
ArrayInterface.fast_matrix_colors(jac_prototype)
_colorvec = ArrayInterface.matrix_colors(jac_prototype)
else
@@ -2520,7 +2803,12 @@ function ODEFunction{iip, specialize}(f;
Wfact_tiip = Wfact_t !== nothing ? isinplace(Wfact_t, 5, "Wfact_t", iip) : iip
paramjaciip = paramjac !== nothing ? isinplace(paramjac, 4, "paramjac", iip) : iip
- nonconforming = (jaciip, tgradiip, jvpiip, vjpiip, Wfactiip, Wfact_tiip,
+ nonconforming = (jaciip,
+ tgradiip,
+ jvpiip,
+ vjpiip,
+ Wfactiip,
+ Wfact_tiip,
paramjaciip) .!= iip
if any(nonconforming)
nonconforming = findall(nonconforming)
@@ -2531,40 +2819,131 @@ function ODEFunction{iip, specialize}(f;
_f = prepare_function(f)
if specialize === NoSpecialize
- ODEFunction{iip, specialize,
- Any, Any, Any, Any,
- Any, Any, Any, typeof(jac_prototype),
- typeof(sparsity), Any, Any, typeof(W_prototype), Any,
- typeof(syms), typeof(indepsym), typeof(paramsyms), Any,
+ ODEFunction{
+ iip,
+ specialize,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ typeof(jac_prototype),
+ typeof(sparsity),
+ Any,
+ Any,
+ typeof(W_prototype),
+ Any,
+ typeof(syms),
+ typeof(indepsym),
+ typeof(paramsyms),
+ Any,
typeof(_colorvec),
- typeof(sys)}(_f, mass_matrix, analytic, tgrad, jac,
- jvp, vjp, jac_prototype, sparsity, Wfact,
- Wfact_t, W_prototype, paramjac, syms, indepsym, paramsyms,
- observed, _colorvec, sys)
+ typeof(sys),
+ }(_f,
+ mass_matrix,
+ analytic,
+ tgrad,
+ jac,
+ jvp,
+ vjp,
+ jac_prototype,
+ sparsity,
+ Wfact,
+ Wfact_t,
+ W_prototype,
+ paramjac,
+ syms,
+ indepsym,
+ paramsyms,
+ observed,
+ _colorvec,
+ sys)
elseif specialize === false
- ODEFunction{iip, FunctionWrapperSpecialize,
- typeof(_f), typeof(mass_matrix), typeof(analytic), typeof(tgrad),
- typeof(jac), typeof(jvp), typeof(vjp), typeof(jac_prototype),
- typeof(sparsity), typeof(Wfact), typeof(Wfact_t), typeof(W_prototype),
+ ODEFunction{
+ iip,
+ FunctionWrapperSpecialize,
+ typeof(_f),
+ typeof(mass_matrix),
+ typeof(analytic),
+ typeof(tgrad),
+ typeof(jac),
+ typeof(jvp),
+ typeof(vjp),
+ typeof(jac_prototype),
+ typeof(sparsity),
+ typeof(Wfact),
+ typeof(Wfact_t),
+ typeof(W_prototype),
typeof(paramjac),
- typeof(syms), typeof(indepsym), typeof(paramsyms), typeof(observed),
+ typeof(syms),
+ typeof(indepsym),
+ typeof(paramsyms),
+ typeof(observed),
typeof(_colorvec),
- typeof(sys)}(_f, mass_matrix, analytic, tgrad, jac,
- jvp, vjp, jac_prototype, sparsity, Wfact,
- Wfact_t, W_prototype, paramjac, syms, indepsym, paramsyms,
- observed, _colorvec, sys)
+ typeof(sys),
+ }(_f,
+ mass_matrix,
+ analytic,
+ tgrad,
+ jac,
+ jvp,
+ vjp,
+ jac_prototype,
+ sparsity,
+ Wfact,
+ Wfact_t,
+ W_prototype,
+ paramjac,
+ syms,
+ indepsym,
+ paramsyms,
+ observed,
+ _colorvec,
+ sys)
else
- ODEFunction{iip, specialize,
- typeof(_f), typeof(mass_matrix), typeof(analytic), typeof(tgrad),
- typeof(jac), typeof(jvp), typeof(vjp), typeof(jac_prototype),
- typeof(sparsity), typeof(Wfact), typeof(Wfact_t), typeof(W_prototype),
+ ODEFunction{
+ iip,
+ specialize,
+ typeof(_f),
+ typeof(mass_matrix),
+ typeof(analytic),
+ typeof(tgrad),
+ typeof(jac),
+ typeof(jvp),
+ typeof(vjp),
+ typeof(jac_prototype),
+ typeof(sparsity),
+ typeof(Wfact),
+ typeof(Wfact_t),
+ typeof(W_prototype),
typeof(paramjac),
- typeof(syms), typeof(indepsym), typeof(paramsyms), typeof(observed),
+ typeof(syms),
+ typeof(indepsym),
+ typeof(paramsyms),
+ typeof(observed),
typeof(_colorvec),
- typeof(sys)}(_f, mass_matrix, analytic, tgrad, jac,
- jvp, vjp, jac_prototype, sparsity, Wfact,
- Wfact_t, W_prototype, paramjac, syms, indepsym, paramsyms,
- observed, _colorvec, sys)
+ typeof(sys),
+ }(_f,
+ mass_matrix,
+ analytic,
+ tgrad,
+ jac,
+ jvp,
+ vjp,
+ jac_prototype,
+ sparsity,
+ Wfact,
+ Wfact_t,
+ W_prototype,
+ paramjac,
+ syms,
+ indepsym,
+ paramsyms,
+ observed,
+ _colorvec,
+ sys)
end
end
@@ -2577,26 +2956,88 @@ ODEFunction(f::ODEFunction; kwargs...) = f
function unwrapped_f(f::ODEFunction, newf = unwrapped_f(f.f))
if specialization(f) === NoSpecialize
- ODEFunction{isinplace(f), specialization(f), Any, Any, Any,
- Any, Any, Any, Any, typeof(f.jac_prototype),
- typeof(f.sparsity), Any, Any, Any,
- typeof(f.syms), Any, Any, Any, typeof(f.colorvec),
- typeof(f.sys)}(newf, f.mass_matrix, f.analytic, f.tgrad, f.jac,
- f.jvp, f.vjp, f.jac_prototype, f.sparsity, f.Wfact,
- f.Wfact_t, f.W_prototype, f.paramjac, f.syms, f.indepsym, f.paramsyms,
- f.observed, f.colorvec, f.sys)
+ ODEFunction{
+ isinplace(f),
+ specialization(f),
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ typeof(f.jac_prototype),
+ typeof(f.sparsity),
+ Any,
+ Any,
+ Any,
+ typeof(f.syms),
+ Any,
+ Any,
+ Any,
+ typeof(f.colorvec),
+ typeof(f.sys),
+ }(newf,
+ f.mass_matrix,
+ f.analytic,
+ f.tgrad,
+ f.jac,
+ f.jvp,
+ f.vjp,
+ f.jac_prototype,
+ f.sparsity,
+ f.Wfact,
+ f.Wfact_t,
+ f.W_prototype,
+ f.paramjac,
+ f.syms,
+ f.indepsym,
+ f.paramsyms,
+ f.observed,
+ f.colorvec,
+ f.sys)
else
- ODEFunction{isinplace(f), specialization(f), typeof(newf), typeof(f.mass_matrix),
- typeof(f.analytic), typeof(f.tgrad),
- typeof(f.jac), typeof(f.jvp), typeof(f.vjp), typeof(f.jac_prototype),
- typeof(f.sparsity), typeof(f.Wfact), typeof(f.Wfact_t), typeof(f.W_prototype),
+ ODEFunction{
+ isinplace(f),
+ specialization(f),
+ typeof(newf),
+ typeof(f.mass_matrix),
+ typeof(f.analytic),
+ typeof(f.tgrad),
+ typeof(f.jac),
+ typeof(f.jvp),
+ typeof(f.vjp),
+ typeof(f.jac_prototype),
+ typeof(f.sparsity),
+ typeof(f.Wfact),
+ typeof(f.Wfact_t),
+ typeof(f.W_prototype),
typeof(f.paramjac),
- typeof(f.syms), typeof(f.indepsym), typeof(f.paramsyms),
- typeof(f.observed), typeof(f.colorvec),
- typeof(f.sys)}(newf, f.mass_matrix, f.analytic, f.tgrad, f.jac,
- f.jvp, f.vjp, f.jac_prototype, f.sparsity, f.Wfact,
- f.Wfact_t, f.W_prototype, f.paramjac, f.syms, f.indepsym, f.paramsyms,
- f.observed, f.colorvec, f.sys)
+ typeof(f.syms),
+ typeof(f.indepsym),
+ typeof(f.paramsyms),
+ typeof(f.observed),
+ typeof(f.colorvec),
+ typeof(f.sys),
+ }(newf,
+ f.mass_matrix,
+ f.analytic,
+ f.tgrad,
+ f.jac,
+ f.jvp,
+ f.vjp,
+ f.jac_prototype,
+ f.sparsity,
+ f.Wfact,
+ f.Wfact_t,
+ f.W_prototype,
+ f.paramjac,
+ f.syms,
+ f.indepsym,
+ f.paramsyms,
+ f.observed,
+ f.colorvec,
+ f.sys)
end
end
@@ -2646,7 +3087,7 @@ function ODEFunction{iip}(f::NonlinearFunction) where {iip}
indepsym = nothing,
paramsyms = f.paramsyms,
observed = f.observed,
- colorvec = f.colorvec)
+ colorvec = f.colorvec,)
end
"""
@@ -2696,12 +3137,29 @@ function NonlinearFunction{iip}(f::ODEFunction) where {iip}
syms = f.syms,
paramsyms = f.paramsyms,
observed = f.observed,
- colorvec = f.colorvec)
+ colorvec = f.colorvec,)
end
-@add_kwonly function SplitFunction(f1, f2, mass_matrix, cache, analytic, tgrad, jac, jvp,
- vjp, jac_prototype, sparsity, Wfact, Wfact_t, paramjac,
- syms, indepsym, paramsyms, observed, colorvec, sys)
+@add_kwonly function SplitFunction(f1,
+ f2,
+ mass_matrix,
+ cache,
+ analytic,
+ tgrad,
+ jac,
+ jvp,
+ vjp,
+ jac_prototype,
+ sparsity,
+ Wfact,
+ Wfact_t,
+ paramjac,
+ syms,
+ indepsym,
+ paramsyms,
+ observed,
+ colorvec,
+ sys)
f1 = ODEFunction(f1)
f2 = ODEFunction(f2)
@@ -2710,83 +3168,186 @@ end
throw(NonconformingFunctionsError(["f2"]))
end
- SplitFunction{isinplace(f2), FullSpecialize, typeof(f1), typeof(f2),
+ SplitFunction{
+ isinplace(f2),
+ FullSpecialize,
+ typeof(f1),
+ typeof(f2),
typeof(mass_matrix),
- typeof(cache), typeof(analytic), typeof(tgrad), typeof(jac), typeof(jvp),
- typeof(vjp), typeof(jac_prototype), typeof(sparsity),
- typeof(Wfact), typeof(Wfact_t), typeof(paramjac), typeof(syms),
- typeof(indepsym), typeof(paramsyms), typeof(observed), typeof(colorvec),
- typeof(sys)}(f1, f2, mass_matrix, cache, analytic, tgrad, jac, jvp, vjp,
- jac_prototype, sparsity, Wfact, Wfact_t, paramjac, syms,
+ typeof(cache),
+ typeof(analytic),
+ typeof(tgrad),
+ typeof(jac),
+ typeof(jvp),
+ typeof(vjp),
+ typeof(jac_prototype),
+ typeof(sparsity),
+ typeof(Wfact),
+ typeof(Wfact_t),
+ typeof(paramjac),
+ typeof(syms),
+ typeof(indepsym),
+ typeof(paramsyms),
+ typeof(observed),
+ typeof(colorvec),
+ typeof(sys),
+ }(f1,
+ f2,
+ mass_matrix,
+ cache,
+ analytic,
+ tgrad,
+ jac,
+ jvp,
+ vjp,
+ jac_prototype,
+ sparsity,
+ Wfact,
+ Wfact_t,
+ paramjac,
+ syms,
indepsym,
- paramsyms, observed, colorvec, sys)
+ paramsyms,
+ observed,
+ colorvec,
+ sys)
end
-function SplitFunction{iip, specialize}(f1, f2;
- mass_matrix = __has_mass_matrix(f1) ?
- f1.mass_matrix : I,
- _func_cache = nothing,
- analytic = __has_analytic(f1) ? f1.analytic :
- nothing,
- tgrad = __has_tgrad(f1) ? f1.tgrad : nothing,
- jac = __has_jac(f1) ? f1.jac : nothing,
- jvp = __has_jvp(f1) ? f1.jvp : nothing,
- vjp = __has_vjp(f1) ? f1.vjp : nothing,
- jac_prototype = __has_jac_prototype(f1) ?
- f1.jac_prototype :
- nothing,
- sparsity = __has_sparsity(f1) ? f1.sparsity :
- jac_prototype,
- Wfact = __has_Wfact(f1) ? f1.Wfact : nothing,
- Wfact_t = __has_Wfact_t(f1) ? f1.Wfact_t : nothing,
- paramjac = __has_paramjac(f1) ? f1.paramjac :
- nothing,
- syms = __has_syms(f1) ? f1.syms : nothing,
- indepsym = __has_indepsym(f1) ? f1.indepsym :
- nothing,
- paramsyms = __has_paramsyms(f1) ? f1.paramsyms :
- nothing,
- observed = __has_observed(f1) ? f1.observed :
- DEFAULT_OBSERVED,
- colorvec = __has_colorvec(f1) ? f1.colorvec :
- nothing,
- sys = __has_sys(f1) ? f1.sys : nothing) where {iip,
- specialize,
-}
+function SplitFunction{iip, specialize}(f1,
+ f2;
+ mass_matrix = __has_mass_matrix(f1) ? f1.mass_matrix : I,
+ _func_cache = nothing,
+ analytic = __has_analytic(f1) ? f1.analytic : nothing,
+ tgrad = __has_tgrad(f1) ? f1.tgrad : nothing,
+ jac = __has_jac(f1) ? f1.jac : nothing,
+ jvp = __has_jvp(f1) ? f1.jvp : nothing,
+ vjp = __has_vjp(f1) ? f1.vjp : nothing,
+ jac_prototype = __has_jac_prototype(f1) ? f1.jac_prototype : nothing,
+ sparsity = __has_sparsity(f1) ? f1.sparsity : jac_prototype,
+ Wfact = __has_Wfact(f1) ? f1.Wfact : nothing,
+ Wfact_t = __has_Wfact_t(f1) ? f1.Wfact_t : nothing,
+ paramjac = __has_paramjac(f1) ? f1.paramjac : nothing,
+ syms = __has_syms(f1) ? f1.syms : nothing,
+ indepsym = __has_indepsym(f1) ? f1.indepsym : nothing,
+ paramsyms = __has_paramsyms(f1) ? f1.paramsyms : nothing,
+ observed = __has_observed(f1) ? f1.observed : DEFAULT_OBSERVED,
+ colorvec = __has_colorvec(f1) ? f1.colorvec : nothing,
+ sys = __has_sys(f1) ? f1.sys : nothing,) where {iip, specialize}
if specialize === NoSpecialize
- SplitFunction{iip, specialize, Any, Any, Any, Any, Any, Any, Any, Any, Any,
- Any, Any, Any, Any, Any,
- Any, Any, Any, Any, Any, Any}(f1, f2, mass_matrix, _func_cache,
+ SplitFunction{
+ iip,
+ specialize,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ }(f1,
+ f2,
+ mass_matrix,
+ _func_cache,
analytic,
- tgrad, jac, jvp, vjp, jac_prototype,
- sparsity, Wfact, Wfact_t, paramjac,
- syms, indepsym, paramsyms,
- observed, colorvec, sys)
+ tgrad,
+ jac,
+ jvp,
+ vjp,
+ jac_prototype,
+ sparsity,
+ Wfact,
+ Wfact_t,
+ paramjac,
+ syms,
+ indepsym,
+ paramsyms,
+ observed,
+ colorvec,
+ sys)
else
- SplitFunction{iip, specialize, typeof(f1), typeof(f2), typeof(mass_matrix),
- typeof(_func_cache), typeof(analytic),
- typeof(tgrad), typeof(jac), typeof(jvp), typeof(vjp),
- typeof(jac_prototype), typeof(sparsity),
- typeof(Wfact), typeof(Wfact_t), typeof(paramjac), typeof(syms),
- typeof(indepsym), typeof(paramsyms), typeof(observed),
+ SplitFunction{
+ iip,
+ specialize,
+ typeof(f1),
+ typeof(f2),
+ typeof(mass_matrix),
+ typeof(_func_cache),
+ typeof(analytic),
+ typeof(tgrad),
+ typeof(jac),
+ typeof(jvp),
+ typeof(vjp),
+ typeof(jac_prototype),
+ typeof(sparsity),
+ typeof(Wfact),
+ typeof(Wfact_t),
+ typeof(paramjac),
+ typeof(syms),
+ typeof(indepsym),
+ typeof(paramsyms),
+ typeof(observed),
typeof(colorvec),
- typeof(sys)}(f1, f2, mass_matrix, _func_cache, analytic, tgrad, jac,
- jvp, vjp, jac_prototype,
- sparsity, Wfact, Wfact_t, paramjac, syms, indepsym,
- paramsyms, observed, colorvec, sys)
+ typeof(sys),
+ }(f1,
+ f2,
+ mass_matrix,
+ _func_cache,
+ analytic,
+ tgrad,
+ jac,
+ jvp,
+ vjp,
+ jac_prototype,
+ sparsity,
+ Wfact,
+ Wfact_t,
+ paramjac,
+ syms,
+ indepsym,
+ paramsyms,
+ observed,
+ colorvec,
+ sys)
end
end
SplitFunction(f1, f2; kwargs...) = SplitFunction{isinplace(f2, 4)}(f1, f2; kwargs...)
function SplitFunction{iip}(f1, f2; kwargs...) where {iip}
- SplitFunction{iip, FullSpecialize}(ODEFunction(f1), ODEFunction{iip}(f2);
- kwargs...)
+ SplitFunction{iip, FullSpecialize}(ODEFunction(f1), ODEFunction{iip}(f2); kwargs...)
end
SplitFunction(f::SplitFunction; kwargs...) = f
-@add_kwonly function DynamicalODEFunction{iip}(f1, f2, mass_matrix, analytic, tgrad, jac,
- jvp, vjp, jac_prototype, sparsity, Wfact,
- Wfact_t, paramjac, syms, indepsym, paramsyms,
- observed, colorvec, sys) where {iip}
+@add_kwonly function DynamicalODEFunction{iip}(f1,
+ f2,
+ mass_matrix,
+ analytic,
+ tgrad,
+ jac,
+ jvp,
+ vjp,
+ jac_prototype,
+ sparsity,
+ Wfact,
+ Wfact_t,
+ paramjac,
+ syms,
+ indepsym,
+ paramsyms,
+ observed,
+ colorvec,
+ sys) where {iip}
f1 = f1 isa AbstractSciMLOperator ? f1 : ODEFunction(f1)
f2 = ODEFunction(f2)
@@ -2794,77 +3355,151 @@ SplitFunction(f::SplitFunction; kwargs...) = f
throw(NonconformingFunctionsError(["f2"]))
end
- DynamicalODEFunction{isinplace(f2), FullSpecialize, typeof(f1), typeof(f2),
+ DynamicalODEFunction{
+ isinplace(f2),
+ FullSpecialize,
+ typeof(f1),
+ typeof(f2),
typeof(mass_matrix),
- typeof(analytic), typeof(tgrad), typeof(jac), typeof(jvp),
+ typeof(analytic),
+ typeof(tgrad),
+ typeof(jac),
+ typeof(jvp),
typeof(vjp),
typeof(jac_prototype),
- typeof(Wfact), typeof(Wfact_t), typeof(paramjac), typeof(syms),
- typeof(indepsym), typeof(paramsyms), typeof(observed),
+ typeof(Wfact),
+ typeof(Wfact_t),
+ typeof(paramjac),
+ typeof(syms),
+ typeof(indepsym),
+ typeof(paramsyms),
+ typeof(observed),
typeof(colorvec),
- typeof(sys)}(f1, f2, mass_matrix, analytic, tgrad, jac, jvp,
- vjp, jac_prototype, sparsity, Wfact, Wfact_t,
- paramjac, syms, indepsym, paramsyms, observed,
- colorvec, sys)
+ typeof(sys),
+ }(f1,
+ f2,
+ mass_matrix,
+ analytic,
+ tgrad,
+ jac,
+ jvp,
+ vjp,
+ jac_prototype,
+ sparsity,
+ Wfact,
+ Wfact_t,
+ paramjac,
+ syms,
+ indepsym,
+ paramsyms,
+ observed,
+ colorvec,
+ sys)
end
-function DynamicalODEFunction{iip, specialize}(f1, f2;
- mass_matrix = __has_mass_matrix(f1) ?
- f1.mass_matrix : I,
- analytic = __has_analytic(f1) ? f1.analytic :
- nothing,
- tgrad = __has_tgrad(f1) ? f1.tgrad : nothing,
- jac = __has_jac(f1) ? f1.jac : nothing,
- jvp = __has_jvp(f1) ? f1.jvp : nothing,
- vjp = __has_vjp(f1) ? f1.vjp : nothing,
- jac_prototype = __has_jac_prototype(f1) ?
- f1.jac_prototype : nothing,
- sparsity = __has_sparsity(f1) ? f1.sparsity :
- jac_prototype,
- Wfact = __has_Wfact(f1) ? f1.Wfact : nothing,
- Wfact_t = __has_Wfact_t(f1) ? f1.Wfact_t :
- nothing,
- paramjac = __has_paramjac(f1) ? f1.paramjac :
- nothing,
- syms = __has_syms(f1) ? f1.syms : nothing,
- indepsym = __has_indepsym(f1) ? f1.indepsym :
- nothing,
- paramsyms = __has_paramsyms(f1) ?
- f1.paramsyms :
- nothing,
- observed = __has_observed(f1) ? f1.observed :
- DEFAULT_OBSERVED,
- colorvec = __has_colorvec(f1) ? f1.colorvec :
- nothing,
- sys = __has_sys(f1) ? f1.sys : nothing) where {
- iip,
- specialize,
-}
+function DynamicalODEFunction{iip, specialize}(f1,
+ f2;
+ mass_matrix = __has_mass_matrix(f1) ? f1.mass_matrix : I,
+ analytic = __has_analytic(f1) ? f1.analytic : nothing,
+ tgrad = __has_tgrad(f1) ? f1.tgrad : nothing,
+ jac = __has_jac(f1) ? f1.jac : nothing,
+ jvp = __has_jvp(f1) ? f1.jvp : nothing,
+ vjp = __has_vjp(f1) ? f1.vjp : nothing,
+ jac_prototype = __has_jac_prototype(f1) ? f1.jac_prototype : nothing,
+ sparsity = __has_sparsity(f1) ? f1.sparsity : jac_prototype,
+ Wfact = __has_Wfact(f1) ? f1.Wfact : nothing,
+ Wfact_t = __has_Wfact_t(f1) ? f1.Wfact_t : nothing,
+ paramjac = __has_paramjac(f1) ? f1.paramjac : nothing,
+ syms = __has_syms(f1) ? f1.syms : nothing,
+ indepsym = __has_indepsym(f1) ? f1.indepsym : nothing,
+ paramsyms = __has_paramsyms(f1) ? f1.paramsyms : nothing,
+ observed = __has_observed(f1) ? f1.observed : DEFAULT_OBSERVED,
+ colorvec = __has_colorvec(f1) ? f1.colorvec : nothing,
+ sys = __has_sys(f1) ? f1.sys : nothing,) where {iip, specialize}
if specialize === NoSpecialize
- DynamicalODEFunction{iip, specialize, Any, Any, Any, Any, Any, Any, Any,
- Any, Any, Any, Any, Any,
- Any, Any, Any, Any, Any, Any, Any}(f1, f2, mass_matrix,
+ DynamicalODEFunction{
+ iip,
+ specialize,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ }(f1,
+ f2,
+ mass_matrix,
analytic,
tgrad,
- jac, jvp, vjp,
+ jac,
+ jvp,
+ vjp,
jac_prototype,
sparsity,
- Wfact, Wfact_t, paramjac,
- syms, indepsym, paramsyms,
- observed, colorvec, sys)
+ Wfact,
+ Wfact_t,
+ paramjac,
+ syms,
+ indepsym,
+ paramsyms,
+ observed,
+ colorvec,
+ sys)
else
- DynamicalODEFunction{iip, specialize, typeof(f1), typeof(f2), typeof(mass_matrix),
+ DynamicalODEFunction{
+ iip,
+ specialize,
+ typeof(f1),
+ typeof(f2),
+ typeof(mass_matrix),
typeof(analytic),
- typeof(tgrad), typeof(jac), typeof(jvp), typeof(vjp),
- typeof(jac_prototype), typeof(sparsity),
- typeof(Wfact), typeof(Wfact_t), typeof(paramjac), typeof(syms),
- typeof(indepsym), typeof(paramsyms), typeof(observed),
+ typeof(tgrad),
+ typeof(jac),
+ typeof(jvp),
+ typeof(vjp),
+ typeof(jac_prototype),
+ typeof(sparsity),
+ typeof(Wfact),
+ typeof(Wfact_t),
+ typeof(paramjac),
+ typeof(syms),
+ typeof(indepsym),
+ typeof(paramsyms),
+ typeof(observed),
typeof(colorvec),
- typeof(sys)}(f1, f2, mass_matrix, analytic, tgrad, jac, jvp,
- vjp, jac_prototype, sparsity,
- Wfact, Wfact_t, paramjac, syms, indepsym,
- paramsyms, observed,
- colorvec, sys)
+ typeof(sys),
+ }(f1,
+ f2,
+ mass_matrix,
+ analytic,
+ tgrad,
+ jac,
+ jvp,
+ vjp,
+ jac_prototype,
+ sparsity,
+ Wfact,
+ Wfact_t,
+ paramjac,
+ syms,
+ indepsym,
+ paramsyms,
+ observed,
+ colorvec,
+ sys)
end
end
@@ -2873,34 +3508,45 @@ function DynamicalODEFunction(f1, f2 = nothing; kwargs...)
end
function DynamicalODEFunction{iip}(f1, f2; kwargs...) where {iip}
DynamicalODEFunction{iip, FullSpecialize}(ODEFunction{iip}(f1),
- ODEFunction{iip}(f2); kwargs...)
+ ODEFunction{iip}(f2);
+ kwargs...,)
end
DynamicalODEFunction(f::DynamicalODEFunction; kwargs...) = f
function DiscreteFunction{iip, specialize}(f;
- analytic = __has_analytic(f) ? f.analytic :
- nothing,
- syms = __has_syms(f) ? f.syms : nothing,
- indepsym = __has_indepsym(f) ? f.indepsym :
- nothing,
- paramsyms = __has_paramsyms(f) ? f.paramsyms :
- nothing,
- observed = __has_observed(f) ? f.observed :
- DEFAULT_OBSERVED,
- sys = __has_sys(f) ? f.sys : nothing) where {iip,
- specialize,
-}
+ analytic = __has_analytic(f) ? f.analytic : nothing,
+ syms = __has_syms(f) ? f.syms : nothing,
+ indepsym = __has_indepsym(f) ? f.indepsym : nothing,
+ paramsyms = __has_paramsyms(f) ? f.paramsyms : nothing,
+ observed = __has_observed(f) ? f.observed : DEFAULT_OBSERVED,
+ sys = __has_sys(f) ? f.sys : nothing,) where {iip, specialize}
_f = prepare_function(f)
if specialize === NoSpecialize
- DiscreteFunction{iip, specialize, Any, Any, Any, Any, Any, Any, Any}(_f, analytic,
- syms, indepsym,
+ DiscreteFunction{iip, specialize, Any, Any, Any, Any, Any, Any, Any}(_f,
+ analytic,
+ syms,
+ indepsym,
- parasmsyms,
+ paramsyms,
- observed, sys)
+ observed,
+ sys)
else
- DiscreteFunction{iip, specialize, typeof(_f), typeof(analytic),
- typeof(syms), typeof(indepsym), typeof(paramsyms),
- typeof(observed), typeof(sys)}(_f, analytic, syms, indepsym,
- paramsyms, observed, sys)
+ DiscreteFunction{
+ iip,
+ specialize,
+ typeof(_f),
+ typeof(analytic),
+ typeof(syms),
+ typeof(indepsym),
+ typeof(paramsyms),
+ typeof(observed),
+ typeof(sys),
+ }(_f,
+ analytic,
+ syms,
+ indepsym,
+ paramsyms,
+ observed,
+ sys)
end
end
@@ -2917,36 +3563,41 @@ function unwrapped_f(f::DiscreteFunction, newf = unwrapped_f(f.f))
specialize = specialization(f)
if specialize === NoSpecialize
- DiscreteFunction{isinplace(f), specialize, Any, Any,
- Any, Any, Any, Any, Any}(newf, f.analytic, f.syms, f.indepsym,
- f.paramsyms, f.observed, f.sys)
+ DiscreteFunction{isinplace(f), specialize, Any, Any, Any, Any, Any, Any, Any}(newf,
+ f.analytic,
+ f.syms,
+ f.indepsym,
+ f.paramsyms,
+ f.observed,
+ f.sys)
else
- DiscreteFunction{isinplace(f), specialize, typeof(newf), typeof(f.analytic),
- typeof(f.syms), typeof(f.indepsym), typeof(f.paramsyms),
- typeof(f.observed), typeof(f.sys)}(newf, f.analytic, f.syms,
- f.indepsym, f.paramsyms,
- f.observed, f.sys)
+ DiscreteFunction{
+ isinplace(f),
+ specialize,
+ typeof(newf),
+ typeof(f.analytic),
+ typeof(f.syms),
+ typeof(f.indepsym),
+ typeof(f.paramsyms),
+ typeof(f.observed),
+ typeof(f.sys),
+ }(newf,
+ f.analytic,
+ f.syms,
+ f.indepsym,
+ f.paramsyms,
+ f.observed,
+ f.sys)
end
end
function ImplicitDiscreteFunction{iip, specialize}(f;
- analytic = __has_analytic(f) ?
- f.analytic :
- nothing,
- syms = __has_syms(f) ? f.syms : nothing,
- indepsym = __has_indepsym(f) ?
- f.indepsym :
- nothing,
- paramsyms = __has_paramsyms(f) ?
- f.paramsyms :
- nothing,
- observed = __has_observed(f) ?
- f.observed :
- DEFAULT_OBSERVED,
- sys = __has_sys(f) ? f.sys : nothing) where {
- iip,
- specialize,
-}
+ analytic = __has_analytic(f) ? f.analytic : nothing,
+ syms = __has_syms(f) ? f.syms : nothing,
+ indepsym = __has_indepsym(f) ? f.indepsym : nothing,
+ paramsyms = __has_paramsyms(f) ? f.paramsyms : nothing,
+ observed = __has_observed(f) ? f.observed : DEFAULT_OBSERVED,
+ sys = __has_sys(f) ? f.sys : nothing,) where {iip, specialize}
_f = prepare_function(f)
if specialize === NoSpecialize
ImplicitDiscreteFunction{iip, specialize, Any, Any, Any, Any, Any, Any, Any}(_f,
@@ -2957,10 +3608,23 @@ function ImplicitDiscreteFunction{iip, specialize}(f;
observed,
sys)
else
- ImplicitDiscreteFunction{iip, specialize, typeof(_f), typeof(analytic),
- typeof(syms), typeof(indepsym), typeof(paramsyms),
- typeof(observed), typeof(sys)}(_f, analytic, syms, indepsym,
- paramsyms, observed, sys)
+ ImplicitDiscreteFunction{
+ iip,
+ specialize,
+ typeof(_f),
+ typeof(analytic),
+ typeof(syms),
+ typeof(indepsym),
+ typeof(paramsyms),
+ typeof(observed),
+ typeof(sys),
+ }(_f,
+ analytic,
+ syms,
+ indepsym,
+ paramsyms,
+ observed,
+ sys)
end
end
@@ -2977,48 +3641,64 @@ function unwrapped_f(f::ImplicitDiscreteFunction, newf = unwrapped_f(f.f))
specialize = specialization(f)
if specialize === NoSpecialize
- ImplicitDiscreteFunction{isinplace(f, 6), specialize, Any, Any,
- Any, Any, Any, Any, Any}(newf, f.analytic, f.syms,
+ ImplicitDiscreteFunction{
+ isinplace(f, 6),
+ specialize,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ }(newf,
+ f.analytic,
+ f.syms,
f.indepsym,
- f.paramsyms, f.observed, f.sys)
+ f.paramsyms,
+ f.observed,
+ f.sys)
else
- ImplicitDiscreteFunction{isinplace(f, 6), specialize, typeof(newf),
+ ImplicitDiscreteFunction{
+ isinplace(f, 6),
+ specialize,
+ typeof(newf),
typeof(f.analytic),
- typeof(f.syms), typeof(f.indepsym), typeof(f.paramsyms),
- typeof(f.observed), typeof(f.sys)}(newf, f.analytic,
+ typeof(f.syms),
+ typeof(f.indepsym),
+ typeof(f.paramsyms),
+ typeof(f.observed),
+ typeof(f.sys),
+ }(newf,
+ f.analytic,
f.syms,
- f.indepsym, f.paramsyms,
- f.observed, f.sys)
+ f.indepsym,
+ f.paramsyms,
+ f.observed,
+ f.sys)
end
end
-function SDEFunction{iip, specialize}(f, g;
- mass_matrix = __has_mass_matrix(f) ? f.mass_matrix :
- I,
- analytic = __has_analytic(f) ? f.analytic : nothing,
- tgrad = __has_tgrad(f) ? f.tgrad : nothing,
- jac = __has_jac(f) ? f.jac : nothing,
- jvp = __has_jvp(f) ? f.jvp : nothing,
- vjp = __has_vjp(f) ? f.vjp : nothing,
- jac_prototype = __has_jac_prototype(f) ?
- f.jac_prototype :
- nothing,
- sparsity = __has_sparsity(f) ? f.sparsity :
- jac_prototype,
- Wfact = __has_Wfact(f) ? f.Wfact : nothing,
- Wfact_t = __has_Wfact_t(f) ? f.Wfact_t : nothing,
- paramjac = __has_paramjac(f) ? f.paramjac : nothing,
- ggprime = nothing,
- syms = __has_syms(f) ? f.syms : nothing,
- indepsym = __has_indepsym(f) ? f.indepsym : nothing,
- paramsyms = __has_paramsyms(f) ? f.paramsyms :
- nothing,
- observed = __has_observed(f) ? f.observed :
- DEFAULT_OBSERVED,
- colorvec = __has_colorvec(f) ? f.colorvec : nothing,
- sys = __has_sys(f) ? f.sys : nothing) where {iip,
- specialize,
-}
+function SDEFunction{iip, specialize}(f,
+ g;
+ mass_matrix = __has_mass_matrix(f) ? f.mass_matrix : I,
+ analytic = __has_analytic(f) ? f.analytic : nothing,
+ tgrad = __has_tgrad(f) ? f.tgrad : nothing,
+ jac = __has_jac(f) ? f.jac : nothing,
+ jvp = __has_jvp(f) ? f.jvp : nothing,
+ vjp = __has_vjp(f) ? f.vjp : nothing,
+ jac_prototype = __has_jac_prototype(f) ? f.jac_prototype : nothing,
+ sparsity = __has_sparsity(f) ? f.sparsity : jac_prototype,
+ Wfact = __has_Wfact(f) ? f.Wfact : nothing,
+ Wfact_t = __has_Wfact_t(f) ? f.Wfact_t : nothing,
+ paramjac = __has_paramjac(f) ? f.paramjac : nothing,
+ ggprime = nothing,
+ syms = __has_syms(f) ? f.syms : nothing,
+ indepsym = __has_indepsym(f) ? f.indepsym : nothing,
+ paramsyms = __has_paramsyms(f) ? f.paramsyms : nothing,
+ observed = __has_observed(f) ? f.observed : DEFAULT_OBSERVED,
+ colorvec = __has_colorvec(f) ? f.colorvec : nothing,
+ sys = __has_sys(f) ? f.sys : nothing,) where {iip, specialize}
if jac === nothing && isa(jac_prototype, AbstractSciMLOperator)
if iip
jac = update_coefficients! #(J,u,p,t)
@@ -3027,7 +3707,8 @@ function SDEFunction{iip, specialize}(f, g;
end
end
- if jac_prototype !== nothing && colorvec === nothing &&
+ if jac_prototype !== nothing &&
+ colorvec === nothing &&
ArrayInterface.fast_matrix_colors(jac_prototype)
_colorvec = ArrayInterface.matrix_colors(jac_prototype)
else
@@ -3043,7 +3724,13 @@ function SDEFunction{iip, specialize}(f, g;
Wfact_tiip = Wfact_t !== nothing ? isinplace(Wfact_t, 5, "Wfact_t", iip) : iip
paramjaciip = paramjac !== nothing ? isinplace(paramjac, 4, "paramjac", iip) : iip
- nonconforming = (giip, jaciip, tgradiip, jvpiip, vjpiip, Wfactiip, Wfact_tiip,
+ nonconforming = (giip,
+ jaciip,
+ tgradiip,
+ jvpiip,
+ vjpiip,
+ Wfactiip,
+ Wfact_tiip,
paramjaciip) .!= iip
if any(nonconforming)
nonconforming = findall(nonconforming)
@@ -3054,51 +3741,131 @@ function SDEFunction{iip, specialize}(f, g;
_f = prepare_function(f)
_g = prepare_function(g)
if specialize === NoSpecialize
- SDEFunction{iip, specialize, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any,
- Any, Any, Any, Any, typeof(syms), typeof(indepsym), typeof(paramsyms),
- Any,
- typeof(_colorvec), typeof(sys)}(_f, _g, mass_matrix, analytic,
- tgrad, jac, jvp, vjp,
- jac_prototype, sparsity,
- Wfact, Wfact_t, paramjac, ggprime, syms,
- indepsym, paramsyms, observed,
- _colorvec, sys)
+ SDEFunction{
+ iip,
+ specialize,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ typeof(syms),
+ typeof(indepsym),
+ typeof(paramsyms),
+ Any,
+ typeof(_colorvec),
+ typeof(sys),
+ }(_f,
+ _g,
+ mass_matrix,
+ analytic,
+ tgrad,
+ jac,
+ jvp,
+ vjp,
+ jac_prototype,
+ sparsity,
+ Wfact,
+ Wfact_t,
+ paramjac,
+ ggprime,
+ syms,
+ indepsym,
+ paramsyms,
+ observed,
+ _colorvec,
+ sys)
else
- SDEFunction{iip, specialize, typeof(_f), typeof(_g),
- typeof(mass_matrix), typeof(analytic), typeof(tgrad),
- typeof(jac), typeof(jvp), typeof(vjp), typeof(jac_prototype),
- typeof(sparsity), typeof(Wfact), typeof(Wfact_t),
- typeof(paramjac), typeof(ggprime), typeof(syms),
- typeof(indepsym), typeof(paramsyms),
- typeof(observed), typeof(_colorvec), typeof(sys)}(_f, _g, mass_matrix,
- analytic, tgrad, jac,
- jvp, vjp,
+ SDEFunction{
+ iip,
+ specialize,
+ typeof(_f),
+ typeof(_g),
+ typeof(mass_matrix),
+ typeof(analytic),
+ typeof(tgrad),
+ typeof(jac),
+ typeof(jvp),
+ typeof(vjp),
+ typeof(jac_prototype),
+ typeof(sparsity),
+ typeof(Wfact),
+ typeof(Wfact_t),
+ typeof(paramjac),
+ typeof(ggprime),
+ typeof(syms),
+ typeof(indepsym),
+ typeof(paramsyms),
+ typeof(observed),
+ typeof(_colorvec),
+ typeof(sys),
+ }(_f,
+ _g,
+ mass_matrix,
+ analytic,
+ tgrad,
+ jac,
+ jvp,
+ vjp,
jac_prototype,
- sparsity, Wfact,
+ sparsity,
+ Wfact,
Wfact_t,
- paramjac, ggprime,
- syms, indepsym,
+ paramjac,
+ ggprime,
+ syms,
+ indepsym,
paramsyms,
- observed, _colorvec,
+ observed,
+ _colorvec,
sys)
end
end
-function unwrapped_f(f::SDEFunction, newf = unwrapped_f(f.f),
- newg = unwrapped_f(f.g))
+function unwrapped_f(f::SDEFunction, newf = unwrapped_f(f.f), newg = unwrapped_f(f.g))
specialize = specialization(f)
if specialize === NoSpecialize
- SDEFunction{isinplace(f), specialize, Any, Any,
- typeoff(f.mass_matrix), Any, Any,
- Any, Any, Any, typeof(f.jac_prototype),
- typeof(f.sparsity), Any, Any,
- Any, Any, typeof(f.syms), tyepeof(f.indepsym), typeof(f.paramsyms),
- typeof(f.observed), typeof(f.colorvec), typeof(f.sys)}(newf, newg,
+ SDEFunction{
+ isinplace(f),
+ specialize,
+ Any,
+ Any,
+ typeof(f.mass_matrix),
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ typeof(f.jac_prototype),
+ typeof(f.sparsity),
+ Any,
+ Any,
+ Any,
+ Any,
+ typeof(f.syms),
+ typeof(f.indepsym),
+ typeof(f.paramsyms),
+ typeof(f.observed),
+ typeof(f.colorvec),
+ typeof(f.sys),
+ }(newf,
+ newg,
f.mass_matrix,
f.analytic,
- f.tgrad, f.jac,
- f.jvp, f.vjp,
+ f.tgrad,
+ f.jac,
+ f.jvp,
+ f.vjp,
f.jac_prototype,
f.sparsity,
f.Wfact,
@@ -3112,17 +3879,37 @@ function unwrapped_f(f::SDEFunction, newf = unwrapped_f(f.f),
f.colorvec,
f.sys)
else
- SDEFunction{isinplace(f), specialize, typeof(newf), typeof(newg),
- typeof(f.mass_matrix), typeof(f.analytic), typeof(f.tgrad),
- typeof(f.jac), typeof(f.jvp), typeof(f.vjp), typeof(f.jac_prototype),
- typeof(f.sparsity), typeof(f.Wfact), typeof(f.Wfact_t),
- typeof(f.paramjac), typeof(f.ggprime), typeof(f.syms),
- typeof(f.indepsym), typeof(f.paramsyms),
- typeof(f.observed), typeof(f.colorvec), typeof(f.sys)}(newf, newg,
+ SDEFunction{
+ isinplace(f),
+ specialize,
+ typeof(newf),
+ typeof(newg),
+ typeof(f.mass_matrix),
+ typeof(f.analytic),
+ typeof(f.tgrad),
+ typeof(f.jac),
+ typeof(f.jvp),
+ typeof(f.vjp),
+ typeof(f.jac_prototype),
+ typeof(f.sparsity),
+ typeof(f.Wfact),
+ typeof(f.Wfact_t),
+ typeof(f.paramjac),
+ typeof(f.ggprime),
+ typeof(f.syms),
+ typeof(f.indepsym),
+ typeof(f.paramsyms),
+ typeof(f.observed),
+ typeof(f.colorvec),
+ typeof(f.sys),
+ }(newf,
+ newg,
f.mass_matrix,
f.analytic,
- f.tgrad, f.jac,
- f.jvp, f.vjp,
+ f.tgrad,
+ f.jac,
+ f.jvp,
+ f.vjp,
f.jac_prototype,
f.sparsity,
f.Wfact,
@@ -3147,79 +3934,181 @@ function SDEFunction(f, g; kwargs...)
end
SDEFunction(f::SDEFunction; kwargs...) = f
-@add_kwonly function SplitSDEFunction(f1, f2, g, mass_matrix, cache, analytic, tgrad, jac,
- jvp, vjp,
- jac_prototype, Wfact, Wfact_t, paramjac, observed,
- syms, indepsym, paramsyms, colorvec, sys)
+@add_kwonly function SplitSDEFunction(f1,
+ f2,
+ g,
+ mass_matrix,
+ cache,
+ analytic,
+ tgrad,
+ jac,
+ jvp,
+ vjp,
+ jac_prototype,
+ Wfact,
+ Wfact_t,
+ paramjac,
+ observed,
+ syms,
+ indepsym,
+ paramsyms,
+ colorvec,
+ sys)
f1 = f1 isa AbstractSciMLOperator ? f1 : SDEFunction(f1)
f2 = SDEFunction(f2)
- SplitFunction{isinplace(f2), typeof(f1), typeof(f2), typeof(g), typeof(mass_matrix),
- typeof(cache), typeof(analytic), typeof(tgrad), typeof(jac), typeof(jvp),
+ SplitFunction{
+ isinplace(f2),
+ typeof(f1),
+ typeof(f2),
+ typeof(g),
+ typeof(mass_matrix),
+ typeof(cache),
+ typeof(analytic),
+ typeof(tgrad),
+ typeof(jac),
+ typeof(jvp),
typeof(vjp),
- typeof(Wfact), typeof(Wfact_t), typeof(paramjac), typeof(syms),
- typeof(indepsym), typeof(paramsyms), typeof(observed),
+ typeof(Wfact),
+ typeof(Wfact_t),
+ typeof(paramjac),
+ typeof(syms),
+ typeof(indepsym),
+ typeof(paramsyms),
+ typeof(observed),
typeof(colorvec),
- typeof(sys)}(f1, f2, mass_matrix, cache, analytic, tgrad, jac,
- jac_prototype, Wfact, Wfact_t, paramjac, syms, indepsym,
- paramsyms, observed, colorvec, sys)
+ typeof(sys),
+ }(f1,
+ f2,
+ mass_matrix,
+ cache,
+ analytic,
+ tgrad,
+ jac,
+ jac_prototype,
+ Wfact,
+ Wfact_t,
+ paramjac,
+ syms,
+ indepsym,
+ paramsyms,
+ observed,
+ colorvec,
+ sys)
end
-function SplitSDEFunction{iip, specialize}(f1, f2, g;
- mass_matrix = __has_mass_matrix(f1) ?
- f1.mass_matrix :
- I,
- _func_cache = nothing,
- analytic = __has_analytic(f1) ? f1.analytic :
- nothing,
- tgrad = __has_tgrad(f1) ? f1.tgrad : nothing,
- jac = __has_jac(f1) ? f1.jac : nothing,
- jac_prototype = __has_jac_prototype(f1) ?
- f1.jac_prototype : nothing,
- sparsity = __has_sparsity(f1) ? f1.sparsity :
- jac_prototype,
- jvp = __has_jvp(f1) ? f1.jvp : nothing,
- vjp = __has_vjp(f1) ? f1.vjp : nothing,
- Wfact = __has_Wfact(f1) ? f1.Wfact : nothing,
- Wfact_t = __has_Wfact_t(f1) ? f1.Wfact_t :
- nothing,
- paramjac = __has_paramjac(f1) ? f1.paramjac :
- nothing,
- syms = __has_syms(f1) ? f1.syms : nothing,
- indepsym = __has_indepsym(f1) ? f1.indepsym :
- nothing,
- paramsyms = __has_paramsyms(f1) ? f1.paramsyms :
- nothing,
- observed = __has_observed(f1) ? f1.observed :
- DEFAULT_OBSERVED,
- colorvec = __has_colorvec(f1) ? f1.colorvec :
- nothing,
- sys = __has_sys(f1) ? f1.sys : nothing) where {
- iip,
- specialize,
-}
+function SplitSDEFunction{iip, specialize}(f1,
+ f2,
+ g;
+ mass_matrix = __has_mass_matrix(f1) ? f1.mass_matrix : I,
+ _func_cache = nothing,
+ analytic = __has_analytic(f1) ? f1.analytic : nothing,
+ tgrad = __has_tgrad(f1) ? f1.tgrad : nothing,
+ jac = __has_jac(f1) ? f1.jac : nothing,
+ jac_prototype = __has_jac_prototype(f1) ? f1.jac_prototype : nothing,
+ sparsity = __has_sparsity(f1) ? f1.sparsity : jac_prototype,
+ jvp = __has_jvp(f1) ? f1.jvp : nothing,
+ vjp = __has_vjp(f1) ? f1.vjp : nothing,
+ Wfact = __has_Wfact(f1) ? f1.Wfact : nothing,
+ Wfact_t = __has_Wfact_t(f1) ? f1.Wfact_t : nothing,
+ paramjac = __has_paramjac(f1) ? f1.paramjac : nothing,
+ syms = __has_syms(f1) ? f1.syms : nothing,
+ indepsym = __has_indepsym(f1) ? f1.indepsym : nothing,
+ paramsyms = __has_paramsyms(f1) ? f1.paramsyms : nothing,
+ observed = __has_observed(f1) ? f1.observed : DEFAULT_OBSERVED,
+ colorvec = __has_colorvec(f1) ? f1.colorvec : nothing,
+ sys = __has_sys(f1) ? f1.sys : nothing,) where {iip, specialize}
if specialize === NoSpecialize
- SplitSDEFunction{iip, specialize, Any, Any, Any, Any, Any, Any,
- Any, Any, Any, Any, Any, Any, Any, Any, Any,
- Any, Any, Any, Any, Any, Any}(f1, f2, g, mass_matrix, _func_cache,
+ SplitSDEFunction{
+ iip,
+ specialize,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ }(f1,
+ f2,
+ g,
+ mass_matrix,
+ _func_cache,
analytic,
- tgrad, jac, jvp, vjp, jac_prototype,
+ tgrad,
+ jac,
+ jvp,
+ vjp,
+ jac_prototype,
sparsity,
- Wfact, Wfact_t, paramjac, syms,
- indepsym, paramsyms, observed,
- colorvec, sys)
+ Wfact,
+ Wfact_t,
+ paramjac,
+ syms,
+ indepsym,
+ paramsyms,
+ observed,
+ colorvec,
+ sys)
else
- SplitSDEFunction{iip, specialize, typeof(f1), typeof(f2), typeof(g),
- typeof(mass_matrix), typeof(_func_cache),
+ SplitSDEFunction{
+ iip,
+ specialize,
+ typeof(f1),
+ typeof(f2),
+ typeof(g),
+ typeof(mass_matrix),
+ typeof(_func_cache),
typeof(analytic),
- typeof(tgrad), typeof(jac), typeof(jvp), typeof(vjp),
- typeof(jac_prototype), typeof(sparsity),
- typeof(Wfact), typeof(Wfact_t), typeof(paramjac), typeof(syms),
- typeof(indepsym), typeof(paramsyms), typeof(observed),
+ typeof(tgrad),
+ typeof(jac),
+ typeof(jvp),
+ typeof(vjp),
+ typeof(jac_prototype),
+ typeof(sparsity),
+ typeof(Wfact),
+ typeof(Wfact_t),
+ typeof(paramjac),
+ typeof(syms),
+ typeof(indepsym),
+ typeof(paramsyms),
+ typeof(observed),
typeof(colorvec),
- typeof(sys)}(f1, f2, g, mass_matrix, _func_cache, analytic,
- tgrad, jac, jvp, vjp, jac_prototype, sparsity,
- Wfact, Wfact_t, paramjac, syms, indepsym, paramsyms,
- observed, colorvec, sys)
+ typeof(sys),
+ }(f1,
+ f2,
+ g,
+ mass_matrix,
+ _func_cache,
+ analytic,
+ tgrad,
+ jac,
+ jvp,
+ vjp,
+ jac_prototype,
+ sparsity,
+ Wfact,
+ Wfact_t,
+ paramjac,
+ syms,
+ indepsym,
+ paramsyms,
+ observed,
+ colorvec,
+ sys)
end
end
@@ -3227,85 +4116,190 @@ function SplitSDEFunction(f1, f2, g; kwargs...)
SplitSDEFunction{isinplace(f2, 4)}(f1, f2, g; kwargs...)
end
function SplitSDEFunction{iip}(f1, f2, g; kwargs...) where {iip}
- SplitSDEFunction{iip, FullSpecialize}(SDEFunction(f1, g), SDEFunction{iip}(f2, g),
- g; kwargs...)
+ SplitSDEFunction{iip, FullSpecialize}(SDEFunction(f1, g),
+ SDEFunction{iip}(f2, g),
+ g;
+ kwargs...,)
end
SplitSDEFunction(f::SplitSDEFunction; kwargs...) = f
-@add_kwonly function DynamicalSDEFunction(f1, f2, g, mass_matrix, cache, analytic, tgrad,
- jac, jvp, vjp,
- jac_prototype, Wfact, Wfact_t, paramjac,
- syms, indepsym, paramsyms, observed, colorvec,
- sys)
+@add_kwonly function DynamicalSDEFunction(f1,
+ f2,
+ g,
+ mass_matrix,
+ cache,
+ analytic,
+ tgrad,
+ jac,
+ jvp,
+ vjp,
+ jac_prototype,
+ Wfact,
+ Wfact_t,
+ paramjac,
+ syms,
+ indepsym,
+ paramsyms,
+ observed,
+ colorvec,
+ sys)
f1 = f1 isa AbstractSciMLOperator ? f1 : SDEFunction(f1)
f2 = SDEFunction(f2)
- DynamicalSDEFunction{isinplace(f2), FullSpecialize, typeof(f1), typeof(f2), typeof(g),
+ DynamicalSDEFunction{
+ isinplace(f2),
+ FullSpecialize,
+ typeof(f1),
+ typeof(f2),
+ typeof(g),
typeof(mass_matrix),
- typeof(cache), typeof(analytic), typeof(tgrad), typeof(jac),
- typeof(jvp), typeof(vjp),
- typeof(Wfact), typeof(Wfact_t), typeof(paramjac), typeof(syms),
- typeof(indepsym), typeof(paramsyms), typeof(observed),
+ typeof(cache),
+ typeof(analytic),
+ typeof(tgrad),
+ typeof(jac),
+ typeof(jvp),
+ typeof(vjp),
+ typeof(Wfact),
+ typeof(Wfact_t),
+ typeof(paramjac),
+ typeof(syms),
+ typeof(indepsym),
+ typeof(paramsyms),
+ typeof(observed),
typeof(colorvec),
- typeof(sys)}(f1, f2, g, mass_matrix, cache, analytic, tgrad,
- jac, jac_prototype, Wfact, Wfact_t, paramjac, syms,
- indepsym, paramsyms, observed, colorvec, sys)
+ typeof(sys),
+ }(f1,
+ f2,
+ g,
+ mass_matrix,
+ cache,
+ analytic,
+ tgrad,
+ jac,
+ jac_prototype,
+ Wfact,
+ Wfact_t,
+ paramjac,
+ syms,
+ indepsym,
+ paramsyms,
+ observed,
+ colorvec,
+ sys)
end
-function DynamicalSDEFunction{iip, specialize}(f1, f2, g;
- mass_matrix = __has_mass_matrix(f1) ?
- f1.mass_matrix : I,
- _func_cache = nothing,
- analytic = __has_analytic(f1) ? f1.analytic :
- nothing,
- tgrad = __has_tgrad(f1) ? f1.tgrad : nothing,
- jac = __has_jac(f1) ? f1.jac : nothing,
- jac_prototype = __has_jac_prototype(f1) ?
- f1.jac_prototype : nothing,
- sparsity = __has_sparsity(f1) ? f1.sparsity :
- jac_prototype,
- jvp = __has_jvp(f1) ? f1.jvp : nothing,
- vjp = __has_vjp(f1) ? f1.vjp : nothing,
- Wfact = __has_Wfact(f1) ? f1.Wfact : nothing,
- Wfact_t = __has_Wfact_t(f1) ? f1.Wfact_t :
- nothing,
- paramjac = __has_paramjac(f1) ? f1.paramjac :
- nothing,
- syms = __has_syms(f1) ? f1.syms : nothing,
- indepsym = __has_indepsym(f1) ? f1.indepsym :
- nothing,
- paramsyms = __has_paramsyms(f1) ?
- f1.paramsyms : nothing,
- observed = __has_observed(f1) ? f1.observed :
- DEFAULT_OBSERVED,
- colorvec = __has_colorvec(f1) ? f1.colorvec :
- nothing,
- sys = __has_sys(f1) ? f1.sys : nothing) where {
- iip,
- specialize,
-}
+function DynamicalSDEFunction{iip, specialize}(f1,
+ f2,
+ g;
+ mass_matrix = __has_mass_matrix(f1) ? f1.mass_matrix : I,
+ _func_cache = nothing,
+ analytic = __has_analytic(f1) ? f1.analytic : nothing,
+ tgrad = __has_tgrad(f1) ? f1.tgrad : nothing,
+ jac = __has_jac(f1) ? f1.jac : nothing,
+ jac_prototype = __has_jac_prototype(f1) ? f1.jac_prototype : nothing,
+ sparsity = __has_sparsity(f1) ? f1.sparsity : jac_prototype,
+ jvp = __has_jvp(f1) ? f1.jvp : nothing,
+ vjp = __has_vjp(f1) ? f1.vjp : nothing,
+ Wfact = __has_Wfact(f1) ? f1.Wfact : nothing,
+ Wfact_t = __has_Wfact_t(f1) ? f1.Wfact_t : nothing,
+ paramjac = __has_paramjac(f1) ? f1.paramjac : nothing,
+ syms = __has_syms(f1) ? f1.syms : nothing,
+ indepsym = __has_indepsym(f1) ? f1.indepsym : nothing,
+ paramsyms = __has_paramsyms(f1) ? f1.paramsyms : nothing,
+ observed = __has_observed(f1) ? f1.observed : DEFAULT_OBSERVED,
+ colorvec = __has_colorvec(f1) ? f1.colorvec : nothing,
+ sys = __has_sys(f1) ? f1.sys : nothing,) where {iip, specialize}
if specialize === NoSpecialize
- DynamicalSDEFunction{iip, specialize, Any, Any, Any, Any, Any, Any,
- Any, Any, Any, Any, Any, Any, Any, Any, Any,
- Any, Any, Any, Any, Any, Any}(f1, f2, g, mass_matrix,
+ DynamicalSDEFunction{
+ iip,
+ specialize,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ }(f1,
+ f2,
+ g,
+ mass_matrix,
_func_cache,
- analytic, tgrad, jac, jvp, vjp,
- jac_prototype, sparsity,
- Wfact, Wfact_t, paramjac, syms,
- indepsym, paramsyms, observed,
- colorvec, sys)
+ analytic,
+ tgrad,
+ jac,
+ jvp,
+ vjp,
+ jac_prototype,
+ sparsity,
+ Wfact,
+ Wfact_t,
+ paramjac,
+ syms,
+ indepsym,
+ paramsyms,
+ observed,
+ colorvec,
+ sys)
else
- DynamicalSDEFunction{iip, specialize, typeof(f1), typeof(f2), typeof(g),
- typeof(mass_matrix), typeof(_func_cache),
+ DynamicalSDEFunction{
+ iip,
+ specialize,
+ typeof(f1),
+ typeof(f2),
+ typeof(g),
+ typeof(mass_matrix),
+ typeof(_func_cache),
typeof(analytic),
- typeof(tgrad), typeof(jac), typeof(jvp), typeof(vjp),
- typeof(jac_prototype), typeof(sparsity),
- typeof(Wfact), typeof(Wfact_t), typeof(paramjac), typeof(syms),
- typeof(indepsym), typeof(paramsyms), typeof(observed),
+ typeof(tgrad),
+ typeof(jac),
+ typeof(jvp),
+ typeof(vjp),
+ typeof(jac_prototype),
+ typeof(sparsity),
+ typeof(Wfact),
+ typeof(Wfact_t),
+ typeof(paramjac),
+ typeof(syms),
+ typeof(indepsym),
+ typeof(paramsyms),
+ typeof(observed),
typeof(colorvec),
- typeof(sys)}(f1, f2, g, mass_matrix, _func_cache, analytic,
- tgrad, jac, jvp, vjp, jac_prototype, sparsity,
- Wfact, Wfact_t, paramjac, syms, indepsym,
- paramsyms, observed, colorvec, sys)
+ typeof(sys),
+ }(f1,
+ f2,
+ g,
+ mass_matrix,
+ _func_cache,
+ analytic,
+ tgrad,
+ jac,
+ jvp,
+ vjp,
+ jac_prototype,
+ sparsity,
+ Wfact,
+ Wfact_t,
+ paramjac,
+ syms,
+ indepsym,
+ paramsyms,
+ observed,
+ colorvec,
+ sys)
end
end
@@ -3314,38 +4308,31 @@ function DynamicalSDEFunction(f1, f2, g; kwargs...)
end
function DynamicalSDEFunction{iip}(f1, f2, g; kwargs...) where {iip}
DynamicalSDEFunction{iip, FullSpecialize}(SDEFunction{iip}(f1, g),
- SDEFunction{iip}(f2, g), g; kwargs...)
+ SDEFunction{iip}(f2, g),
+ g;
+ kwargs...,)
end
DynamicalSDEFunction(f::DynamicalSDEFunction; kwargs...) = f
function RODEFunction{iip, specialize}(f;
- mass_matrix = __has_mass_matrix(f) ? f.mass_matrix :
- I,
- analytic = __has_analytic(f) ? f.analytic : nothing,
- tgrad = __has_tgrad(f) ? f.tgrad : nothing,
- jac = __has_jac(f) ? f.jac : nothing,
- jvp = __has_jvp(f) ? f.jvp : nothing,
- vjp = __has_vjp(f) ? f.vjp : nothing,
- jac_prototype = __has_jac_prototype(f) ?
- f.jac_prototype :
- nothing,
- sparsity = __has_sparsity(f) ? f.sparsity :
- jac_prototype,
- Wfact = __has_Wfact(f) ? f.Wfact : nothing,
- Wfact_t = __has_Wfact_t(f) ? f.Wfact_t : nothing,
- paramjac = __has_paramjac(f) ? f.paramjac : nothing,
- syms = __has_syms(f) ? f.syms : nothing,
- indepsym = __has_indepsym(f) ? f.indepsym : nothing,
- paramsyms = __has_paramsyms(f) ? f.paramsyms :
- nothing,
- observed = __has_observed(f) ? f.observed :
- DEFAULT_OBSERVED,
- colorvec = __has_colorvec(f) ? f.colorvec : nothing,
- sys = __has_sys(f) ? f.sys : nothing,
- analytic_full = __has_analytic_full(f) ?
- f.analytic_full : false) where {iip,
- specialize,
-}
+ mass_matrix = __has_mass_matrix(f) ? f.mass_matrix : I,
+ analytic = __has_analytic(f) ? f.analytic : nothing,
+ tgrad = __has_tgrad(f) ? f.tgrad : nothing,
+ jac = __has_jac(f) ? f.jac : nothing,
+ jvp = __has_jvp(f) ? f.jvp : nothing,
+ vjp = __has_vjp(f) ? f.vjp : nothing,
+ jac_prototype = __has_jac_prototype(f) ? f.jac_prototype : nothing,
+ sparsity = __has_sparsity(f) ? f.sparsity : jac_prototype,
+ Wfact = __has_Wfact(f) ? f.Wfact : nothing,
+ Wfact_t = __has_Wfact_t(f) ? f.Wfact_t : nothing,
+ paramjac = __has_paramjac(f) ? f.paramjac : nothing,
+ syms = __has_syms(f) ? f.syms : nothing,
+ indepsym = __has_indepsym(f) ? f.indepsym : nothing,
+ paramsyms = __has_paramsyms(f) ? f.paramsyms : nothing,
+ observed = __has_observed(f) ? f.observed : DEFAULT_OBSERVED,
+ colorvec = __has_colorvec(f) ? f.colorvec : nothing,
+ sys = __has_sys(f) ? f.sys : nothing,
+ analytic_full = __has_analytic_full(f) ? f.analytic_full : false,) where {iip, specialize}
if jac === nothing && isa(jac_prototype, AbstractSciMLOperator)
if iip
jac = update_coefficients! #(J,u,p,t)
@@ -3354,7 +4341,8 @@ function RODEFunction{iip, specialize}(f;
end
end
- if jac_prototype !== nothing && colorvec === nothing &&
+ if jac_prototype !== nothing &&
+ colorvec === nothing &&
ArrayInterface.fast_matrix_colors(jac_prototype)
_colorvec = ArrayInterface.matrix_colors(jac_prototype)
else
@@ -3383,29 +4371,87 @@ function RODEFunction{iip, specialize}(f;
_f = prepare_function(f)
if specialize === NoSpecialize
- RODEFunction{iip, specialize, Any, Any, Any, Any, Any,
- Any, Any, Any, Any, Any, Any, Any,
- typeof(syms), typeof(indepsym), typeof(paramsyms), Any,
- typeof(_colorvec), Any}(_f, mass_matrix, analytic,
+ RODEFunction{
+ iip,
+ specialize,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ typeof(syms),
+ typeof(indepsym),
+ typeof(paramsyms),
+ Any,
+ typeof(_colorvec),
+ Any,
+ }(_f,
+ mass_matrix,
+ analytic,
tgrad,
- jac, jvp, vjp,
+ jac,
+ jvp,
+ vjp,
jac_prototype,
- sparsity, Wfact, Wfact_t,
- paramjac, syms, indepsym,
- paramsyms, observed,
- _colorvec, sys,
+ sparsity,
+ Wfact,
+ Wfact_t,
+ paramjac,
+ syms,
+ indepsym,
+ paramsyms,
+ observed,
+ _colorvec,
+ sys,
analytic_full)
else
- RODEFunction{iip, specialize, typeof(_f), typeof(mass_matrix),
- typeof(analytic), typeof(tgrad),
- typeof(jac), typeof(jvp), typeof(vjp), typeof(jac_prototype),
- typeof(sparsity), typeof(Wfact), typeof(Wfact_t),
- typeof(paramjac), typeof(syms), typeof(indepsym), typeof(paramsyms),
- typeof(observed), typeof(_colorvec),
- typeof(sys)}(_f, mass_matrix, analytic, tgrad,
- jac, jvp, vjp, jac_prototype, sparsity,
- Wfact, Wfact_t, paramjac, syms, indepsym, paramsyms,
- observed, _colorvec, sys, analytic_full)
+ RODEFunction{
+ iip,
+ specialize,
+ typeof(_f),
+ typeof(mass_matrix),
+ typeof(analytic),
+ typeof(tgrad),
+ typeof(jac),
+ typeof(jvp),
+ typeof(vjp),
+ typeof(jac_prototype),
+ typeof(sparsity),
+ typeof(Wfact),
+ typeof(Wfact_t),
+ typeof(paramjac),
+ typeof(syms),
+ typeof(indepsym),
+ typeof(paramsyms),
+ typeof(observed),
+ typeof(_colorvec),
+ typeof(sys),
+ }(_f,
+ mass_matrix,
+ analytic,
+ tgrad,
+ jac,
+ jvp,
+ vjp,
+ jac_prototype,
+ sparsity,
+ Wfact,
+ Wfact_t,
+ paramjac,
+ syms,
+ indepsym,
+ paramsyms,
+ observed,
+ _colorvec,
+ sys,
+ analytic_full)
end
end
@@ -3419,29 +4465,22 @@ end
RODEFunction(f::RODEFunction; kwargs...) = f
function DAEFunction{iip, specialize}(f;
- analytic = __has_analytic(f) ? f.analytic : nothing,
- tgrad = __has_tgrad(f) ? f.tgrad : nothing,
- jac = __has_jac(f) ? f.jac : nothing,
- jvp = __has_jvp(f) ? f.jvp : nothing,
- vjp = __has_vjp(f) ? f.vjp : nothing,
- jac_prototype = __has_jac_prototype(f) ?
- f.jac_prototype :
- nothing,
- sparsity = __has_sparsity(f) ? f.sparsity :
- jac_prototype,
- Wfact = __has_Wfact(f) ? f.Wfact : nothing,
- Wfact_t = __has_Wfact_t(f) ? f.Wfact_t : nothing,
- paramjac = __has_paramjac(f) ? f.paramjac : nothing,
- syms = __has_syms(f) ? f.syms : nothing,
- indepsym = __has_indepsym(f) ? f.indepsym : nothing,
- paramsyms = __has_paramsyms(f) ? f.paramsyms :
- nothing,
- observed = __has_observed(f) ? f.observed :
- DEFAULT_OBSERVED,
- colorvec = __has_colorvec(f) ? f.colorvec : nothing,
- sys = __has_sys(f) ? f.sys : nothing) where {iip,
- specialize,
-}
+ analytic = __has_analytic(f) ? f.analytic : nothing,
+ tgrad = __has_tgrad(f) ? f.tgrad : nothing,
+ jac = __has_jac(f) ? f.jac : nothing,
+ jvp = __has_jvp(f) ? f.jvp : nothing,
+ vjp = __has_vjp(f) ? f.vjp : nothing,
+ jac_prototype = __has_jac_prototype(f) ? f.jac_prototype : nothing,
+ sparsity = __has_sparsity(f) ? f.sparsity : jac_prototype,
+ Wfact = __has_Wfact(f) ? f.Wfact : nothing,
+ Wfact_t = __has_Wfact_t(f) ? f.Wfact_t : nothing,
+ paramjac = __has_paramjac(f) ? f.paramjac : nothing,
+ syms = __has_syms(f) ? f.syms : nothing,
+ indepsym = __has_indepsym(f) ? f.indepsym : nothing,
+ paramsyms = __has_paramsyms(f) ? f.paramsyms : nothing,
+ observed = __has_observed(f) ? f.observed : DEFAULT_OBSERVED,
+ colorvec = __has_colorvec(f) ? f.colorvec : nothing,
+ sys = __has_sys(f) ? f.sys : nothing,) where {iip, specialize}
if jac === nothing && isa(jac_prototype, AbstractSciMLOperator)
if iip
jac = update_coefficients! #(J,u,p,t)
@@ -3450,7 +4489,8 @@ function DAEFunction{iip, specialize}(f;
end
end
- if jac_prototype !== nothing && colorvec === nothing &&
+ if jac_prototype !== nothing &&
+ colorvec === nothing &&
ArrayInterface.fast_matrix_colors(jac_prototype)
_colorvec = ArrayInterface.matrix_colors(jac_prototype)
else
@@ -3471,25 +4511,81 @@ function DAEFunction{iip, specialize}(f;
_f = prepare_function(f)
if specialize === NoSpecialize
- DAEFunction{iip, specialize, Any, Any, Any,
- Any, Any, Any, Any, Any,
- Any, Any, Any, typeof(syms),
- typeof(indepsym), typeof(paramsyms),
- Any, typeof(_colorvec), Any}(_f, analytic, tgrad, jac, jvp,
- vjp, jac_prototype, sparsity,
- Wfact, Wfact_t, paramjac, syms,
- indepsym, paramsyms, observed,
- _colorvec, sys)
+ DAEFunction{
+ iip,
+ specialize,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ typeof(syms),
+ typeof(indepsym),
+ typeof(paramsyms),
+ Any,
+ typeof(_colorvec),
+ Any,
+ }(_f,
+ analytic,
+ tgrad,
+ jac,
+ jvp,
+ vjp,
+ jac_prototype,
+ sparsity,
+ Wfact,
+ Wfact_t,
+ paramjac,
+ syms,
+ indepsym,
+ paramsyms,
+ observed,
+ _colorvec,
+ sys)
else
- DAEFunction{iip, specialize, typeof(_f), typeof(analytic), typeof(tgrad),
- typeof(jac), typeof(jvp), typeof(vjp), typeof(jac_prototype),
- typeof(sparsity), typeof(Wfact), typeof(Wfact_t),
- typeof(paramjac), typeof(syms), typeof(indepsym), typeof(paramsyms),
- typeof(observed), typeof(_colorvec),
- typeof(sys)}(_f, analytic, tgrad, jac, jvp, vjp,
- jac_prototype, sparsity, Wfact, Wfact_t,
- paramjac, syms, indepsym, paramsyms, observed,
- _colorvec, sys)
+ DAEFunction{
+ iip,
+ specialize,
+ typeof(_f),
+ typeof(analytic),
+ typeof(tgrad),
+ typeof(jac),
+ typeof(jvp),
+ typeof(vjp),
+ typeof(jac_prototype),
+ typeof(sparsity),
+ typeof(Wfact),
+ typeof(Wfact_t),
+ typeof(paramjac),
+ typeof(syms),
+ typeof(indepsym),
+ typeof(paramsyms),
+ typeof(observed),
+ typeof(_colorvec),
+ typeof(sys),
+ }(_f,
+ analytic,
+ tgrad,
+ jac,
+ jvp,
+ vjp,
+ jac_prototype,
+ sparsity,
+ Wfact,
+ Wfact_t,
+ paramjac,
+ syms,
+ indepsym,
+ paramsyms,
+ observed,
+ _colorvec,
+ sys)
end
end
@@ -3501,31 +4597,23 @@ DAEFunction(f; kwargs...) = DAEFunction{isinplace(f, 5), FullSpecialize}(f; kwar
DAEFunction(f::DAEFunction; kwargs...) = f
function DDEFunction{iip, specialize}(f;
- mass_matrix = __has_mass_matrix(f) ? f.mass_matrix :
- I,
- analytic = __has_analytic(f) ? f.analytic : nothing,
- tgrad = __has_tgrad(f) ? f.tgrad : nothing,
- jac = __has_jac(f) ? f.jac : nothing,
- jvp = __has_jvp(f) ? f.jvp : nothing,
- vjp = __has_vjp(f) ? f.vjp : nothing,
- jac_prototype = __has_jac_prototype(f) ?
- f.jac_prototype :
- nothing,
- sparsity = __has_sparsity(f) ? f.sparsity :
- jac_prototype,
- Wfact = __has_Wfact(f) ? f.Wfact : nothing,
- Wfact_t = __has_Wfact_t(f) ? f.Wfact_t : nothing,
- paramjac = __has_paramjac(f) ? f.paramjac : nothing,
- syms = __has_syms(f) ? f.syms : nothing,
- indepsym = __has_indepsym(f) ? f.indepsym : nothing,
- paramsyms = __has_paramsyms(f) ? f.paramsyms :
- nothing,
- observed = __has_observed(f) ? f.observed :
- DEFAULT_OBSERVED,
- colorvec = __has_colorvec(f) ? f.colorvec : nothing,
- sys = __has_sys(f) ? f.sys : nothing) where {iip,
- specialize,
-}
+ mass_matrix = __has_mass_matrix(f) ? f.mass_matrix : I,
+ analytic = __has_analytic(f) ? f.analytic : nothing,
+ tgrad = __has_tgrad(f) ? f.tgrad : nothing,
+ jac = __has_jac(f) ? f.jac : nothing,
+ jvp = __has_jvp(f) ? f.jvp : nothing,
+ vjp = __has_vjp(f) ? f.vjp : nothing,
+ jac_prototype = __has_jac_prototype(f) ? f.jac_prototype : nothing,
+ sparsity = __has_sparsity(f) ? f.sparsity : jac_prototype,
+ Wfact = __has_Wfact(f) ? f.Wfact : nothing,
+ Wfact_t = __has_Wfact_t(f) ? f.Wfact_t : nothing,
+ paramjac = __has_paramjac(f) ? f.paramjac : nothing,
+ syms = __has_syms(f) ? f.syms : nothing,
+ indepsym = __has_indepsym(f) ? f.indepsym : nothing,
+ paramsyms = __has_paramsyms(f) ? f.paramsyms : nothing,
+ observed = __has_observed(f) ? f.observed : DEFAULT_OBSERVED,
+ colorvec = __has_colorvec(f) ? f.colorvec : nothing,
+ sys = __has_sys(f) ? f.sys : nothing,) where {iip, specialize}
if jac === nothing && isa(jac_prototype, AbstractSciMLOperator)
if iip
jac = update_coefficients! #(J,u,p,t)
@@ -3534,7 +4622,8 @@ function DDEFunction{iip, specialize}(f;
end
end
- if jac_prototype !== nothing && colorvec === nothing &&
+ if jac_prototype !== nothing &&
+ colorvec === nothing &&
ArrayInterface.fast_matrix_colors(jac_prototype)
_colorvec = ArrayInterface.matrix_colors(jac_prototype)
else
@@ -3549,7 +4638,12 @@ function DDEFunction{iip, specialize}(f;
Wfact_tiip = Wfact_t !== nothing ? isinplace(Wfact_t, 6, "Wfact_t", iip) : iip
paramjaciip = paramjac !== nothing ? isinplace(paramjac, 5, "paramjac", iip) : iip
- nonconforming = (jaciip, tgradiip, jvpiip, vjpiip, Wfactiip, Wfact_tiip,
+ nonconforming = (jaciip,
+ tgradiip,
+ jvpiip,
+ vjpiip,
+ Wfactiip,
+ Wfact_tiip,
paramjaciip) .!= iip
if any(nonconforming)
nonconforming = findall(nonconforming)
@@ -3560,33 +4654,85 @@ function DDEFunction{iip, specialize}(f;
_f = prepare_function(f)
if specialize === NoSpecialize
- DDEFunction{iip, specialize, Any, Any, Any, Any,
- Any, Any, Any, Any, Any, Any, Any,
- Any, typeof(syms), typeof(indepsym), typeof(paramsyms),
- Any, typeof(_colorvec), Any}(_f, mass_matrix,
+ DDEFunction{
+ iip,
+ specialize,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ typeof(syms),
+ typeof(indepsym),
+ typeof(paramsyms),
+ Any,
+ typeof(_colorvec),
+ Any,
+ }(_f,
+ mass_matrix,
+ analytic,
+ tgrad,
+ jac,
+ jvp,
+ vjp,
+ jac_prototype,
+ sparsity,
+ Wfact,
+ Wfact_t,
+ paramjac,
+ syms,
+ indepsym,
+ paramsyms,
+ observed,
+ _colorvec,
+ sys)
+ else
+ DDEFunction{
+ iip,
+ specialize,
+ typeof(_f),
+ typeof(mass_matrix),
+ typeof(analytic),
+ typeof(tgrad),
+ typeof(jac),
+ typeof(jvp),
+ typeof(vjp),
+ typeof(jac_prototype),
+ typeof(sparsity),
+ typeof(Wfact),
+ typeof(Wfact_t),
+ typeof(paramjac),
+ typeof(syms),
+ typeof(indepsym),
+ typeof(paramsyms),
+ typeof(observed),
+ typeof(_colorvec),
+ typeof(sys),
+ }(_f,
+ mass_matrix,
analytic,
tgrad,
- jac, jvp, vjp,
+ jac,
+ jvp,
+ vjp,
jac_prototype,
- sparsity, Wfact,
+ sparsity,
+ Wfact,
Wfact_t,
- paramjac, syms,
- indepsym, paramsyms,
+ paramjac,
+ syms,
+ indepsym,
+ paramsyms,
observed,
- _colorvec, sys)
- else
- DDEFunction{iip, specialize, typeof(_f), typeof(mass_matrix), typeof(analytic),
- typeof(tgrad),
- typeof(jac), typeof(jvp), typeof(vjp), typeof(jac_prototype),
- typeof(sparsity), typeof(Wfact), typeof(Wfact_t),
- typeof(paramjac), typeof(syms), typeof(indepsym), typeof(paramsyms),
- typeof(observed),
- typeof(_colorvec), typeof(sys)}(_f, mass_matrix, analytic,
- tgrad, jac, jvp, vjp,
- jac_prototype, sparsity,
- Wfact, Wfact_t, paramjac,
- syms, indepsym, paramsyms, observed,
- _colorvec, sys)
+ _colorvec,
+ sys)
end
end
@@ -3597,86 +4743,169 @@ DDEFunction{iip}(f::DDEFunction; kwargs...) where {iip} = f
DDEFunction(f; kwargs...) = DDEFunction{isinplace(f, 5), FullSpecialize}(f; kwargs...)
DDEFunction(f::DDEFunction; kwargs...) = f
-@add_kwonly function DynamicalDDEFunction{iip}(f1, f2, mass_matrix, analytic, tgrad, jac,
- jvp, vjp,
- jac_prototype, sparsity, Wfact, Wfact_t,
- paramjac,
- syms, indepsym, paramsyms, observed,
- colorvec) where {iip}
+@add_kwonly function DynamicalDDEFunction{iip}(f1,
+ f2,
+ mass_matrix,
+ analytic,
+ tgrad,
+ jac,
+ jvp,
+ vjp,
+ jac_prototype,
+ sparsity,
+ Wfact,
+ Wfact_t,
+ paramjac,
+ syms,
+ indepsym,
+ paramsyms,
+ observed,
+ colorvec) where {iip}
f1 = f1 isa AbstractSciMLOperator ? f1 : DDEFunction(f1)
f2 = DDEFunction(f2)
- DynamicalDDEFunction{isinplace(f2), FullSpecialize, typeof(f1), typeof(f2),
+ DynamicalDDEFunction{
+ isinplace(f2),
+ FullSpecialize,
+ typeof(f1),
+ typeof(f2),
typeof(mass_matrix),
- typeof(analytic), typeof(tgrad), typeof(jac), typeof(jvp),
+ typeof(analytic),
+ typeof(tgrad),
+ typeof(jac),
+ typeof(jvp),
typeof(vjp),
typeof(jac_prototype),
- typeof(Wfact), typeof(Wfact_t), typeof(paramjac), typeof(syms),
- typeof(indepsym), typeof(paramsyms), typeof(observed),
+ typeof(Wfact),
+ typeof(Wfact_t),
+ typeof(paramjac),
+ typeof(syms),
+ typeof(indepsym),
+ typeof(paramsyms),
+ typeof(observed),
typeof(colorvec),
- typeof(sys)}(f1, f2, mass_matrix, analytic, tgrad, jac, jvp,
- vjp, jac_prototype, sparsity, Wfact, Wfact_t,
- paramjac, syms, indepsym, paramsyms, observed,
- colorvec, sys)
+ typeof(sys),
+ }(f1,
+ f2,
+ mass_matrix,
+ analytic,
+ tgrad,
+ jac,
+ jvp,
+ vjp,
+ jac_prototype,
+ sparsity,
+ Wfact,
+ Wfact_t,
+ paramjac,
+ syms,
+ indepsym,
+ paramsyms,
+ observed,
+ colorvec,
+ sys)
end
-function DynamicalDDEFunction{iip, specialize}(f1, f2;
- mass_matrix = __has_mass_matrix(f1) ?
- f1.mass_matrix : I,
- analytic = __has_analytic(f1) ? f1.analytic :
- nothing,
- tgrad = __has_tgrad(f1) ? f1.tgrad : nothing,
- jac = __has_jac(f1) ? f1.jac : nothing,
- jvp = __has_jvp(f1) ? f1.jvp : nothing,
- vjp = __has_vjp(f1) ? f1.vjp : nothing,
- jac_prototype = __has_jac_prototype(f1) ?
- f1.jac_prototype : nothing,
- sparsity = __has_sparsity(f1) ? f1.sparsity :
- jac_prototype,
- Wfact = __has_Wfact(f1) ? f1.Wfact : nothing,
- Wfact_t = __has_Wfact_t(f1) ? f1.Wfact_t :
- nothing,
- paramjac = __has_paramjac(f1) ? f1.paramjac :
- nothing,
- syms = __has_syms(f1) ? f1.syms : nothing,
- indepsym = __has_indepsym(f1) ? f1.indepsym :
- nothing,
- paramsyms = __has_paramsyms(f1) ?
- f1.paramsyms : nothing,
- observed = __has_observed(f1) ? f1.observed :
- DEFAULT_OBSERVED,
- colorvec = __has_colorvec(f1) ? f1.colorvec :
- nothing,
- sys = __has_sys(f1) ? f1.sys : nothing) where {
- iip,
- specialize,
-}
+function DynamicalDDEFunction{iip, specialize}(f1,
+ f2;
+ mass_matrix = __has_mass_matrix(f1) ? f1.mass_matrix : I,
+ analytic = __has_analytic(f1) ? f1.analytic : nothing,
+ tgrad = __has_tgrad(f1) ? f1.tgrad : nothing,
+ jac = __has_jac(f1) ? f1.jac : nothing,
+ jvp = __has_jvp(f1) ? f1.jvp : nothing,
+ vjp = __has_vjp(f1) ? f1.vjp : nothing,
+ jac_prototype = __has_jac_prototype(f1) ? f1.jac_prototype : nothing,
+ sparsity = __has_sparsity(f1) ? f1.sparsity : jac_prototype,
+ Wfact = __has_Wfact(f1) ? f1.Wfact : nothing,
+ Wfact_t = __has_Wfact_t(f1) ? f1.Wfact_t : nothing,
+ paramjac = __has_paramjac(f1) ? f1.paramjac : nothing,
+ syms = __has_syms(f1) ? f1.syms : nothing,
+ indepsym = __has_indepsym(f1) ? f1.indepsym : nothing,
+ paramsyms = __has_paramsyms(f1) ? f1.paramsyms : nothing,
+ observed = __has_observed(f1) ? f1.observed : DEFAULT_OBSERVED,
+ colorvec = __has_colorvec(f1) ? f1.colorvec : nothing,
+ sys = __has_sys(f1) ? f1.sys : nothing,) where {iip, specialize}
if specialize === NoSpecialize
- DynamicalDDEFunction{iip, specialize, Any, Any, Any, Any, Any, Any, Any, Any, Any,
- Any, Any,
- Any, Any, Any, Any, Any, Any, Any, Any}(f1, f2, mass_matrix,
+ DynamicalDDEFunction{
+ iip,
+ specialize,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ }(f1,
+ f2,
+ mass_matrix,
analytic,
tgrad,
- jac, jvp, vjp,
+ jac,
+ jvp,
+ vjp,
jac_prototype,
sparsity,
- Wfact, Wfact_t,
+ Wfact,
+ Wfact_t,
paramjac,
- syms, indepsym,
+ syms,
+ indepsym,
paramsyms,
- observed, colorvec,
+ observed,
+ colorvec,
sys)
else
- DynamicalDDEFunction{iip, typeof(f1), typeof(f2), typeof(mass_matrix),
+ DynamicalDDEFunction{
+ iip,
+ typeof(f1),
+ typeof(f2),
+ typeof(mass_matrix),
typeof(analytic),
- typeof(tgrad), typeof(jac), typeof(jvp), typeof(vjp),
- typeof(jac_prototype), typeof(sparsity),
- typeof(Wfact), typeof(Wfact_t), typeof(paramjac), typeof(syms),
- typeof(indepsym), typeof(paramsyms), typeof(observed),
+ typeof(tgrad),
+ typeof(jac),
+ typeof(jvp),
+ typeof(vjp),
+ typeof(jac_prototype),
+ typeof(sparsity),
+ typeof(Wfact),
+ typeof(Wfact_t),
+ typeof(paramjac),
+ typeof(syms),
+ typeof(indepsym),
+ typeof(paramsyms),
+ typeof(observed),
typeof(colorvec),
- typeof(sys)}(f1, f2, mass_matrix, analytic, tgrad, jac, jvp,
- vjp, jac_prototype, sparsity,
- Wfact, Wfact_t, paramjac, syms, indepsym,
- paramsyms, observed,
- colorvec, sys)
+ typeof(sys),
+ }(f1,
+ f2,
+ mass_matrix,
+ analytic,
+ tgrad,
+ jac,
+ jvp,
+ vjp,
+ jac_prototype,
+ sparsity,
+ Wfact,
+ Wfact_t,
+ paramjac,
+ syms,
+ indepsym,
+ paramsyms,
+ observed,
+ colorvec,
+ sys)
end
end
@@ -3685,37 +4914,31 @@ function DynamicalDDEFunction(f1, f2 = nothing; kwargs...)
end
function DynamicalDDEFunction{iip}(f1, f2; kwargs...) where {iip}
DynamicalDDEFunction{iip, FullSpecialize}(DDEFunction{iip}(f1),
- DDEFunction{iip}(f2); kwargs...)
+ DDEFunction{iip}(f2);
+ kwargs...,)
end
DynamicalDDEFunction(f::DynamicalDDEFunction; kwargs...) = f
-function SDDEFunction{iip, specialize}(f, g;
- mass_matrix = __has_mass_matrix(f) ? f.mass_matrix :
- I,
- analytic = __has_analytic(f) ? f.analytic : nothing,
- tgrad = __has_tgrad(f) ? f.tgrad : nothing,
- jac = __has_jac(f) ? f.jac : nothing,
- jvp = __has_jvp(f) ? f.jvp : nothing,
- vjp = __has_vjp(f) ? f.vjp : nothing,
- jac_prototype = __has_jac_prototype(f) ?
- f.jac_prototype :
- nothing,
- sparsity = __has_sparsity(f) ? f.sparsity :
- jac_prototype,
- Wfact = __has_Wfact(f) ? f.Wfact : nothing,
- Wfact_t = __has_Wfact_t(f) ? f.Wfact_t : nothing,
- paramjac = __has_paramjac(f) ? f.paramjac : nothing,
- ggprime = nothing,
- syms = __has_syms(f) ? f.syms : nothing,
- indepsym = __has_indepsym(f) ? f.indepsym : nothing,
- paramsyms = __has_paramsyms(f) ? f.paramsyms :
- nothing,
- observed = __has_observed(f) ? f.observed :
- DEFAULT_OBSERVED,
- colorvec = __has_colorvec(f) ? f.colorvec : nothing,
- sys = __has_sys(f) ? f.sys : nothing) where {iip,
- specialize,
-}
+function SDDEFunction{iip, specialize}(f,
+ g;
+ mass_matrix = __has_mass_matrix(f) ? f.mass_matrix : I,
+ analytic = __has_analytic(f) ? f.analytic : nothing,
+ tgrad = __has_tgrad(f) ? f.tgrad : nothing,
+ jac = __has_jac(f) ? f.jac : nothing,
+ jvp = __has_jvp(f) ? f.jvp : nothing,
+ vjp = __has_vjp(f) ? f.vjp : nothing,
+ jac_prototype = __has_jac_prototype(f) ? f.jac_prototype : nothing,
+ sparsity = __has_sparsity(f) ? f.sparsity : jac_prototype,
+ Wfact = __has_Wfact(f) ? f.Wfact : nothing,
+ Wfact_t = __has_Wfact_t(f) ? f.Wfact_t : nothing,
+ paramjac = __has_paramjac(f) ? f.paramjac : nothing,
+ ggprime = nothing,
+ syms = __has_syms(f) ? f.syms : nothing,
+ indepsym = __has_indepsym(f) ? f.indepsym : nothing,
+ paramsyms = __has_paramsyms(f) ? f.paramsyms : nothing,
+ observed = __has_observed(f) ? f.observed : DEFAULT_OBSERVED,
+ colorvec = __has_colorvec(f) ? f.colorvec : nothing,
+ sys = __has_sys(f) ? f.sys : nothing,) where {iip, specialize}
if jac === nothing && isa(jac_prototype, AbstractSciMLOperator)
if iip
jac = update_coefficients! #(J,u,p,t)
@@ -3724,7 +4947,8 @@ function SDDEFunction{iip, specialize}(f, g;
end
end
- if jac_prototype !== nothing && colorvec === nothing &&
+ if jac_prototype !== nothing &&
+ colorvec === nothing &&
ArrayInterface.fast_matrix_colors(jac_prototype)
_colorvec = ArrayInterface.matrix_colors(jac_prototype)
else
@@ -3735,37 +4959,93 @@ function SDDEFunction{iip, specialize}(f, g;
_g = prepare_function(g)
if specialize === NoSpecialize
- SDDEFunction{iip, specialize, Any, Any, Any, Any, Any,
- Any, Any, Any, Any, Any, Any, Any,
- Any, Any, typeof(syms), typeof(indepsym), typeof(paramsyms),
- Any, typeof(_colorvec), Any}(_f, _g, mass_matrix,
- analytic, tgrad,
+ SDDEFunction{
+ iip,
+ specialize,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ typeof(syms),
+ typeof(indepsym),
+ typeof(paramsyms),
+ Any,
+ typeof(_colorvec),
+ Any,
+ }(_f,
+ _g,
+ mass_matrix,
+ analytic,
+ tgrad,
jac,
jvp,
vjp,
jac_prototype,
- sparsity, Wfact,
+ sparsity,
+ Wfact,
Wfact_t,
- paramjac, ggprime,
- syms, indepsym, paramsyms,
+ paramjac,
+ ggprime,
+ syms,
+ indepsym,
+ paramsyms,
observed,
_colorvec,
sys)
else
- SDDEFunction{iip, specialize, typeof(_f), typeof(_g),
- typeof(mass_matrix), typeof(analytic), typeof(tgrad),
- typeof(jac), typeof(jvp), typeof(vjp), typeof(jac_prototype),
- typeof(sparsity), typeof(Wfact), typeof(Wfact_t),
- typeof(paramjac), typeof(ggprime), typeof(syms), typeof(indepsym),
- typeof(paramsyms), typeof(observed),
- typeof(_colorvec), typeof(sys)}(_f, _g, mass_matrix,
- analytic, tgrad, jac,
- jvp, vjp, jac_prototype,
- sparsity, Wfact,
+ SDDEFunction{
+ iip,
+ specialize,
+ typeof(_f),
+ typeof(_g),
+ typeof(mass_matrix),
+ typeof(analytic),
+ typeof(tgrad),
+ typeof(jac),
+ typeof(jvp),
+ typeof(vjp),
+ typeof(jac_prototype),
+ typeof(sparsity),
+ typeof(Wfact),
+ typeof(Wfact_t),
+ typeof(paramjac),
+ typeof(ggprime),
+ typeof(syms),
+ typeof(indepsym),
+ typeof(paramsyms),
+ typeof(observed),
+ typeof(_colorvec),
+ typeof(sys),
+ }(_f,
+ _g,
+ mass_matrix,
+ analytic,
+ tgrad,
+ jac,
+ jvp,
+ vjp,
+ jac_prototype,
+ sparsity,
+ Wfact,
Wfact_t,
- paramjac, ggprime, syms,
- indepsym, paramsyms,
- observed, _colorvec, sys)
+ paramjac,
+ ggprime,
+ syms,
+ indepsym,
+ paramsyms,
+ observed,
+ _colorvec,
+ sys)
end
end
@@ -3779,35 +5059,23 @@ end
SDDEFunction(f::SDDEFunction; kwargs...) = f
function NonlinearFunction{iip, specialize}(f;
- mass_matrix = __has_mass_matrix(f) ?
- f.mass_matrix :
- I,
- analytic = __has_analytic(f) ? f.analytic :
- nothing,
- tgrad = __has_tgrad(f) ? f.tgrad : nothing,
- jac = __has_jac(f) ? f.jac : nothing,
- jvp = __has_jvp(f) ? f.jvp : nothing,
- vjp = __has_vjp(f) ? f.vjp : nothing,
- jac_prototype = __has_jac_prototype(f) ?
- f.jac_prototype : nothing,
- sparsity = __has_sparsity(f) ? f.sparsity :
- jac_prototype,
- Wfact = __has_Wfact(f) ? f.Wfact : nothing,
- Wfact_t = __has_Wfact_t(f) ? f.Wfact_t :
- nothing,
- paramjac = __has_paramjac(f) ? f.paramjac :
- nothing,
- syms = __has_syms(f) ? f.syms : nothing,
- paramsyms = __has_paramsyms(f) ? f.paramsyms :
- nothing,
- observed = __has_observed(f) ? f.observed :
- DEFAULT_OBSERVED_NO_TIME,
- colorvec = __has_colorvec(f) ? f.colorvec :
- nothing,
- sys = __has_sys(f) ? f.sys : nothing,
- resid_prototype = __has_resid_prototype(f) ? f.resid_prototype : nothing) where {
- iip, specialize}
-
+ mass_matrix = __has_mass_matrix(f) ? f.mass_matrix : I,
+ analytic = __has_analytic(f) ? f.analytic : nothing,
+ tgrad = __has_tgrad(f) ? f.tgrad : nothing,
+ jac = __has_jac(f) ? f.jac : nothing,
+ jvp = __has_jvp(f) ? f.jvp : nothing,
+ vjp = __has_vjp(f) ? f.vjp : nothing,
+ jac_prototype = __has_jac_prototype(f) ? f.jac_prototype : nothing,
+ sparsity = __has_sparsity(f) ? f.sparsity : jac_prototype,
+ Wfact = __has_Wfact(f) ? f.Wfact : nothing,
+ Wfact_t = __has_Wfact_t(f) ? f.Wfact_t : nothing,
+ paramjac = __has_paramjac(f) ? f.paramjac : nothing,
+ syms = __has_syms(f) ? f.syms : nothing,
+ paramsyms = __has_paramsyms(f) ? f.paramsyms : nothing,
+ observed = __has_observed(f) ? f.observed : DEFAULT_OBSERVED_NO_TIME,
+ colorvec = __has_colorvec(f) ? f.colorvec : nothing,
+ sys = __has_sys(f) ? f.sys : nothing,
+ resid_prototype = __has_resid_prototype(f) ? f.resid_prototype : nothing,) where {iip, specialize}
if mass_matrix === I && f isa Tuple
mass_matrix = ((I for i in 1:length(f))...,)
end
@@ -3820,7 +5088,8 @@ function NonlinearFunction{iip, specialize}(f;
end
end
- if jac_prototype !== nothing && colorvec === nothing &&
+ if jac_prototype !== nothing &&
+ colorvec === nothing &&
ArrayInterface.fast_matrix_colors(jac_prototype)
_colorvec = ArrayInterface.matrix_colors(jac_prototype)
else
@@ -3841,33 +5110,85 @@ function NonlinearFunction{iip, specialize}(f;
_f = prepare_function(f)
if specialize === NoSpecialize
- NonlinearFunction{iip, specialize,
- Any, Any, Any, Any, Any,
- Any, Any, Any, Any, Any,
- Any, Any, typeof(syms), typeof(paramsyms), Any,
- typeof(_colorvec), Any, Any}(_f, mass_matrix,
- analytic, tgrad, jac,
- jvp, vjp,
+ NonlinearFunction{
+ iip,
+ specialize,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ typeof(syms),
+ typeof(paramsyms),
+ Any,
+ typeof(_colorvec),
+ Any,
+ Any,
+ }(_f,
+ mass_matrix,
+ analytic,
+ tgrad,
+ jac,
+ jvp,
+ vjp,
jac_prototype,
- sparsity, Wfact,
- Wfact_t, paramjac,
- syms, paramsyms, observed,
- _colorvec, sys, resid_prototype)
+ sparsity,
+ Wfact,
+ Wfact_t,
+ paramjac,
+ syms,
+ paramsyms,
+ observed,
+ _colorvec,
+ sys,
+ resid_prototype)
else
- NonlinearFunction{iip, specialize,
- typeof(_f), typeof(mass_matrix), typeof(analytic), typeof(tgrad),
- typeof(jac), typeof(jvp), typeof(vjp), typeof(jac_prototype),
- typeof(sparsity), typeof(Wfact),
- typeof(Wfact_t), typeof(paramjac), typeof(syms),
+ NonlinearFunction{
+ iip,
+ specialize,
+ typeof(_f),
+ typeof(mass_matrix),
+ typeof(analytic),
+ typeof(tgrad),
+ typeof(jac),
+ typeof(jvp),
+ typeof(vjp),
+ typeof(jac_prototype),
+ typeof(sparsity),
+ typeof(Wfact),
+ typeof(Wfact_t),
+ typeof(paramjac),
+ typeof(syms),
typeof(paramsyms),
typeof(observed),
- typeof(_colorvec), typeof(sys), typeof(resid_prototype)}(_f, mass_matrix,
- analytic, tgrad, jac,
- jvp, vjp, jac_prototype, sparsity,
+ typeof(_colorvec),
+ typeof(sys),
+ typeof(resid_prototype),
+ }(_f,
+ mass_matrix,
+ analytic,
+ tgrad,
+ jac,
+ jvp,
+ vjp,
+ jac_prototype,
+ sparsity,
Wfact,
- Wfact_t, paramjac, syms,
+ Wfact_t,
+ paramjac,
+ syms,
paramsyms,
- observed, _colorvec, sys, resid_prototype)
+ observed,
+ _colorvec,
+ sys,
+ resid_prototype)
end
end
@@ -3881,41 +5202,57 @@ end
NonlinearFunction(f::NonlinearFunction; kwargs...) = f
function IntervalNonlinearFunction{iip, specialize}(f;
- analytic = __has_analytic(f) ?
- f.analytic :
- nothing,
- syms = __has_syms(f) ? f.syms : nothing,
- paramsyms = __has_paramsyms(f) ?
- f.paramsyms :
- nothing,
- observed = __has_observed(f) ?
- f.observed :
- DEFAULT_OBSERVED_NO_TIME,
- sys = __has_sys(f) ? f.sys : nothing) where {
- iip,
- specialize,
-}
+ analytic = __has_analytic(f) ? f.analytic : nothing,
+ syms = __has_syms(f) ? f.syms : nothing,
+ paramsyms = __has_paramsyms(f) ? f.paramsyms : nothing,
+ observed = __has_observed(f) ? f.observed : DEFAULT_OBSERVED_NO_TIME,
+ sys = __has_sys(f) ? f.sys : nothing,) where {iip, specialize}
_f = prepare_function(f)
if specialize === NoSpecialize
- IntervalNonlinearFunction{iip, specialize,
- Any, Any, typeof(syms), typeof(paramsyms), Any,
- typeof(_colorvec), Any}(_f, mass_matrix,
- analytic, tgrad, jac,
- jvp, vjp,
+ IntervalNonlinearFunction{
+ iip,
+ specialize,
+ Any,
+ Any,
+ typeof(syms),
+ typeof(paramsyms),
+ Any,
+ typeof(_colorvec),
+ Any,
+ }(_f,
+ mass_matrix,
+ analytic,
+ tgrad,
+ jac,
+ jvp,
+ vjp,
jac_prototype,
- sparsity, Wfact,
- Wfact_t, paramjac,
- syms, paramsyms, observed,
- _colorvec, sys)
+ sparsity,
+ Wfact,
+ Wfact_t,
+ paramjac,
+ syms,
+ paramsyms,
+ observed,
+ _colorvec,
+ sys)
else
- IntervalNonlinearFunction{iip, specialize,
- typeof(_f), typeof(analytic), typeof(syms),
+ IntervalNonlinearFunction{
+ iip,
+ specialize,
+ typeof(_f),
+ typeof(analytic),
+ typeof(syms),
typeof(paramsyms),
typeof(observed),
- typeof(sys)}(_f, analytic, syms,
+ typeof(sys),
+ }(_f,
+ analytic,
+ syms,
paramsyms,
- observed, sys)
+ observed,
+ sys)
end
end
@@ -3933,68 +5270,103 @@ struct NoAD <: AbstractADType end
(f::OptimizationFunction)(args...) = f.f(args...)
OptimizationFunction(args...; kwargs...) = OptimizationFunction{true}(args...; kwargs...)
-function OptimizationFunction{iip}(f, adtype::AbstractADType = NoAD();
- grad = nothing, hess = nothing, hv = nothing,
- cons = nothing, cons_j = nothing, cons_h = nothing,
- hess_prototype = nothing,
- cons_jac_prototype = __has_jac_prototype(f) ?
- f.jac_prototype : nothing,
- cons_hess_prototype = nothing,
- syms = __has_syms(f) ? f.syms : nothing,
- paramsyms = __has_paramsyms(f) ? f.paramsyms : nothing,
- observed = __has_observed(f) ? f.observed :
- DEFAULT_OBSERVED_NO_TIME,
- expr = nothing, cons_expr = nothing,
- sys = __has_sys(f) ? f.sys : nothing,
- lag_h = nothing, lag_hess_prototype = nothing,
- hess_colorvec = __has_colorvec(f) ? f.colorvec : nothing,
- cons_jac_colorvec = __has_colorvec(f) ? f.colorvec :
- nothing,
- cons_hess_colorvec = __has_colorvec(f) ? f.colorvec :
- nothing,
- lag_hess_colorvec = nothing) where {iip}
+function OptimizationFunction{iip}(f,
+ adtype::AbstractADType = NoAD();
+ grad = nothing,
+ hess = nothing,
+ hv = nothing,
+ cons = nothing,
+ cons_j = nothing,
+ cons_h = nothing,
+ hess_prototype = nothing,
+ cons_jac_prototype = __has_jac_prototype(f) ? f.jac_prototype : nothing,
+ cons_hess_prototype = nothing,
+ syms = __has_syms(f) ? f.syms : nothing,
+ paramsyms = __has_paramsyms(f) ? f.paramsyms : nothing,
+ observed = __has_observed(f) ? f.observed : DEFAULT_OBSERVED_NO_TIME,
+ expr = nothing,
+ cons_expr = nothing,
+ sys = __has_sys(f) ? f.sys : nothing,
+ lag_h = nothing,
+ lag_hess_prototype = nothing,
+ hess_colorvec = __has_colorvec(f) ? f.colorvec : nothing,
+ cons_jac_colorvec = __has_colorvec(f) ? f.colorvec : nothing,
+ cons_hess_colorvec = __has_colorvec(f) ? f.colorvec : nothing,
+ lag_hess_colorvec = nothing,) where {iip}
isinplace(f, 2; has_two_dispatches = false, isoptimization = true)
- OptimizationFunction{iip, typeof(adtype), typeof(f), typeof(grad), typeof(hess),
- typeof(hv),
- typeof(cons), typeof(cons_j), typeof(cons_h),
- typeof(hess_prototype),
- typeof(cons_jac_prototype), typeof(cons_hess_prototype),
- typeof(syms), typeof(paramsyms), typeof(observed),
- typeof(expr), typeof(cons_expr), typeof(sys), typeof(lag_h),
- typeof(lag_hess_prototype), typeof(hess_colorvec),
- typeof(cons_jac_colorvec), typeof(cons_hess_colorvec),
- typeof(lag_hess_colorvec)
- }(f, adtype, grad, hess,
- hv, cons, cons_j, cons_h,
- hess_prototype, cons_jac_prototype,
- cons_hess_prototype, syms,
- paramsyms, observed, expr, cons_expr, sys,
- lag_h, lag_hess_prototype, hess_colorvec, cons_jac_colorvec,
- cons_hess_colorvec, lag_hess_colorvec)
+ OptimizationFunction{
+ iip,
+ typeof(adtype),
+ typeof(f),
+ typeof(grad),
+ typeof(hess),
+ typeof(hv),
+ typeof(cons),
+ typeof(cons_j),
+ typeof(cons_h),
+ typeof(hess_prototype),
+ typeof(cons_jac_prototype),
+ typeof(cons_hess_prototype),
+ typeof(syms),
+ typeof(paramsyms),
+ typeof(observed),
+ typeof(expr),
+ typeof(cons_expr),
+ typeof(sys),
+ typeof(lag_h),
+ typeof(lag_hess_prototype),
+ typeof(hess_colorvec),
+ typeof(cons_jac_colorvec),
+ typeof(cons_hess_colorvec),
+ typeof(lag_hess_colorvec),
+ }(f,
+ adtype,
+ grad,
+ hess,
+ hv,
+ cons,
+ cons_j,
+ cons_h,
+ hess_prototype,
+ cons_jac_prototype,
+ cons_hess_prototype,
+ syms,
+ paramsyms,
+ observed,
+ expr,
+ cons_expr,
+ sys,
+ lag_h,
+ lag_hess_prototype,
+ hess_colorvec,
+ cons_jac_colorvec,
+ cons_hess_colorvec,
+ lag_hess_colorvec)
end
-function BVPFunction{iip, specialize, twopoint}(f, bc;
- mass_matrix = __has_mass_matrix(f) ? f.mass_matrix : I,
- analytic = __has_analytic(f) ? f.analytic : nothing,
- tgrad = __has_tgrad(f) ? f.tgrad : nothing,
- jac = __has_jac(f) ? f.jac : nothing,
- bcjac = __has_jac(bc) ? bc.jac : nothing,
- jvp = __has_jvp(f) ? f.jvp : nothing,
- vjp = __has_vjp(f) ? f.vjp : nothing,
- jac_prototype = __has_jac_prototype(f) ? f.jac_prototype : nothing,
- bcjac_prototype = __has_jac_prototype(bc) ? bc.jac_prototype : nothing,
- bcresid_prototype = nothing,
- sparsity = __has_sparsity(f) ? f.sparsity : jac_prototype,
- Wfact = __has_Wfact(f) ? f.Wfact : nothing,
- Wfact_t = __has_Wfact_t(f) ? f.Wfact_t : nothing,
- paramjac = __has_paramjac(f) ? f.paramjac : nothing,
- syms = __has_syms(f) ? f.syms : nothing,
- indepsym = __has_indepsym(f) ? f.indepsym : nothing,
- paramsyms = __has_paramsyms(f) ? f.paramsyms : nothing,
- observed = __has_observed(f) ? f.observed : DEFAULT_OBSERVED,
- colorvec = __has_colorvec(f) ? f.colorvec : nothing,
- bccolorvec = __has_colorvec(bc) ? bc.colorvec : nothing,
- sys = __has_sys(f) ? f.sys : nothing) where {iip, specialize, twopoint}
+function BVPFunction{iip, specialize, twopoint}(f,
+ bc;
+ mass_matrix = __has_mass_matrix(f) ? f.mass_matrix : I,
+ analytic = __has_analytic(f) ? f.analytic : nothing,
+ tgrad = __has_tgrad(f) ? f.tgrad : nothing,
+ jac = __has_jac(f) ? f.jac : nothing,
+ bcjac = __has_jac(bc) ? bc.jac : nothing,
+ jvp = __has_jvp(f) ? f.jvp : nothing,
+ vjp = __has_vjp(f) ? f.vjp : nothing,
+ jac_prototype = __has_jac_prototype(f) ? f.jac_prototype : nothing,
+ bcjac_prototype = __has_jac_prototype(bc) ? bc.jac_prototype : nothing,
+ bcresid_prototype = nothing,
+ sparsity = __has_sparsity(f) ? f.sparsity : jac_prototype,
+ Wfact = __has_Wfact(f) ? f.Wfact : nothing,
+ Wfact_t = __has_Wfact_t(f) ? f.Wfact_t : nothing,
+ paramjac = __has_paramjac(f) ? f.paramjac : nothing,
+ syms = __has_syms(f) ? f.syms : nothing,
+ indepsym = __has_indepsym(f) ? f.indepsym : nothing,
+ paramsyms = __has_paramsyms(f) ? f.paramsyms : nothing,
+ observed = __has_observed(f) ? f.observed : DEFAULT_OBSERVED,
+ colorvec = __has_colorvec(f) ? f.colorvec : nothing,
+ bccolorvec = __has_colorvec(bc) ? bc.colorvec : nothing,
+ sys = __has_sys(f) ? f.sys : nothing,) where {iip, specialize, twopoint}
if mass_matrix === I && f isa Tuple
mass_matrix = ((I for i in 1:length(f))...,)
end
@@ -4020,14 +5392,16 @@ function BVPFunction{iip, specialize, twopoint}(f, bc;
end
end
- if jac_prototype !== nothing && colorvec === nothing &&
+ if jac_prototype !== nothing &&
+ colorvec === nothing &&
ArrayInterfaceCore.fast_matrix_colors(jac_prototype)
_colorvec = ArrayInterfaceCore.matrix_colors(jac_prototype)
else
_colorvec = colorvec
end
- if bcjac_prototype !== nothing && bccolorvec === nothing &&
+ if bcjac_prototype !== nothing &&
+ bccolorvec === nothing &&
ArrayInterfaceCore.fast_matrix_colors(bcjac_prototype)
_bccolorvec = ArrayInterfaceCore.matrix_colors(bcjac_prototype)
else
@@ -4051,7 +5425,8 @@ function BVPFunction{iip, specialize, twopoint}(f, bc;
else
@assert length(bcjac) == 2
bcjac = Tuple(bcjac)
- if isinplace(first(bcjac), 3, "bcjac", bciip) != isinplace(last(bcjac), 3, "bcjac", bciip)
+ if isinplace(first(bcjac), 3, "bcjac", bciip) !=
+ isinplace(last(bcjac), 3, "bcjac", bciip)
throw(NonconformingFunctionsError(["bcjac[1]", "bcjac[2]"]))
end
isinplace(bcjac, 3, "bcjac", iip)
@@ -4066,13 +5441,28 @@ function BVPFunction{iip, specialize, twopoint}(f, bc;
Wfact_tiip = Wfact_t !== nothing ? isinplace(Wfact_t, 5, "Wfact_t", iip) : iip
paramjaciip = paramjac !== nothing ? isinplace(paramjac, 4, "paramjac", iip) : iip
- nonconforming = (bciip, jaciip, tgradiip, jvpiip, vjpiip, Wfactiip, Wfact_tiip,
+ nonconforming = (bciip,
+ jaciip,
+ tgradiip,
+ jvpiip,
+ vjpiip,
+ Wfactiip,
+ Wfact_tiip,
paramjaciip) .!= iip
bc_nonconforming = bcjaciip .!= bciip
if any(nonconforming)
nonconforming = findall(nonconforming)
- functions = ["bc", "jac", "bcjac", "tgrad", "jvp", "vjp", "Wfact", "Wfact_t",
- "paramjac"][nonconforming]
+ functions = [
+ "bc",
+ "jac",
+ "bcjac",
+ "tgrad",
+ "jvp",
+ "vjp",
+ "Wfact",
+ "Wfact_t",
+ "paramjac",
+ ][nonconforming]
throw(NonconformingFunctionsError(functions))
end
@@ -4085,9 +5475,13 @@ function BVPFunction{iip, specialize, twopoint}(f, bc;
last(bcresid_prototype))
end
- bccolorvec !== nothing && length(bccolorvec) == 2 && (bccolorvec = Tuple(bccolorvec))
+ bccolorvec !== nothing &&
+ length(bccolorvec) == 2 &&
+ (bccolorvec = Tuple(bccolorvec))
- bcjac_prototype !== nothing && length(bcjac_prototype) == 2 && (bcjac_prototype = Tuple(bcjac_prototype))
+ bcjac_prototype !== nothing &&
+ length(bcjac_prototype) == 2 &&
+ (bcjac_prototype = Tuple(bcjac_prototype))
end
if any(bc_nonconforming)
@@ -4099,36 +5493,118 @@ function BVPFunction{iip, specialize, twopoint}(f, bc;
_f = prepare_function(f)
if specialize === NoSpecialize
- BVPFunction{iip, specialize, twopoint, Any, Any, Any, Any, Any,
- Any, Any, Any, Any, Any, Any, Any, Any, Any, Any,
- Any, typeof(syms), typeof(indepsym), typeof(paramsyms),
- Any, typeof(_colorvec), typeof(_bccolorvec), Any}(_f, bc, mass_matrix,
- analytic, tgrad, jac, bcjac, jvp, vjp, jac_prototype,
- bcjac_prototype, bcresid_prototype,
- sparsity, Wfact, Wfact_t, paramjac, syms, indepsym, paramsyms, observed,
- _colorvec, _bccolorvec, sys)
+ BVPFunction{
+ iip,
+ specialize,
+ twopoint,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ Any,
+ typeof(syms),
+ typeof(indepsym),
+ typeof(paramsyms),
+ Any,
+ typeof(_colorvec),
+ typeof(_bccolorvec),
+ Any,
+ }(_f,
+ bc,
+ mass_matrix,
+ analytic,
+ tgrad,
+ jac,
+ bcjac,
+ jvp,
+ vjp,
+ jac_prototype,
+ bcjac_prototype,
+ bcresid_prototype,
+ sparsity,
+ Wfact,
+ Wfact_t,
+ paramjac,
+ syms,
+ indepsym,
+ paramsyms,
+ observed,
+ _colorvec,
+ _bccolorvec,
+ sys)
else
- BVPFunction{iip, specialize, twopoint, typeof(_f), typeof(bc), typeof(mass_matrix),
- typeof(analytic), typeof(tgrad), typeof(jac), typeof(bcjac), typeof(jvp),
- typeof(vjp), typeof(jac_prototype),
- typeof(bcjac_prototype), typeof(bcresid_prototype), typeof(sparsity),
- typeof(Wfact), typeof(Wfact_t), typeof(paramjac), typeof(syms),
- typeof(indepsym), typeof(paramsyms), typeof(observed),
- typeof(_colorvec), typeof(_bccolorvec), typeof(sys)}(_f, bc, mass_matrix, analytic,
- tgrad, jac, bcjac, jvp, vjp,
- jac_prototype, bcjac_prototype, bcresid_prototype, sparsity,
- Wfact, Wfact_t, paramjac,
- syms, indepsym, paramsyms, observed,
- _colorvec, _bccolorvec, sys)
+ BVPFunction{
+ iip,
+ specialize,
+ twopoint,
+ typeof(_f),
+ typeof(bc),
+ typeof(mass_matrix),
+ typeof(analytic),
+ typeof(tgrad),
+ typeof(jac),
+ typeof(bcjac),
+ typeof(jvp),
+ typeof(vjp),
+ typeof(jac_prototype),
+ typeof(bcjac_prototype),
+ typeof(bcresid_prototype),
+ typeof(sparsity),
+ typeof(Wfact),
+ typeof(Wfact_t),
+ typeof(paramjac),
+ typeof(syms),
+ typeof(indepsym),
+ typeof(paramsyms),
+ typeof(observed),
+ typeof(_colorvec),
+ typeof(_bccolorvec),
+ typeof(sys),
+ }(_f,
+ bc,
+ mass_matrix,
+ analytic,
+ tgrad,
+ jac,
+ bcjac,
+ jvp,
+ vjp,
+ jac_prototype,
+ bcjac_prototype,
+ bcresid_prototype,
+ sparsity,
+ Wfact,
+ Wfact_t,
+ paramjac,
+ syms,
+ indepsym,
+ paramsyms,
+ observed,
+ _colorvec,
+ _bccolorvec,
+ sys)
end
end
-function BVPFunction{iip}(f, bc; twopoint::Union{Val, Bool}=Val(false),
- kwargs...) where {iip}
+function BVPFunction{iip}(f,
+ bc;
+ twopoint::Union{Val, Bool} = Val(false),
+ kwargs...,) where {iip}
BVPFunction{iip, FullSpecialize, _unwrap_val(twopoint)}(f, bc; kwargs...)
end
BVPFunction{iip}(f::BVPFunction, bc; kwargs...) where {iip} = f
-function BVPFunction(f, bc; twopoint::Union{Val, Bool}=Val(false), kwargs...)
+function BVPFunction(f, bc; twopoint::Union{Val, Bool} = Val(false), kwargs...)
BVPFunction{isinplace(f, 4), FullSpecialize, _unwrap_val(twopoint)}(f, bc; kwargs...)
end
BVPFunction(f::BVPFunction; kwargs...) = f
@@ -4157,25 +5633,17 @@ function IntegralFunction(f, integrand_prototype)
IntegralFunction{true}(f, integrand_prototype)
end
-function BatchIntegralFunction{iip, specialize}(f, integrand_prototype;
- max_batch::Integer = typemax(Int)) where {iip, specialize}
+function BatchIntegralFunction{iip, specialize}(f,
+ integrand_prototype;
+ max_batch::Integer = typemax(Int),) where {iip, specialize}
_f = prepare_function(f)
- BatchIntegralFunction{
- iip,
- specialize,
- typeof(_f),
- typeof(integrand_prototype),
- }(_f,
+ BatchIntegralFunction{iip, specialize, typeof(_f), typeof(integrand_prototype)}(_f,
integrand_prototype,
max_batch)
end
-function BatchIntegralFunction{iip}(f,
- integrand_prototype;
- kwargs...) where {iip}
- return BatchIntegralFunction{iip, FullSpecialize}(f,
- integrand_prototype;
- kwargs...)
+function BatchIntegralFunction{iip}(f, integrand_prototype; kwargs...) where {iip}
+ return BatchIntegralFunction{iip, FullSpecialize}(f, integrand_prototype; kwargs...)
end
function BatchIntegralFunction(f; kwargs...)
@@ -4311,9 +5779,7 @@ for S in [:ODEFunction
:IntegralFunction
:BatchIntegralFunction]
@eval begin
- function ConstructionBase.constructorof(::Type{<:$S{iip}}) where {
- iip,
- }
+ function ConstructionBase.constructorof(::Type{<:$S{iip}}) where {iip}
(args...) -> $S{iip, FullSpecialize, map(typeof, args)...}(args...)
end
end
diff --git a/src/solutions/basic_solutions.jl b/src/solutions/basic_solutions.jl
index 3f5160cde9..4bf633934d 100644
--- a/src/solutions/basic_solutions.jl
+++ b/src/solutions/basic_solutions.jl
@@ -29,13 +29,24 @@ struct LinearSolution{T, N, uType, R, A, C, S} <: AbstractLinearSolution{T, N}
stats::S
end
-function build_linear_solution(alg, u, resid, cache;
- retcode = ReturnCode.Default,
- iters = 0, stats = nothing)
+function build_linear_solution(alg,
+ u,
+ resid,
+ cache;
+ retcode = ReturnCode.Default,
+ iters = 0,
+ stats = nothing,)
T = eltype(eltype(u))
N = length((size(u)...,))
- LinearSolution{T, N, typeof(u), typeof(resid), typeof(alg), typeof(cache),
- typeof(stats)}(u,
+ LinearSolution{
+ T,
+ N,
+ typeof(u),
+ typeof(resid),
+ typeof(alg),
+ typeof(cache),
+ typeof(stats),
+ }(u,
resid,
alg,
retcode,
@@ -78,13 +89,26 @@ struct QuadratureSolution end
@deprecate QuadratureSolution(args...; kwargs...) IntegralSolution(args...; kwargs...)
function build_solution(prob::AbstractIntegralProblem,
- alg, u, resid; chi = nothing,
- retcode = ReturnCode.Default, stats = nothing, kwargs...)
+ alg,
+ u,
+ resid;
+ chi = nothing,
+ retcode = ReturnCode.Default,
+ stats = nothing,
+ kwargs...,)
T = eltype(eltype(u))
N = length((size(u)...,))
- IntegralSolution{T, N, typeof(u), typeof(resid), typeof(prob), typeof(alg), typeof(chi),
- typeof(stats)}(u,
+ IntegralSolution{
+ T,
+ N,
+ typeof(u),
+ typeof(resid),
+ typeof(prob),
+ typeof(alg),
+ typeof(chi),
+ typeof(stats),
+ }(u,
resid,
prob,
alg,
diff --git a/src/solutions/dae_solutions.jl b/src/solutions/dae_solutions.jl
index 9852c9349e..8fa00e0677 100644
--- a/src/solutions/dae_solutions.jl
+++ b/src/solutions/dae_solutions.jl
@@ -53,18 +53,22 @@ end
TruncatedStacktraces.@truncate_stacktrace DAESolution 1 2
-function build_solution(prob::AbstractDAEProblem, alg, t, u, du = nothing;
- timeseries_errors = length(u) > 2,
- dense = false,
- dense_errors = dense,
- calculate_error = true,
- k = nothing,
- interp = du === nothing ? LinearInterpolation(t, u) :
- HermiteInterpolation(t, u, du),
- retcode = ReturnCode.Default,
- destats = missing,
- stats = nothing,
- kwargs...)
+function build_solution(prob::AbstractDAEProblem,
+ alg,
+ t,
+ u,
+ du = nothing;
+ timeseries_errors = length(u) > 2,
+ dense = false,
+ dense_errors = dense,
+ calculate_error = true,
+ k = nothing,
+ interp = du === nothing ? LinearInterpolation(t, u) :
+ HermiteInterpolation(t, u, du),
+ retcode = ReturnCode.Default,
+ destats = missing,
+ stats = nothing,
+ kwargs...,)
T = eltype(eltype(u))
if prob.u0 === nothing
@@ -87,9 +91,20 @@ function build_solution(prob::AbstractDAEProblem, alg, t, u, du = nothing;
u_analytic = Vector{typeof(prob.u0)}()
errors = Dict{Symbol, real(eltype(prob.u0))}()
- sol = DAESolution{T, N, typeof(u), typeof(du), typeof(u_analytic), typeof(errors),
+ sol = DAESolution{
+ T,
+ N,
+ typeof(u),
+ typeof(du),
+ typeof(u_analytic),
+ typeof(errors),
typeof(t),
- typeof(prob), typeof(alg), typeof(interp), typeof(stats)}(u, du,
+ typeof(prob),
+ typeof(alg),
+ typeof(interp),
+ typeof(stats),
+ }(u,
+ du,
u_analytic,
errors,
t,
@@ -102,26 +117,43 @@ function build_solution(prob::AbstractDAEProblem, alg, t, u, du = nothing;
retcode)
if calculate_error
- calculate_solution_errors!(sol; timeseries_errors = timeseries_errors,
- dense_errors = dense_errors)
+ calculate_solution_errors!(sol;
+ timeseries_errors = timeseries_errors,
+ dense_errors = dense_errors,)
end
sol
else
- DAESolution{T, N, typeof(u), typeof(du), Nothing, Nothing, typeof(t),
- typeof(prob), typeof(alg), typeof(interp), typeof(stats)}(u, du,
+ DAESolution{
+ T,
+ N,
+ typeof(u),
+ typeof(du),
+ Nothing,
+ Nothing,
+ typeof(t),
+ typeof(prob),
+ typeof(alg),
+ typeof(interp),
+ typeof(stats),
+ }(u,
+ du,
+ nothing,
nothing,
- nothing, t,
- prob, alg,
+ t,
+ prob,
+ alg,
interp,
- dense, 0,
+ dense,
+ 0,
stats,
retcode)
end
end
function calculate_solution_errors!(sol::AbstractDAESolution;
- fill_uanalytic = true, timeseries_errors = true,
- dense_errors = true)
+ fill_uanalytic = true,
+ timeseries_errors = true,
+ dense_errors = true,)
prob = sol.prob
f = prob.f
@@ -148,8 +180,7 @@ function calculate_solution_errors!(sol::AbstractDAESolution;
sol.errors[:L∞] = norm(maximum(vecvecapply(x -> abs.(x),
interp_u - interp_analytic)))
sol.errors[:L2] = norm(sqrt(recursive_mean(vecvecapply(x -> float.(x) .^ 2,
- interp_u .-
- interp_analytic))))
+ interp_u .- interp_analytic))))
end
end
end
@@ -158,9 +189,19 @@ function calculate_solution_errors!(sol::AbstractDAESolution;
end
function build_solution(sol::AbstractDAESolution{T, N}, u_analytic, errors) where {T, N}
- DAESolution{T, N, typeof(sol.u), typeof(sol.du), typeof(u_analytic), typeof(errors),
+ DAESolution{
+ T,
+ N,
+ typeof(sol.u),
+ typeof(sol.du),
+ typeof(u_analytic),
+ typeof(errors),
typeof(sol.t),
- typeof(sol.prob), typeof(sol.alg), typeof(sol.interp), typeof(sol.stats)}(sol.u,
+ typeof(sol.prob),
+ typeof(sol.alg),
+ typeof(sol.interp),
+ typeof(sol.stats),
+ }(sol.u,
sol.du,
u_analytic,
errors,
@@ -175,9 +216,19 @@ function build_solution(sol::AbstractDAESolution{T, N}, u_analytic, errors) wher
end
function solution_new_retcode(sol::AbstractDAESolution{T, N}, retcode) where {T, N}
- DAESolution{T, N, typeof(sol.u), typeof(sol.du), typeof(sol.u_analytic),
- typeof(sol.errors), typeof(sol.t),
- typeof(sol.prob), typeof(sol.alg), typeof(sol.interp), typeof(sol.stats)}(sol.u,
+ DAESolution{
+ T,
+ N,
+ typeof(sol.u),
+ typeof(sol.du),
+ typeof(sol.u_analytic),
+ typeof(sol.errors),
+ typeof(sol.t),
+ typeof(sol.prob),
+ typeof(sol.alg),
+ typeof(sol.interp),
+ typeof(sol.stats),
+ }(sol.u,
sol.du,
sol.u_analytic,
sol.errors,
@@ -192,9 +243,19 @@ function solution_new_retcode(sol::AbstractDAESolution{T, N}, retcode) where {T,
end
function solution_new_tslocation(sol::AbstractDAESolution{T, N}, tslocation) where {T, N}
- DAESolution{T, N, typeof(sol.u), typeof(sol.du), typeof(sol.u_analytic),
- typeof(sol.errors), typeof(sol.t),
- typeof(sol.prob), typeof(sol.alg), typeof(sol.interp), typeof(sol.stats)}(sol.u,
+ DAESolution{
+ T,
+ N,
+ typeof(sol.u),
+ typeof(sol.du),
+ typeof(sol.u_analytic),
+ typeof(sol.errors),
+ typeof(sol.t),
+ typeof(sol.prob),
+ typeof(sol.alg),
+ typeof(sol.interp),
+ typeof(sol.stats),
+ }(sol.u,
sol.du,
sol.u_analytic,
sol.errors,
@@ -209,14 +270,21 @@ function solution_new_tslocation(sol::AbstractDAESolution{T, N}, tslocation) whe
end
function solution_slice(sol::AbstractDAESolution{T, N}, I) where {T, N}
- DAESolution{T, N, typeof(sol.u), typeof(sol.du), typeof(sol.u_analytic),
- typeof(sol.errors), typeof(sol.t),
- typeof(sol.prob), typeof(sol.alg), typeof(sol.interp), typeof(sol.stats)}(sol.u[I],
+ DAESolution{
+ T,
+ N,
+ typeof(sol.u),
+ typeof(sol.du),
+ typeof(sol.u_analytic),
+ typeof(sol.errors),
+ typeof(sol.t),
+ typeof(sol.prob),
+ typeof(sol.alg),
+ typeof(sol.interp),
+ typeof(sol.stats),
+ }(sol.u[I],
sol.du[I],
- sol.u_analytic ===
- nothing ?
- nothing :
- sol.u_analytic[I],
+ sol.u_analytic === nothing ? nothing : sol.u_analytic[I],
sol.errors,
sol.t[I],
sol.prob,
diff --git a/src/solutions/nonlinear_solutions.jl b/src/solutions/nonlinear_solutions.jl
index 2a8eedb70c..339d05c973 100644
--- a/src/solutions/nonlinear_solutions.jl
+++ b/src/solutions/nonlinear_solutions.jl
@@ -27,8 +27,11 @@ function Base.show(io::IO, ::MIME"text/plain", s::NLStats)
end
function Base.merge(s1::NLStats, s2::NLStats)
- NLStats(s1.nf + s2.nf, s1.njacs + s2.njacs, s1.nfactors + s2.nfactors,
- s1.nsolve + s2.nsolve, s1.nsteps + s2.nsteps)
+ NLStats(s1.nf + s2.nf,
+ s1.njacs + s2.njacs,
+ s1.nfactors + s2.nfactors,
+ s1.nsolve + s2.nsolve,
+ s1.nsteps + s2.nsteps)
end
"""
@@ -73,20 +76,41 @@ const SteadyStateSolution = NonlinearSolution
get_p(p::AbstractNonlinearSolution) = p.prob.p
function build_solution(prob::AbstractNonlinearProblem,
- alg, u, resid; calculate_error = true,
- retcode = ReturnCode.Default,
- original = nothing,
- left = nothing,
- right = nothing,
- stats = nothing,
- trace = nothing,
- kwargs...)
+ alg,
+ u,
+ resid;
+ calculate_error = true,
+ retcode = ReturnCode.Default,
+ original = nothing,
+ left = nothing,
+ right = nothing,
+ stats = nothing,
+ trace = nothing,
+ kwargs...,)
T = eltype(eltype(u))
N = ndims(u)
- NonlinearSolution{T, N, typeof(u), typeof(resid), typeof(prob), typeof(alg),
- typeof(original), typeof(left), typeof(stats), typeof(trace)}(u, resid, prob, alg,
- retcode, original, left, right, stats, trace)
+ NonlinearSolution{
+ T,
+ N,
+ typeof(u),
+ typeof(resid),
+ typeof(prob),
+ typeof(alg),
+ typeof(original),
+ typeof(left),
+ typeof(stats),
+ typeof(trace),
+ }(u,
+ resid,
+ prob,
+ alg,
+ retcode,
+ original,
+ left,
+ right,
+ stats,
+ trace)
end
function sensitivity_solution(sol::AbstractNonlinearSolution, u)
@@ -96,8 +120,25 @@ function sensitivity_solution(sol::AbstractNonlinearSolution, u)
# Some of the subtypes might not have a trace field
trace = hasfield(typeof(sol), :trace) ? sol.trace : nothing
- NonlinearSolution{T, N, typeof(u), typeof(sol.resid), typeof(sol.prob),
- typeof(sol.alg), typeof(sol.original), typeof(sol.left),
- typeof(sol.stats), typeof(trace)}(u, sol.resid, sol.prob, sol.alg, sol.retcode,
- sol.original, sol.left, sol.right, sol.stats, trace)
+ NonlinearSolution{
+ T,
+ N,
+ typeof(u),
+ typeof(sol.resid),
+ typeof(sol.prob),
+ typeof(sol.alg),
+ typeof(sol.original),
+ typeof(sol.left),
+ typeof(sol.stats),
+ typeof(trace),
+ }(u,
+ sol.resid,
+ sol.prob,
+ sol.alg,
+ sol.retcode,
+ sol.original,
+ sol.left,
+ sol.right,
+ sol.stats,
+ trace)
end
diff --git a/src/solutions/ode_solutions.jl b/src/solutions/ode_solutions.jl
index b3ba74fe6e..b6a3171922 100644
--- a/src/solutions/ode_solutions.jl
+++ b/src/solutions/ode_solutions.jl
@@ -56,8 +56,7 @@ function Base.show(io::IO, ::MIME"text/plain", s::DEStats)
end
function Base.merge(a::DEStats, b::DEStats)
- DEStats(
- a.nf + b.nf,
+ DEStats(a.nf + b.nf,
a.nf2 + b.nf2,
a.nw + b.nw,
a.nsolve + b.nsolve,
@@ -67,8 +66,7 @@ function Base.merge(a::DEStats, b::DEStats)
a.ncondition + b.ncondition,
a.naccept + b.naccept,
a.nreject + b.nreject,
- max(a.maxeig, b.maxeig),
- )
+ max(a.maxeig, b.maxeig))
end
"""
@@ -99,9 +97,20 @@ https://docs.sciml.ai/DiffEqDocs/stable/basics/solution/
exited due to an error. For more details, see
[the return code documentation](https://docs.sciml.ai/SciMLBase/stable/interfaces/Solutions/#retcodes).
"""
-struct ODESolution{T, N, uType, uType2, DType, tType, rateType, P, A, IType, S,
- AC <: Union{Nothing, Vector{Int}}} <:
- AbstractODESolution{T, N, uType}
+struct ODESolution{
+ T,
+ N,
+ uType,
+ uType2,
+ DType,
+ tType,
+ rateType,
+ P,
+ A,
+ IType,
+ S,
+ AC <: Union{Nothing, Vector{Int}},
+} <: AbstractODESolution{T, N, uType}
u::uType
u_analytic::uType2
errors::DType
@@ -125,51 +134,101 @@ Base.@propagate_inbounds function Base.getproperty(x::AbstractODESolution, s::Sy
return getfield(x, s)
end
-function ODESolution{T, N}(u, u_analytic, errors, t, k, prob, alg, interp, dense,
- tslocation, stats, alg_choice, retcode) where {T, N}
- return ODESolution{T, N, typeof(u), typeof(u_analytic), typeof(errors), typeof(t),
- typeof(k), typeof(prob), typeof(alg), typeof(interp),
+function ODESolution{T, N}(u,
+ u_analytic,
+ errors,
+ t,
+ k,
+ prob,
+ alg,
+ interp,
+ dense,
+ tslocation,
+ stats,
+ alg_choice,
+ retcode) where {T, N}
+ return ODESolution{
+ T,
+ N,
+ typeof(u),
+ typeof(u_analytic),
+ typeof(errors),
+ typeof(t),
+ typeof(k),
+ typeof(prob),
+ typeof(alg),
+ typeof(interp),
typeof(stats),
- typeof(alg_choice)}(u, u_analytic, errors, t, k, prob, alg, interp,
- dense, tslocation, stats, alg_choice, retcode)
+ typeof(alg_choice),
+ }(u,
+ u_analytic,
+ errors,
+ t,
+ k,
+ prob,
+ alg,
+ interp,
+ dense,
+ tslocation,
+ stats,
+ alg_choice,
+ retcode)
end
-function (sol::AbstractODESolution)(t, ::Type{deriv} = Val{0}; idxs = nothing,
- continuity = :left) where {deriv}
+function (sol::AbstractODESolution)(t,
+ ::Type{deriv} = Val{0};
+ idxs = nothing,
+ continuity = :left,) where {deriv}
sol(t, deriv, idxs, continuity)
end
-function (sol::AbstractODESolution)(v, t, ::Type{deriv} = Val{0}; idxs = nothing,
- continuity = :left) where {deriv}
+function (sol::AbstractODESolution)(v,
+ t,
+ ::Type{deriv} = Val{0};
+ idxs = nothing,
+ continuity = :left,) where {deriv}
sol.interp(v, t, idxs, deriv, sol.prob.p, continuity)
end
-function (sol::AbstractODESolution)(t::Number, ::Type{deriv}, idxs::Nothing,
- continuity) where {deriv}
+function (sol::AbstractODESolution)(t::Number,
+ ::Type{deriv},
+ idxs::Nothing,
+ continuity) where {deriv}
sol.interp(t, idxs, deriv, sol.prob.p, continuity)
end
-function (sol::AbstractODESolution)(t::AbstractVector{<:Number}, ::Type{deriv},
- idxs::Nothing, continuity) where {deriv}
+function (sol::AbstractODESolution)(t::AbstractVector{<:Number},
+ ::Type{deriv},
+ idxs::Nothing,
+ continuity) where {deriv}
augment(sol.interp(t, idxs, deriv, sol.prob.p, continuity), sol)
end
-function (sol::AbstractODESolution)(t::Number, ::Type{deriv}, idxs::Integer,
- continuity) where {deriv}
+function (sol::AbstractODESolution)(t::Number,
+ ::Type{deriv},
+ idxs::Integer,
+ continuity) where {deriv}
sol.interp(t, idxs, deriv, sol.prob.p, continuity)
end
-function (sol::AbstractODESolution)(t::Number, ::Type{deriv},
- idxs::AbstractVector{<:Integer},
- continuity) where {deriv}
+function (sol::AbstractODESolution)(t::Number,
+ ::Type{deriv},
+ idxs::AbstractVector{<:Integer},
+ continuity) where {deriv}
sol.interp(t, idxs, deriv, sol.prob.p, continuity)
end
-function (sol::AbstractODESolution)(t::AbstractVector{<:Number}, ::Type{deriv},
- idxs::Integer, continuity) where {deriv}
+function (sol::AbstractODESolution)(t::AbstractVector{<:Number},
+ ::Type{deriv},
+ idxs::Integer,
+ continuity) where {deriv}
A = sol.interp(t, idxs, deriv, sol.prob.p, continuity)
observed = has_observed(sol.prob.f) ? sol.prob.f.observed : DEFAULT_OBSERVED
p = hasproperty(sol.prob, :p) ? sol.prob.p : nothing
if has_sys(sol.prob.f)
- DiffEqArray{typeof(A).parameters[1:4]..., typeof(sol.prob.f.sys), typeof(observed),
- typeof(p)}(A.u,
+ DiffEqArray{
+ typeof(A).parameters[1:4]...,
+ typeof(sol.prob.f.sys),
+ typeof(observed),
+ typeof(p),
+ }(A.u,
A.t,
sol.prob.f.sys,
observed,
@@ -180,15 +239,20 @@ function (sol::AbstractODESolution)(t::AbstractVector{<:Number}, ::Type{deriv},
DiffEqArray(A.u, A.t, syms, getindepsym(sol), observed, p)
end
end
-function (sol::AbstractODESolution)(t::AbstractVector{<:Number}, ::Type{deriv},
- idxs::AbstractVector{<:Integer},
- continuity) where {deriv}
+function (sol::AbstractODESolution)(t::AbstractVector{<:Number},
+ ::Type{deriv},
+ idxs::AbstractVector{<:Integer},
+ continuity) where {deriv}
A = sol.interp(t, idxs, deriv, sol.prob.p, continuity)
observed = has_observed(sol.prob.f) ? sol.prob.f.observed : DEFAULT_OBSERVED
p = hasproperty(sol.prob, :p) ? sol.prob.p : nothing
if has_sys(sol.prob.f)
- DiffEqArray{typeof(A).parameters[1:4]..., typeof(sol.prob.f.sys), typeof(observed),
- typeof(p)}(A.u,
+ DiffEqArray{
+ typeof(A).parameters[1:4]...,
+ typeof(sol.prob.f.sys),
+ typeof(observed),
+ typeof(p),
+ }(A.u,
A.t,
sol.prob.f.sys,
observed,
@@ -200,59 +264,83 @@ function (sol::AbstractODESolution)(t::AbstractVector{<:Number}, ::Type{deriv},
end
end
-function (sol::AbstractODESolution)(t::Number, ::Type{deriv}, idxs,
- continuity) where {deriv}
+function (sol::AbstractODESolution)(t::Number,
+ ::Type{deriv},
+ idxs,
+ continuity) where {deriv}
issymbollike(idxs) || error("Incorrect specification of `idxs`")
augment(sol.interp([t], nothing, deriv, sol.prob.p, continuity), sol)[idxs][1]
end
-function (sol::AbstractODESolution)(t::Number, ::Type{deriv}, idxs::AbstractVector,
- continuity) where {deriv}
+function (sol::AbstractODESolution)(t::Number,
+ ::Type{deriv},
+ idxs::AbstractVector,
+ continuity) where {deriv}
all(issymbollike.(idxs)) || error("Incorrect specification of `idxs`")
interp_sol = augment(sol.interp([t], nothing, deriv, sol.prob.p, continuity), sol)
[first(interp_sol[idx]) for idx in idxs]
end
-function (sol::AbstractODESolution)(t::AbstractVector{<:Number}, ::Type{deriv}, idxs,
- continuity) where {deriv}
+function (sol::AbstractODESolution)(t::AbstractVector{<:Number},
+ ::Type{deriv},
+ idxs,
+ continuity) where {deriv}
issymbollike(idxs) || error("Incorrect specification of `idxs`")
interp_sol = augment(sol.interp(t, nothing, deriv, sol.prob.p, continuity), sol)
observed = has_observed(sol.prob.f) ? sol.prob.f.observed : DEFAULT_OBSERVED
p = hasproperty(sol.prob, :p) ? sol.prob.p : nothing
if has_sys(sol.prob.f)
- return DiffEqArray(interp_sol[idxs], t, [idxs],
- independent_variables(sol.prob.f.sys), observed, p)
+ return DiffEqArray(interp_sol[idxs],
+ t,
+ [idxs],
+ independent_variables(sol.prob.f.sys),
+ observed,
+ p)
else
return DiffEqArray(interp_sol[idxs], t, [idxs], getindepsym(sol), observed, p)
end
end
-function (sol::AbstractODESolution)(t::AbstractVector{<:Number}, ::Type{deriv},
- idxs::AbstractVector, continuity) where {deriv}
+function (sol::AbstractODESolution)(t::AbstractVector{<:Number},
+ ::Type{deriv},
+ idxs::AbstractVector,
+ continuity) where {deriv}
all(issymbollike.(idxs)) || error("Incorrect specification of `idxs`")
interp_sol = augment(sol.interp(t, nothing, deriv, sol.prob.p, continuity), sol)
observed = has_observed(sol.prob.f) ? sol.prob.f.observed : DEFAULT_OBSERVED
p = hasproperty(sol.prob, :p) ? sol.prob.p : nothing
if has_sys(sol.prob.f)
- return DiffEqArray([[interp_sol[idx][i] for idx in idxs] for i in 1:length(t)], t,
+ return DiffEqArray([[interp_sol[idx][i] for idx in idxs] for i in 1:length(t)],
+ t,
idxs,
- independent_variables(sol.prob.f.sys), observed, p)
+ independent_variables(sol.prob.f.sys),
+ observed,
+ p)
else
- return DiffEqArray([[interp_sol[idx][i] for idx in idxs] for i in 1:length(t)], t,
+ return DiffEqArray([[interp_sol[idx][i] for idx in idxs] for i in 1:length(t)],
+ t,
idxs,
- getindepsym(sol), observed, p)
+ getindepsym(sol),
+ observed,
+ p)
end
end
function build_solution(prob::Union{AbstractODEProblem, AbstractDDEProblem},
- alg, t, u; timeseries_errors = length(u) > 2,
- dense = false, dense_errors = dense,
- calculate_error = true,
- k = nothing,
- alg_choice = nothing,
- interp = LinearInterpolation(t, u),
- retcode = ReturnCode.Default, destats = missing, stats = nothing,
- kwargs...)
+ alg,
+ t,
+ u;
+ timeseries_errors = length(u) > 2,
+ dense = false,
+ dense_errors = dense,
+ calculate_error = true,
+ k = nothing,
+ alg_choice = nothing,
+ interp = LinearInterpolation(t, u),
+ retcode = ReturnCode.Default,
+ destats = missing,
+ stats = nothing,
+ kwargs...,)
T = eltype(eltype(u))
if prob.u0 === nothing
@@ -283,7 +371,8 @@ function build_solution(prob::Union{AbstractODEProblem, AbstractDDEProblem},
sol = ODESolution{T, N}(u,
u_analytic,
errors,
- t, k,
+ t,
+ k,
prob,
alg,
interp,
@@ -293,15 +382,17 @@ function build_solution(prob::Union{AbstractODEProblem, AbstractDDEProblem},
alg_choice,
retcode)
if calculate_error
- calculate_solution_errors!(sol; timeseries_errors = timeseries_errors,
- dense_errors = dense_errors)
+ calculate_solution_errors!(sol;
+ timeseries_errors = timeseries_errors,
+ dense_errors = dense_errors,)
end
return sol
else
return ODESolution{T, N}(u,
nothing,
nothing,
- t, k,
+ t,
+ k,
prob,
alg,
interp,
@@ -313,8 +404,10 @@ function build_solution(prob::Union{AbstractODEProblem, AbstractDDEProblem},
end
end
-function calculate_solution_errors!(sol::AbstractODESolution; fill_uanalytic = true,
- timeseries_errors = true, dense_errors = true)
+function calculate_solution_errors!(sol::AbstractODESolution;
+ fill_uanalytic = true,
+ timeseries_errors = true,
+ dense_errors = true,)
f = sol.prob.f
if fill_uanalytic
@@ -338,15 +431,14 @@ function calculate_solution_errors!(sol::AbstractODESolution; fill_uanalytic = t
sol.errors[:l2] = norm(sqrt(recursive_mean(vecvecapply((x) -> float.(x) .^ 2,
sol.u - sol.u_analytic))))
if sol.dense && dense_errors
- densetimes = collect(range(sol.t[1], stop = sol.t[end], length = 100))
+ densetimes = collect(range(sol.t[1]; stop = sol.t[end], length = 100))
interp_u = sol(densetimes)
interp_analytic = VectorOfArray([f.analytic(sol.prob.u0, sol.prob.p, t)
for t in densetimes])
sol.errors[:L∞] = norm(maximum(vecvecapply((x) -> abs.(x),
interp_u - interp_analytic)))
sol.errors[:L2] = norm(sqrt(recursive_mean(vecvecapply((x) -> float.(x) .^ 2,
- interp_u -
- interp_analytic))))
+ interp_u - interp_analytic))))
end
end
end
@@ -427,9 +519,17 @@ function sensitivity_solution(sol::ODESolution, u, t)
SensitivityInterpolation(t, u)
end
- ODESolution{T, N}(u, sol.u_analytic, sol.errors, t,
- nothing, sol.prob,
- sol.alg, interp,
- sol.dense, sol.tslocation,
- sol.stats, sol.alg_choice, sol.retcode)
+ ODESolution{T, N}(u,
+ sol.u_analytic,
+ sol.errors,
+ t,
+ nothing,
+ sol.prob,
+ sol.alg,
+ interp,
+ sol.dense,
+ sol.tslocation,
+ sol.stats,
+ sol.alg_choice,
+ sol.retcode)
end
diff --git a/src/solutions/optimization_solutions.jl b/src/solutions/optimization_solutions.jl
index 9f04a5bdd4..9718978670 100644
--- a/src/solutions/optimization_solutions.jl
+++ b/src/solutions/optimization_solutions.jl
@@ -30,22 +30,38 @@ struct OptimizationSolution{T, N, uType, C <: AbstractOptimizationCache, A, OV,
end
function build_solution(cache::AbstractOptimizationCache,
- alg, u, objective;
- retcode = ReturnCode.Default,
- original = nothing,
- solve_time = nothing,
- stats = nothing,
- kwargs...)
+ alg,
+ u,
+ objective;
+ retcode = ReturnCode.Default,
+ original = nothing,
+ solve_time = nothing,
+ stats = nothing,
+ kwargs...,)
T = eltype(eltype(u))
N = ndims(u)
#Backwords compatibility, remove ASAP
retcode = symbol_to_ReturnCode(retcode)
- OptimizationSolution{T, N, typeof(u), typeof(cache), typeof(alg),
- typeof(objective), typeof(original), typeof(solve_time),
- typeof(stats)}(u, cache, alg, objective, retcode, original,
- solve_time, stats)
+ OptimizationSolution{
+ T,
+ N,
+ typeof(u),
+ typeof(cache),
+ typeof(alg),
+ typeof(objective),
+ typeof(original),
+ typeof(solve_time),
+ typeof(stats),
+ }(u,
+ cache,
+ alg,
+ objective,
+ retcode,
+ original,
+ solve_time,
+ stats)
end
TruncatedStacktraces.@truncate_stacktrace OptimizationSolution 1 2
@@ -63,10 +79,12 @@ end
# for compatibility
function build_solution(prob::AbstractOptimizationProblem,
- alg, u, objective;
- retcode = ReturnCode.Default,
- original = nothing,
- kwargs...)
+ alg,
+ u,
+ objective;
+ retcode = ReturnCode.Default,
+ original = nothing,
+ kwargs...,)
T = eltype(eltype(u))
N = ndims(u)
@@ -79,8 +97,18 @@ function build_solution(prob::AbstractOptimizationProblem,
#Backwords compatibility, remove ASAP
retcode = symbol_to_ReturnCode(retcode)
- OptimizationSolution{T, N, typeof(u), typeof(cache), typeof(alg),
- typeof(objective), typeof(original)}(u, cache, alg, objective,
+ OptimizationSolution{
+ T,
+ N,
+ typeof(u),
+ typeof(cache),
+ typeof(alg),
+ typeof(objective),
+ typeof(original),
+ }(u,
+ cache,
+ alg,
+ objective,
retcode,
original)
end
@@ -104,10 +132,9 @@ function Base.show(io::IO, A::AbstractOptimizationSolution)
end
Base.@propagate_inbounds function Base.getproperty(x::AbstractOptimizationSolution,
- s::Symbol)
+ s::Symbol)
if s === :minimizer
- Base.depwarn("`sol.minimizer` is deprecated. Use `sol.u` instead.",
- "sol.minimizer")
+ Base.depwarn("`sol.minimizer` is deprecated. Use `sol.u` instead.", "sol.minimizer")
return getfield(x, :u)
elseif s === :minimum
Base.depwarn("`sol.minimum` is deprecated. Use `sol.objective` instead.",
@@ -124,8 +151,11 @@ end
function Base.summary(io::IO, A::AbstractOptimizationSolution)
type_color, no_color = get_colorizers(io)
print(io,
- type_color, nameof(typeof(A)),
- no_color, " with uType ",
- type_color, eltype(A.u),
+ type_color,
+ nameof(typeof(A)),
+ no_color,
+ " with uType ",
+ type_color,
+ eltype(A.u),
no_color)
end
diff --git a/src/solutions/pde_solutions.jl b/src/solutions/pde_solutions.jl
index 8093eb8916..c617d1cdd4 100644
--- a/src/solutions/pde_solutions.jl
+++ b/src/solutions/pde_solutions.jl
@@ -30,9 +30,22 @@ Solution to a PDE, solved from an ODEProblem generated by a discretizer.
[the return code documentation](https://docs.sciml.ai/SciMLBase/stable/interfaces/Solutions/#retcodes).
- `stats`: statistics of the solver, such as the number of function evaluations required.
"""
-struct PDETimeSeriesSolution{T, N, uType, Disc, Sol, DType, tType, domType, ivType, dvType,
- P, A,
- IType, S} <: AbstractPDETimeSeriesSolution{T, N, uType, Disc}
+struct PDETimeSeriesSolution{
+ T,
+ N,
+ uType,
+ Disc,
+ Sol,
+ DType,
+ tType,
+ domType,
+ ivType,
+ dvType,
+ P,
+ A,
+ IType,
+ S,
+} <: AbstractPDETimeSeriesSolution{T, N, uType, Disc}
u::uType
original_sol::Sol
errors::DType
@@ -81,8 +94,8 @@ Solution to a PDE, solved from an NonlinearProblem generated by a discretizer.
callback (`sol.retcode === ReturnCode.Terminated`), or whether it exited due to an error. For more
details, see the return code section of the ODEProblem.jl documentation.
"""
-struct PDENoTimeSolution{T, N, uType, Disc, Sol, domType, ivType, dvType, P, A,
- IType, S} <: AbstractPDENoTimeSolution{T, N, uType, Disc}
+struct PDENoTimeSolution{T, N, uType, Disc, Sol, domType, ivType, dvType, P, A, IType, S} <:
+ AbstractPDENoTimeSolution{T, N, uType, Disc}
u::uType
original_sol::Sol
ivdomain::domType
@@ -98,8 +111,10 @@ end
TruncatedStacktraces.@truncate_stacktrace PDENoTimeSolution 1 2
-const PDESolution{T, N, S, D} = Union{PDETimeSeriesSolution{T, N, S, D},
- PDENoTimeSolution{T, N, S, D}}
+const PDESolution{T, N, S, D} = Union{
+ PDETimeSeriesSolution{T, N, S, D},
+ PDENoTimeSolution{T, N, S, D},
+}
"""
Dispatch for the following function should be implemented in each discretizer package, for their relevant metadata type `D`.
@@ -112,9 +127,7 @@ end
Intercept PDE wrapping. Please implement a method for the PDESolution types in your discretizer.
"""
function SciMLBase.wrap_sol(sol,
- metadata::AbstractDiscretizationMetadata{hasTime}) where {
- hasTime,
-}
+ metadata::AbstractDiscretizationMetadata{hasTime}) where {hasTime}
if hasTime isa Val{true}
return PDETimeSeriesSolution(sol, metadata)
else
diff --git a/src/solutions/rode_solutions.jl b/src/solutions/rode_solutions.jl
index 350c3c8237..7342a1e70b 100644
--- a/src/solutions/rode_solutions.jl
+++ b/src/solutions/rode_solutions.jl
@@ -32,9 +32,20 @@ https://docs.sciml.ai/DiffEqDocs/stable/basics/solution/
exited due to an error. For more details, see
[the return code documentation](https://docs.sciml.ai/SciMLBase/stable/interfaces/Solutions/#retcodes).
"""
-struct RODESolution{T, N, uType, uType2, DType, tType, randType, P, A, IType, S,
- AC <: Union{Nothing, Vector{Int}}} <:
- AbstractRODESolution{T, N, uType}
+struct RODESolution{
+ T,
+ N,
+ uType,
+ uType2,
+ DType,
+ tType,
+ randType,
+ P,
+ A,
+ IType,
+ S,
+ AC <: Union{Nothing, Vector{Int}},
+} <: AbstractRODESolution{T, N, uType}
u::uType
u_analytic::uType2
errors::DType
@@ -61,22 +72,36 @@ end
TruncatedStacktraces.@truncate_stacktrace RODESolution 1 2
-function (sol::RODESolution)(t, ::Type{deriv} = Val{0}; idxs = nothing,
- continuity = :left) where {deriv}
+function (sol::RODESolution)(t,
+ ::Type{deriv} = Val{0};
+ idxs = nothing,
+ continuity = :left,) where {deriv}
sol.interp(t, idxs, deriv, sol.prob.p, continuity)
end
-function (sol::RODESolution)(v, t, ::Type{deriv} = Val{0}; idxs = nothing,
- continuity = :left) where {deriv}
+function (sol::RODESolution)(v,
+ t,
+ ::Type{deriv} = Val{0};
+ idxs = nothing,
+ continuity = :left,) where {deriv}
sol.interp(v, t, idxs, deriv, sol.prob.p, continuity)
end
function build_solution(prob::Union{AbstractRODEProblem, AbstractSDDEProblem},
- alg, t, u; W = nothing, timeseries_errors = length(u) > 2,
- dense = false, dense_errors = dense, calculate_error = true,
- interp = LinearInterpolation(t, u),
- retcode = ReturnCode.Default,
- alg_choice = nothing,
- seed = UInt64(0), destats = missing, stats = nothing, kwargs...)
+ alg,
+ t,
+ u;
+ W = nothing,
+ timeseries_errors = length(u) > 2,
+ dense = false,
+ dense_errors = dense,
+ calculate_error = true,
+ interp = LinearInterpolation(t, u),
+ retcode = ReturnCode.Default,
+ alg_choice = nothing,
+ seed = UInt64(0),
+ destats = missing,
+ stats = nothing,
+ kwargs...,)
T = eltype(eltype(u))
N = length((size(prob.u0)..., length(u)))
@@ -99,13 +124,24 @@ function build_solution(prob::Union{AbstractRODEProblem, AbstractSDDEProblem},
if has_analytic(f)
u_analytic = Vector{typeof(prob.u0)}()
errors = Dict{Symbol, real(eltype(prob.u0))}()
- sol = RODESolution{T, N, typeof(u), typeof(u_analytic), typeof(errors), typeof(t),
+ sol = RODESolution{
+ T,
+ N,
+ typeof(u),
+ typeof(u_analytic),
+ typeof(errors),
+ typeof(t),
typeof(W),
- typeof(prob), typeof(alg), typeof(interp), typeof(stats),
- typeof(alg_choice)}(u,
+ typeof(prob),
+ typeof(alg),
+ typeof(interp),
+ typeof(stats),
+ typeof(alg_choice),
+ }(u,
u_analytic,
errors,
- t, W,
+ t,
+ W,
prob,
alg,
interp,
@@ -117,23 +153,47 @@ function build_solution(prob::Union{AbstractRODEProblem, AbstractSDDEProblem},
seed)
if calculate_error
- calculate_solution_errors!(sol; timeseries_errors = timeseries_errors,
- dense_errors = dense_errors)
+ calculate_solution_errors!(sol;
+ timeseries_errors = timeseries_errors,
+ dense_errors = dense_errors,)
end
return sol
else
- return RODESolution{T, N, typeof(u), Nothing, Nothing, typeof(t),
- typeof(W), typeof(prob), typeof(alg), typeof(interp),
- typeof(stats), typeof(alg_choice)}(u, nothing, nothing, t, W,
- prob, alg, interp,
- dense, 0, stats,
- alg_choice, retcode, seed)
+ return RODESolution{
+ T,
+ N,
+ typeof(u),
+ Nothing,
+ Nothing,
+ typeof(t),
+ typeof(W),
+ typeof(prob),
+ typeof(alg),
+ typeof(interp),
+ typeof(stats),
+ typeof(alg_choice),
+ }(u,
+ nothing,
+ nothing,
+ t,
+ W,
+ prob,
+ alg,
+ interp,
+ dense,
+ 0,
+ stats,
+ alg_choice,
+ retcode,
+ seed)
end
end
-function calculate_solution_errors!(sol::AbstractRODESolution; fill_uanalytic = true,
- timeseries_errors = true, dense_errors = true)
+function calculate_solution_errors!(sol::AbstractRODESolution;
+ fill_uanalytic = true,
+ timeseries_errors = true,
+ dense_errors = true,)
if sol.prob.f isa Tuple
f = sol.prob.f[1]
else
@@ -166,67 +226,136 @@ function calculate_solution_errors!(sol::AbstractRODESolution; fill_uanalytic =
sol.u - sol.u_analytic))))
end
if dense_errors
- densetimes = collect(range(sol.t[1], stop = sol.t[end], length = 100))
+ densetimes = collect(range(sol.t[1]; stop = sol.t[end], length = 100))
interp_u = sol(densetimes)
interp_analytic = [f.analytic(sol.u[1], sol.prob.p, t, sol.W(t)[1])
for t in densetimes]
sol.errors[:L∞] = norm(maximum(vecvecapply((x) -> abs.(x),
interp_u - interp_analytic)))
sol.errors[:L2] = norm(sqrt(recursive_mean(vecvecapply((x) -> float.(x) .^ 2,
- interp_u -
- interp_analytic))))
+ interp_u - interp_analytic))))
end
end
end
function build_solution(sol::AbstractRODESolution{T, N}, u_analytic, errors) where {T, N}
- RODESolution{T, N, typeof(sol.u), typeof(u_analytic), typeof(errors), typeof(sol.t),
- typeof(sol.W), typeof(sol.prob), typeof(sol.alg), typeof(sol.interp),
- typeof(sol.stats), typeof(sol.alg_choice)}(sol.u, u_analytic, errors,
- sol.t, sol.W, sol.prob,
- sol.alg, sol.interp,
- sol.dense, sol.tslocation,
- sol.stats, sol.alg_choice,
- sol.retcode, sol.seed)
+ RODESolution{
+ T,
+ N,
+ typeof(sol.u),
+ typeof(u_analytic),
+ typeof(errors),
+ typeof(sol.t),
+ typeof(sol.W),
+ typeof(sol.prob),
+ typeof(sol.alg),
+ typeof(sol.interp),
+ typeof(sol.stats),
+ typeof(sol.alg_choice),
+ }(sol.u,
+ u_analytic,
+ errors,
+ sol.t,
+ sol.W,
+ sol.prob,
+ sol.alg,
+ sol.interp,
+ sol.dense,
+ sol.tslocation,
+ sol.stats,
+ sol.alg_choice,
+ sol.retcode,
+ sol.seed)
end
function solution_new_retcode(sol::AbstractRODESolution{T, N}, retcode) where {T, N}
- RODESolution{T, N, typeof(sol.u), typeof(sol.u_analytic), typeof(sol.errors),
+ RODESolution{
+ T,
+ N,
+ typeof(sol.u),
+ typeof(sol.u_analytic),
+ typeof(sol.errors),
typeof(sol.t),
- typeof(sol.W), typeof(sol.prob), typeof(sol.alg), typeof(sol.interp),
- typeof(sol.stats), typeof(sol.alg_choice)}(sol.u, sol.u_analytic,
- sol.errors, sol.t, sol.W,
- sol.prob, sol.alg, sol.interp,
- sol.dense, sol.tslocation,
- sol.stats, sol.alg_choice,
- retcode, sol.seed)
+ typeof(sol.W),
+ typeof(sol.prob),
+ typeof(sol.alg),
+ typeof(sol.interp),
+ typeof(sol.stats),
+ typeof(sol.alg_choice),
+ }(sol.u,
+ sol.u_analytic,
+ sol.errors,
+ sol.t,
+ sol.W,
+ sol.prob,
+ sol.alg,
+ sol.interp,
+ sol.dense,
+ sol.tslocation,
+ sol.stats,
+ sol.alg_choice,
+ retcode,
+ sol.seed)
end
function solution_new_tslocation(sol::AbstractRODESolution{T, N}, tslocation) where {T, N}
- RODESolution{T, N, typeof(sol.u), typeof(sol.u_analytic), typeof(sol.errors),
+ RODESolution{
+ T,
+ N,
+ typeof(sol.u),
+ typeof(sol.u_analytic),
+ typeof(sol.errors),
typeof(sol.t),
- typeof(sol.W), typeof(sol.prob), typeof(sol.alg), typeof(sol.interp),
- typeof(sol.stats), typeof(sol.alg_choice)}(sol.u, sol.u_analytic,
- sol.errors, sol.t, sol.W,
- sol.prob, sol.alg, sol.interp,
- sol.dense, tslocation,
- sol.stats, sol.alg_choice,
- sol.retcode, sol.seed)
+ typeof(sol.W),
+ typeof(sol.prob),
+ typeof(sol.alg),
+ typeof(sol.interp),
+ typeof(sol.stats),
+ typeof(sol.alg_choice),
+ }(sol.u,
+ sol.u_analytic,
+ sol.errors,
+ sol.t,
+ sol.W,
+ sol.prob,
+ sol.alg,
+ sol.interp,
+ sol.dense,
+ tslocation,
+ sol.stats,
+ sol.alg_choice,
+ sol.retcode,
+ sol.seed)
end
function solution_slice(sol::AbstractRODESolution{T, N}, I) where {T, N}
- RODESolution{T, N, typeof(sol.u), typeof(sol.u_analytic), typeof(sol.errors),
+ RODESolution{
+ T,
+ N,
+ typeof(sol.u),
+ typeof(sol.u_analytic),
+ typeof(sol.errors),
typeof(sol.t),
- typeof(sol.W), typeof(sol.prob), typeof(sol.alg), typeof(sol.interp),
- typeof(sol.stats), typeof(sol.alg_choice)}(sol.u[I],
- sol.u_analytic === nothing ?
- nothing : sol.u_analytic,
- sol.errors, sol.t[I],
- sol.W, sol.prob,
- sol.alg, sol.interp,
- false, sol.tslocation,
- sol.stats, sol.alg_choice,
- sol.retcode, sol.seed)
+ typeof(sol.W),
+ typeof(sol.prob),
+ typeof(sol.alg),
+ typeof(sol.interp),
+ typeof(sol.stats),
+ typeof(sol.alg_choice),
+ }(sol.u[I],
+ sol.u_analytic === nothing ? nothing : sol.u_analytic,
+ sol.errors,
+ sol.t[I],
+ sol.W,
+ sol.prob,
+ sol.alg,
+ sol.interp,
+ false,
+ sol.tslocation,
+ sol.stats,
+ sol.alg_choice,
+ sol.retcode,
+ sol.seed)
end
function sensitivity_solution(sol::AbstractRODESolution, u, t)
@@ -240,10 +369,20 @@ function sensitivity_solution(sol::AbstractRODESolution, u, t)
SensitivityInterpolation(t, u)
end
- RODESolution{T, N, typeof(u), typeof(sol.u_analytic),
- typeof(sol.errors), typeof(t),
- typeof(nothing), typeof(sol.prob), typeof(sol.alg),
- typeof(sol.interp), typeof(sol.stats), typeof(sol.alg_choice)}(u,
+ RODESolution{
+ T,
+ N,
+ typeof(u),
+ typeof(sol.u_analytic),
+ typeof(sol.errors),
+ typeof(t),
+ typeof(nothing),
+ typeof(sol.prob),
+ typeof(sol.alg),
+ typeof(sol.interp),
+ typeof(sol.stats),
+ typeof(sol.alg_choice),
+ }(u,
sol.u_analytic,
sol.errors,
t,
diff --git a/src/solutions/solution_interface.jl b/src/solutions/solution_interface.jl
index 04dc0e1520..db15932128 100644
--- a/src/solutions/solution_interface.jl
+++ b/src/solutions/solution_interface.jl
@@ -42,30 +42,38 @@ end
# For handling ambiguities
for T in [Int, Colon]
@eval Base.@propagate_inbounds function Base.getindex(A::AbstractTimeseriesSolution,
- I::$T)
+ I::$T)
A.u[I]
end
end
Base.@propagate_inbounds function Base.getindex(A::AbstractTimeseriesSolution,
- I::Union{Int, AbstractArray{Int},
- CartesianIndex, Colon, BitArray,
- AbstractArray{Bool}}...)
+ I::Union{
+ Int,
+ AbstractArray{Int},
+ CartesianIndex,
+ Colon,
+ BitArray,
+ AbstractArray{Bool},
+ }...)
RecursiveArrayTools.VectorOfArray(A.u)[I...]
end
-Base.@propagate_inbounds function Base.getindex(A::AbstractTimeseriesSolution, i::Int,
- ::Colon)
+Base.@propagate_inbounds function Base.getindex(A::AbstractTimeseriesSolution,
+ i::Int,
+ ::Colon)
[A.u[j][i] for j in 1:length(A)]
end
-Base.@propagate_inbounds function Base.getindex(A::AbstractTimeseriesSolution, ::Colon,
- i::Int)
+Base.@propagate_inbounds function Base.getindex(A::AbstractTimeseriesSolution,
+ ::Colon,
+ i::Int)
A.u[i]
end
-Base.@propagate_inbounds function Base.getindex(A::AbstractTimeseriesSolution, i::Int,
- II::AbstractArray{Int})
+Base.@propagate_inbounds function Base.getindex(A::AbstractTimeseriesSolution,
+ i::Int,
+ II::AbstractArray{Int})
[A.u[j][i] for j in II]
end
Base.@propagate_inbounds function Base.getindex(A::AbstractTimeseriesSolution,
- ii::CartesianIndex)
+ ii::CartesianIndex)
ti = Tuple(ii)
i = last(ti)
jj = CartesianIndex(Base.front(ti))
@@ -80,7 +88,8 @@ Base.@propagate_inbounds function Base.getindex(A::AbstractTimeseriesSolution, s
i = sym_to_index(sym, A)
elseif all(issymbollike, sym)
if has_sys(A.prob.f) && all(Base.Fix1(is_param_sym, A.prob.f.sys), sym) ||
- !has_sys(A.prob.f) && has_paramsyms(A.prob.f) &&
+ !has_sys(A.prob.f) &&
+ has_paramsyms(A.prob.f) &&
all(in(getparamsyms(A)), Symbol.(sym))
return getindex.((A,), sym)
else
@@ -197,11 +206,17 @@ end
function Base.summary(io::IO, A::AbstractTimeseriesSolution)
type_color, no_color = get_colorizers(io)
print(io,
- type_color, nameof(typeof(A)),
- no_color, " with uType ",
- type_color, eltype(A.u),
- no_color, " and tType ",
- type_color, eltype(A.t), no_color)
+ type_color,
+ nameof(typeof(A)),
+ no_color,
+ " with uType ",
+ type_color,
+ eltype(A.u),
+ no_color,
+ " and tType ",
+ type_color,
+ eltype(A.t),
+ no_color)
end
function Base.show(io::IO, m::MIME"text/plain", A::AbstractTimeseriesSolution)
@@ -235,23 +250,23 @@ DEFAULT_PLOT_FUNC(x...) = (x...,)
DEFAULT_PLOT_FUNC(x, y, z) = (x, y, z) # For v0.5.2 bug
@recipe function f(sol::AbstractTimeseriesSolution;
- plot_analytic = false,
- denseplot = (sol.dense ||
- sol.prob isa AbstractDiscreteProblem) &&
- !(sol isa AbstractRODESolution) &&
- !(hasfield(typeof(sol), :interp) &&
- sol.interp isa SensitivityInterpolation),
- plotdensity = min(Int(1e5),
- sol.tslocation == 0 ?
- (sol.prob isa AbstractDiscreteProblem ?
- max(1000, 100 * length(sol)) :
- max(1000, 10 * length(sol))) :
- 1000 * sol.tslocation),
- tspan = nothing, axis_safety = 0.1,
- vars = nothing, idxs = nothing)
+ plot_analytic = false,
+ denseplot = (sol.dense || sol.prob isa AbstractDiscreteProblem) &&
+ !(sol isa AbstractRODESolution) &&
+ !(hasfield(typeof(sol), :interp) &&
+ sol.interp isa SensitivityInterpolation),
+ plotdensity = min(Int(1e5),
+ sol.tslocation == 0 ?
+ (sol.prob isa AbstractDiscreteProblem ? max(1000, 100 * length(sol)) :
+ max(1000, 10 * length(sol))) : 1000 * sol.tslocation),
+ tspan = nothing,
+ axis_safety = 0.1,
+ vars = nothing,
+ idxs = nothing,)
if vars !== nothing
Base.depwarn("To maintain consistency with solution indexing, keyword argument vars will be removed in a future version. Please use keyword argument idxs instead.",
- :f; force = true)
+ :f;
+ force = true,)
(idxs !== nothing) &&
error("Simultaneously using keywords vars and idxs is not supported. Please only use idxs.")
idxs = vars
@@ -266,9 +281,16 @@ DEFAULT_PLOT_FUNC(x, y, z) = (x, y, z) # For v0.5.2 bug
strs = cleansyms(syms)
tscale = get(plotattributes, :xscale, :identity)
- plot_vecs, labels = diffeq_to_arrays(sol, plot_analytic, denseplot,
- plotdensity, tspan, axis_safety,
- idxs, int_vars, tscale, strs)
+ plot_vecs, labels = diffeq_to_arrays(sol,
+ plot_analytic,
+ denseplot,
+ plotdensity,
+ tspan,
+ axis_safety,
+ idxs,
+ int_vars,
+ tscale,
+ strs)
tdir = sign(sol.t[end] - sol.t[1])
xflip --> tdir < 0
@@ -362,8 +384,16 @@ DEFAULT_PLOT_FUNC(x, y, z) = (x, y, z) # For v0.5.2 bug
(plot_vecs...,)
end
-function diffeq_to_arrays(sol, plot_analytic, denseplot, plotdensity, tspan, axis_safety,
- vars, int_vars, tscale, strs)
+function diffeq_to_arrays(sol,
+ plot_analytic,
+ denseplot,
+ plotdensity,
+ tspan,
+ axis_safety,
+ vars,
+ int_vars,
+ tscale,
+ strs)
if tspan === nothing
if sol.tslocation == 0
end_idx = length(sol)
@@ -378,7 +408,7 @@ function diffeq_to_arrays(sol, plot_analytic, denseplot, plotdensity, tspan, axi
# determine type of spacing for plott
densetspacer = if tscale in [:ln, :log10, :log2]
- (start, stop, n) -> exp10.(range(log10(start), stop = log10(stop), length = n))
+ (start, stop, n) -> exp10.(range(log10(start); stop = log10(stop), length = n))
else
(start, stop, n) -> range(start; stop = stop, length = n)
end
@@ -396,7 +426,8 @@ function diffeq_to_arrays(sol, plot_analytic, denseplot, plotdensity, tspan, axi
plot_timeseries = sol(plott)
if plot_analytic
if sol.prob.f isa Tuple
- plot_analytic_timeseries = [sol.prob.f[1].analytic(sol.prob.u0, sol.prob.p,
+ plot_analytic_timeseries = [sol.prob.f[1].analytic(sol.prob.u0,
+ sol.prob.p,
t) for t in plott]
else
plot_analytic_timeseries = [sol.prob.f.analytic(sol.prob.u0, sol.prob.p, t)
@@ -436,8 +467,13 @@ function diffeq_to_arrays(sol, plot_analytic, denseplot, plotdensity, tspan, axi
@assert length(var) == dims
end
# Should check that all have the same dims!
- plot_vecs, labels = solplot_vecs_and_labels(dims, int_vars, plot_timeseries, plott, sol,
- plot_analytic, plot_analytic_timeseries,
+ plot_vecs, labels = solplot_vecs_and_labels(dims,
+ int_vars,
+ plot_timeseries,
+ plott,
+ sol,
+ plot_analytic,
+ plot_analytic_timeseries,
strs)
end
@@ -524,7 +560,8 @@ function interpret_vars(vars, sol, syms)
if vars[end] isa AbstractArray
# If both axes are lists we zip (will fail if different lengths)
vars = collect(zip([DEFAULT_PLOT_FUNC for i in eachindex(vars[end - 1])],
- vars[end - 1], vars[end]))
+ vars[end - 1],
+ vars[end]))
else
# Just the x axis is a list
vars = [(DEFAULT_PLOT_FUNC, x, vars[end]) for x in vars[end - 1]]
@@ -651,8 +688,14 @@ function u_n(timeseries::AbstractArray, sym, sol, plott, plot_timeseries)
end
end
-function solplot_vecs_and_labels(dims, vars, plot_timeseries, plott, sol, plot_analytic,
- plot_analytic_timeseries, strs)
+function solplot_vecs_and_labels(dims,
+ vars,
+ plot_timeseries,
+ plott,
+ sol,
+ plot_analytic,
+ plot_analytic_timeseries,
+ strs)
plot_vecs = []
labels = String[]
for x in vars
@@ -682,7 +725,10 @@ function solplot_vecs_and_labels(dims, vars, plot_timeseries, plott, sol, plot_a
tmp = []
for j in 2:dims
push!(tmp,
- u_n(plot_analytic_timeseries, x[j], sol, plott,
+ u_n(plot_analytic_timeseries,
+ x[j],
+ sol,
+ plott,
plot_analytic_timeseries))
end
f = x[1]
diff --git a/src/solve.jl b/src/solve.jl
index f9c98c32f4..53f2551a3c 100644
--- a/src/solve.jl
+++ b/src/solve.jl
@@ -18,22 +18,22 @@ solve(prob::OptimizationProblem, alg::AbstractOptimizationAlgorithm, args...; kw
The arguments to `solve` are common across all of the optimizers.
These common arguments are:
-- `maxiters` (the maximum number of iterations)
-- `maxtime` (the maximum of time the optimization runs for)
-- `abstol` (absolute tolerance in changes of the objective value)
-- `reltol` (relative tolerance in changes of the objective value)
-- `callback` (a callback function)
+ - `maxiters` (the maximum number of iterations)
+ - `maxtime` (the maximum of time the optimization runs for)
+ - `abstol` (absolute tolerance in changes of the objective value)
+ - `reltol` (relative tolerance in changes of the objective value)
+ - `callback` (a callback function)
If the chosen global optimizer employs a local optimization method,
a similar set of common local optimizer arguments exists.
The common local optimizer arguments are:
-- `local_method` (optimizer used for local optimization in global method)
-- `local_maxiters` (the maximum number of iterations)
-- `local_maxtime` (the maximum of time the optimization runs for)
-- `local_abstol` (absolute tolerance in changes of the objective value)
-- `local_reltol` (relative tolerance in changes of the objective value)
-- `local_options` (NamedTuple of keyword arguments for local optimizer)
+ - `local_method` (optimizer used for local optimization in global method)
+ - `local_maxiters` (the maximum number of iterations)
+ - `local_maxtime` (the maximum of time the optimization runs for)
+ - `local_abstol` (absolute tolerance in changes of the objective value)
+ - `local_reltol` (relative tolerance in changes of the objective value)
+ - `local_options` (NamedTuple of keyword arguments for local optimizer)
Some optimizer algorithms have special keyword arguments documented in the
solver portion of the documentation and their respective documentation.
@@ -68,7 +68,7 @@ The loss function here returns the loss and the prediction i.e. the solution of
```julia
function predict(u)
- Array(solve(prob, Tsit5(), p = u))
+ Array(solve(prob, Tsit5(); p = u))
end
function loss(u, p)
@@ -80,16 +80,18 @@ callback = function (p, l, pred; doplot = false) #callback function to observe t
display(l)
# plot current prediction against data
if doplot
- pl = scatter(t, ode_data[1, :], label = "data")
- scatter!(pl, t, pred[1, :], label = "prediction")
+ pl = scatter(t, ode_data[1, :]; label = "data")
+ scatter!(pl, t, pred[1, :]; label = "prediction")
display(plot(pl))
end
return false
end
```
"""
-function solve(prob::OptimizationProblem, alg, args...;
- kwargs...)::AbstractOptimizationSolution
+function solve(prob::OptimizationProblem,
+ alg,
+ args...;
+ kwargs...,)::AbstractOptimizationSolution
if supports_opt_cache_interface(alg)
solve!(init(prob, alg, args...; kwargs...))
else
@@ -98,20 +100,27 @@ function solve(prob::OptimizationProblem, alg, args...;
end
end
-function SciMLBase.solve(prob::EnsembleProblem{T}, args...; kwargs...) where {T <: OptimizationProblem}
+function SciMLBase.solve(prob::EnsembleProblem{T},
+ args...;
+ kwargs...,) where {T <: OptimizationProblem}
return SciMLBase.__solve(prob, args...; kwargs...)
end
function _check_opt_alg(prob::OptimizationProblem, alg; kwargs...)
- !allowsbounds(alg) && (!isnothing(prob.lb) || !isnothing(prob.ub)) &&
+ !allowsbounds(alg) &&
+ (!isnothing(prob.lb) || !isnothing(prob.ub)) &&
throw(IncompatibleOptimizerError("The algorithm $(typeof(alg)) does not support box constraints. Either remove the `lb` or `ub` bounds passed to `OptimizationProblem` or use a different algorithm."))
- requiresbounds(alg) && isnothing(prob.lb) &&
+ requiresbounds(alg) &&
+ isnothing(prob.lb) &&
throw(IncompatibleOptimizerError("The algorithm $(typeof(alg)) requires box constraints. Either pass `lb` and `ub` bounds to `OptimizationProblem` or use a different algorithm."))
- !allowsconstraints(alg) && !isnothing(prob.f.cons) &&
+ !allowsconstraints(alg) &&
+ !isnothing(prob.f.cons) &&
throw(IncompatibleOptimizerError("The algorithm $(typeof(alg)) does not support constraints. Either remove the `cons` function passed to `OptimizationFunction` or use a different algorithm."))
- requiresconstraints(alg) && isnothing(prob.f.cons) &&
+ requiresconstraints(alg) &&
+ isnothing(prob.f.cons) &&
throw(IncompatibleOptimizerError("The algorithm $(typeof(alg)) requires constraints, pass them with the `cons` kwarg in `OptimizationFunction`."))
- !allowscallback(alg) && haskey(kwargs, :callback) &&
+ !allowscallback(alg) &&
+ haskey(kwargs, :callback) &&
throw(IncompatibleOptimizerError("The algorithm $(typeof(alg)) does not support callbacks, remove the `callback` keyword argument from the `solve` call."))
return
end
@@ -146,11 +155,11 @@ init(prob::OptimizationProblem, alg::AbstractOptimizationAlgorithm, args...; kwa
The arguments to `init` are the same as to `solve` and common across all of the optimizers.
These common arguments are:
-- `maxiters` (the maximum number of iterations)
-- `maxtime` (the maximum of time the optimization runs for)
-- `abstol` (absolute tolerance in changes of the objective value)
-- `reltol` (relative tolerance in changes of the objective value)
-- `callback` (a callback function)
+ - `maxiters` (the maximum number of iterations)
+ - `maxtime` (the maximum of time the optimization runs for)
+ - `abstol` (absolute tolerance in changes of the objective value)
+ - `reltol` (relative tolerance in changes of the objective value)
+ - `callback` (a callback function)
Some optimizer algorithms have special keyword arguments documented in the
solver portion of the documentation and their respective documentation.
@@ -180,8 +189,10 @@ end
# needs to be defined for each cache
supports_opt_cache_interface(alg) = false
function __solve(cache::AbstractOptimizationCache)::AbstractOptimizationSolution end
-function __init(prob::OptimizationProblem, alg, args...;
- kwargs...)::AbstractOptimizationCache
+function __init(prob::OptimizationProblem,
+ alg,
+ args...;
+ kwargs...,)::AbstractOptimizationCache
throw(OptimizerMissingError(alg))
end
diff --git a/src/tabletraits.jl b/src/tabletraits.jl
index a977cc4cd7..b847bc1754 100644
--- a/src/tabletraits.jl
+++ b/src/tabletraits.jl
@@ -10,8 +10,11 @@ struct AbstractTimeseriesSolutionRows{T, U}
end
function AbstractTimeseriesSolutionRows(names, types, t, u)
- AbstractTimeseriesSolutionRows(names, types,
- Dict(nm => i for (i, nm) in enumerate(names)), t, u)
+ AbstractTimeseriesSolutionRows(names,
+ types,
+ Dict(nm => i for (i, nm) in enumerate(names)),
+ t,
+ u)
end
Base.length(x::AbstractTimeseriesSolutionRows) = length(x.u)
diff --git a/src/utils.jl b/src/utils.jl
index 6eeaffc094..a1828fa711 100644
--- a/src/utils.jl
+++ b/src/utils.jl
@@ -14,16 +14,11 @@ function numargs(f)
end
function numargs(f::RuntimeGeneratedFunctions.RuntimeGeneratedFunction{
- T,
- V,
- W,
- I,
-}) where {
- T,
- V,
- W,
- I,
-}
+ T,
+ V,
+ W,
+ I,
+ }) where {T, V, W, I}
(length(T),)
end
@@ -237,10 +232,15 @@ dispatch, i.e. `f(u,p)` for OptimizationFunction, and thus the check for the oop
form is disabled and the 2-argument signature is ensured to be matched.
# See also
-* [`numargs`](@ref numargs)
+
+ - [`numargs`](@ref numargs)
"""
-function isinplace(f, inplace_param_number, fname = "f", iip_preferred = true;
- has_two_dispatches = true, isoptimization = false)
+function isinplace(f,
+ inplace_param_number,
+ fname = "f",
+ iip_preferred = true;
+ has_two_dispatches = true,
+ isoptimization = false,)
nargs = numargs(f)
iip_dispatch = any(x -> x == inplace_param_number, nargs)
oop_dispatch = any(x -> x == inplace_param_number - 1, nargs)
@@ -261,10 +261,9 @@ function isinplace(f, inplace_param_number, fname = "f", iip_preferred = true;
else
methods(f).ms[1].sig.parameters
end
-
+
for i in 1:length(nargs)
- if nargs[i] < inplace_param_number &&
- any(isequal(Vararg{Any}),_parameters)
+ if nargs[i] < inplace_param_number && any(isequal(Vararg{Any}), _parameters)
# If varargs, assume iip
return iip_preferred
end
@@ -304,8 +303,9 @@ function isinplace(f, inplace_param_number, fname = "f", iip_preferred = true;
end
isinplace(f::AbstractSciMLFunction{iip}) where {iip} = iip
-function isinplace(f::AbstractSciMLFunction{iip}, inplace_param_number,
- fname = nothing) where {iip}
+function isinplace(f::AbstractSciMLFunction{iip},
+ inplace_param_number,
+ fname = nothing) where {iip}
iip
end
@@ -390,8 +390,7 @@ function add_kwonly(::Type{<:Val}, ex)
error("add_only does not work with expression $(ex.head)")
end
-function add_kwonly(::Union{Type{Val{:function}},
- Type{Val{:(=)}}}, ex::Expr)
+function add_kwonly(::Union{Type{Val{:function}}, Type{Val{:(=)}}}, ex::Expr)
body = ex.args[2:end] # function body
default_call = ex.args[1] # e.g., :(f(a, b=2; c=3))
kwonly_call = add_kwonly(default_call)
@@ -448,8 +447,9 @@ function add_kwonly(::Type{Val{:call}}, default_call::Expr)
end
kwonly_kwargs = Expr(:parameters,
- [Expr(:kw, pa, :(error($("No argument $pa"))))
- for pa in required]..., optional..., default_kwargs...)
+ [Expr(:kw, pa, :(error($("No argument $pa")))) for pa in required]...,
+ optional...,
+ default_kwargs...)
kwonly_call = Expr(:call, funcname, kwonly_kwargs)
# e.g., :(f(; a=error(...), b=error(...), c=1, d=2))
@@ -511,6 +511,7 @@ type which cannot be used as a state but can be converted to something that
can be, then you may define `prepare_initial_state(x::YourType) = ...`.
!!! warning
+
This function is experimental and may be removed in the future.
See also: `prepare_function`.
@@ -529,6 +530,7 @@ something that can be, then you may define `prepare_function(x::YourType) = ...`
the arity of a function is computed with `numargs`
!!! warning
+
This function is experimental and may be removed in the future.
See also: `prepare_initial_state`.
diff --git a/test/aqua.jl b/test/aqua.jl
index 6dbee8706d..c2212ebc63 100644
--- a/test/aqua.jl
+++ b/test/aqua.jl
@@ -25,7 +25,7 @@ end
@testset "Aqua tests (additional)" begin
Aqua.test_undefined_exports(SciMLBase)
Aqua.test_stale_deps(SciMLBase)
- Aqua.test_deps_compat(SciMLBase, check_extras = false)
+ Aqua.test_deps_compat(SciMLBase; check_extras = false)
Aqua.test_project_extras(SciMLBase)
# Aqua.test_project_toml_formatting(SciMLBase) # failing
# Aqua.test_piracy(SciMLBase) # failing
diff --git a/test/convert_tests.jl b/test/convert_tests.jl
index 20fba4f86d..b6904310a4 100644
--- a/test/convert_tests.jl
+++ b/test/convert_tests.jl
@@ -36,14 +36,14 @@ end
@testset "Convert ODEProblem with kwargs to NonlinearProblem" begin
function lorenz!(du, u, p, t)
- du[1] = p[1]*(u[2] - u[1])
+ du[1] = p[1] * (u[2] - u[1])
du[2] = u[1] * (p[2] - u[3]) - u[2]
du[3] = u[1] * u[2] - p[3] * u[3]
end
u0 = [1.0; 0.0; 0.0]
tspan = (0.0, 100.0)
- p = [10.0,28.0,8/3]
- prob = ODEProblem(lorenz!, u0, tspan,p;a=1.0,b=2.0)
+ p = [10.0, 28.0, 8 / 3]
+ prob = ODEProblem(lorenz!, u0, tspan, p; a = 1.0, b = 2.0)
nlprob = NonlinearProblem(prob)
@test nlprob.kwargs[:a] == prob.kwargs[:a]
@test nlprob.kwargs[:b] == prob.kwargs[:b]
@@ -51,15 +51,15 @@ end
@testset "Convert ODEProblem with kwargs to SteadyStateProblem" begin
function lorenz!(du, u, p, t)
- du[1] = p[1]*(u[2] - u[1])
+ du[1] = p[1] * (u[2] - u[1])
du[2] = u[1] * (p[2] - u[3]) - u[2]
du[3] = u[1] * u[2] - p[3] * u[3]
end
u0 = [1.0; 0.0; 0.0]
tspan = (0.0, 100.0)
- p = [10.0,28.0,8/3]
- prob = ODEProblem(lorenz!, u0, tspan,p;a=1.0,b=2.0)
+ p = [10.0, 28.0, 8 / 3]
+ prob = ODEProblem(lorenz!, u0, tspan, p; a = 1.0, b = 2.0)
sprob = SteadyStateProblem(prob)
@test sprob.kwargs[:a] == prob.kwargs[:a]
@test sprob.kwargs[:b] == prob.kwargs[:b]
-end
\ No newline at end of file
+end
diff --git a/test/downstream/ensemble_bvp.jl b/test/downstream/ensemble_bvp.jl
index ad08236c7b..f6bed67d1a 100644
--- a/test/downstream/ensemble_bvp.jl
+++ b/test/downstream/ensemble_bvp.jl
@@ -11,12 +11,12 @@ function bc!(residual, u, p, t)
end
function prob_func(prob, i, repeat)
- remake(prob, p = [rand()])
+ remake(prob; p = [rand()])
end
initial_guess = [0.0, 1.0]
tspan = (0.0, pi / 2)
p = [rand()]
bvp = BVProblem(ode!, bc!, initial_guess, tspan, p)
-ensemble_prob = EnsembleProblem(bvp, prob_func = prob_func)
-sim = solve(ensemble_prob, MIRK4(), trajectories = 10, dt = 0.1)
+ensemble_prob = EnsembleProblem(bvp; prob_func = prob_func)
+sim = solve(ensemble_prob, MIRK4(); trajectories = 10, dt = 0.1)
diff --git a/test/downstream/ensemble_diffeq.jl b/test/downstream/ensemble_diffeq.jl
index 7f53b65789..1d20eebd28 100644
--- a/test/downstream/ensemble_diffeq.jl
+++ b/test/downstream/ensemble_diffeq.jl
@@ -2,8 +2,8 @@ using OrdinaryDiffEq
prob = ODEProblem((u, p, t) -> 1.01u, 0.5, (0.0, 1.0))
function prob_func(prob, i, repeat)
- remake(prob, u0 = rand() * prob.u0)
+ remake(prob; u0 = rand() * prob.u0)
end
-ensemble_prob = EnsembleProblem(prob, prob_func = prob_func)
-sim = solve(ensemble_prob, Tsit5(), EnsembleThreads(), trajectories = 10)
-@test sim isa EnsembleSolution
\ No newline at end of file
+ensemble_prob = EnsembleProblem(prob; prob_func = prob_func)
+sim = solve(ensemble_prob, Tsit5(), EnsembleThreads(); trajectories = 10)
+@test sim isa EnsembleSolution
diff --git a/test/downstream/ensemble_first_batch.jl b/test/downstream/ensemble_first_batch.jl
index 09043453c2..a9e7246257 100644
--- a/test/downstream/ensemble_first_batch.jl
+++ b/test/downstream/ensemble_first_batch.jl
@@ -2,13 +2,18 @@ using OrdinaryDiffEq, Test, Statistics
# test for https://github.com/SciML/SciMLBase.jl/issues/190
prob = ODEProblem((u, p, t) -> 1.01u, 0.5, (0.0, 1.0))
-prob_func(prob, i, repeat) = remake(prob, u0 = rand() * prob.u0)
+prob_func(prob, i, repeat) = remake(prob; u0 = rand() * prob.u0)
output_func(sol, i) = (last(sol), false)
reduction(u, batch, I) = (append!(u, mean(batch)), false)
# make sure first batch is timed (test using 1 batch but reduction)
-ensemble_prob = EnsembleProblem(prob, prob_func = prob_func, output_func = output_func,
- reduction = reduction)
-sim = solve(ensemble_prob, Tsit5(), EnsembleThreads(), trajectories = 1000,
+ensemble_prob = EnsembleProblem(prob;
+ prob_func = prob_func,
+ output_func = output_func,
+ reduction = reduction,)
+sim = solve(ensemble_prob,
+ Tsit5(),
+ EnsembleThreads();
+ trajectories = 1000,
batch_size = 1000)
@test sim.elapsedTime > 1000 * @elapsed for i in 2:1
diff --git a/test/downstream/ensemble_multi_prob.jl b/test/downstream/ensemble_multi_prob.jl
index 9f09a58b2c..c2c74446d9 100644
--- a/test/downstream/ensemble_multi_prob.jl
+++ b/test/downstream/ensemble_multi_prob.jl
@@ -3,12 +3,9 @@ using ModelingToolkit, OrdinaryDiffEq, Test
@variables t, x(t), y(t)
D = Differential(t)
-@named sys1 = ODESystem([D(x) ~ x,
- D(y) ~ -y])
-@named sys2 = ODESystem([D(x) ~ 2x,
- D(y) ~ -2y])
-@named sys3 = ODESystem([D(x) ~ 3x,
- D(y) ~ -3y])
+@named sys1 = ODESystem([D(x) ~ x, D(y) ~ -y])
+@named sys2 = ODESystem([D(x) ~ 2x, D(y) ~ -2y])
+@named sys3 = ODESystem([D(x) ~ 3x, D(y) ~ -3y])
prob1 = ODEProblem(sys1, [1.0, 1.0], (0.0, 1.0))
prob2 = ODEProblem(sys2, [2.0, 2.0], (0.0, 1.0))
@@ -22,6 +19,6 @@ for i in 1:3
@test sol[y, :][i] == sol[i][y]
end
# Ensemble is a recursive array
-@test only.(sol(0.0, idxs = [x])) == sol[1, 1, :] == first.(sol[x, :])
+@test only.(sol(0.0; idxs = [x])) == sol[1, 1, :] == first.(sol[x, :])
# TODO: fix the interpolation
-@test only.(sol(1.0, idxs = [x])) ≈ last.(sol[x, :])
+@test only.(sol(1.0; idxs = [x])) ≈ last.(sol[x, :])
diff --git a/test/downstream/ensemble_nondes.jl b/test/downstream/ensemble_nondes.jl
index 789bdb4fc2..14fd2dd495 100644
--- a/test/downstream/ensemble_nondes.jl
+++ b/test/downstream/ensemble_nondes.jl
@@ -6,23 +6,42 @@ l1 = rosenbrock(x0)
optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff())
prob = OptimizationProblem(optf, x0)
-sol1 = Optimization.solve(prob, OptimizationOptimJL.BFGS(), maxiters = 5)
+sol1 = Optimization.solve(prob, OptimizationOptimJL.BFGS(); maxiters = 5)
-ensembleprob = Optimization.EnsembleProblem(prob, [x0, x0 .+ rand(2), x0 .+ rand(2), x0 .+ rand(2)])
+ensembleprob = Optimization.EnsembleProblem(prob,
+ [x0, x0 .+ rand(2), x0 .+ rand(2), x0 .+ rand(2)])
-sol = Optimization.solve(ensembleprob, OptimizationOptimJL.BFGS(), EnsembleThreads(), trajectories = 4, maxiters = 5)
+sol = Optimization.solve(ensembleprob,
+ OptimizationOptimJL.BFGS(),
+ EnsembleThreads();
+ trajectories = 4,
+ maxiters = 5,)
@test findmin(i -> sol[i].objective, 1:4)[1] < sol1.objective
-sol = Optimization.solve(ensembleprob, OptimizationOptimJL.BFGS(), EnsembleDistributed(), trajectories = 4, maxiters = 5)
+sol = Optimization.solve(ensembleprob,
+ OptimizationOptimJL.BFGS(),
+ EnsembleDistributed();
+ trajectories = 4,
+ maxiters = 5,)
@test findmin(i -> sol[i].objective, 1:4)[1] < sol1.objective
-prob = OptimizationProblem(optf, x0, lb = [-0.5, -0.5], ub = [0.5, 0.5])
-ensembleprob = Optimization.EnsembleProblem(prob, 5, prob_func = (prob, i, repeat) -> remake(prob, u0 = rand(-0.5:0.001:0.5, 2)))
+prob = OptimizationProblem(optf, x0; lb = [-0.5, -0.5], ub = [0.5, 0.5])
+ensembleprob = Optimization.EnsembleProblem(prob,
+ 5;
+ prob_func = (prob, i, repeat) -> remake(prob; u0 = rand(-0.5:0.001:0.5, 2)),)
-sol = Optimization.solve(ensembleprob, OptimizationOptimJL.BFGS(), EnsembleThreads(), trajectories = 5, maxiters = 5)
+sol = Optimization.solve(ensembleprob,
+ OptimizationOptimJL.BFGS(),
+ EnsembleThreads();
+ trajectories = 5,
+ maxiters = 5,)
@test findmin(i -> sol[i].objective, 1:4)[1] < sol1.objective
-sol = Optimization.solve(ensembleprob, OptimizationOptimJL.BFGS(), EnsembleDistributed(), trajectories = 5, maxiters = 5)
+sol = Optimization.solve(ensembleprob,
+ OptimizationOptimJL.BFGS(),
+ EnsembleDistributed();
+ trajectories = 5,
+ maxiters = 5,)
@test findmin(i -> sol[i].objective, 1:4)[1] < sol1.objective
using NonlinearSolve
@@ -33,6 +52,6 @@ p = 2.0
prob = NonlinearProblem(f, u0, p)
ensembleprob = EnsembleProblem(prob, [u0, u0 .+ rand(2), u0 .+ rand(2), u0 .+ rand(2)])
-sol = solve(ensembleprob, EnsembleThreads(), trajectories = 4, maxiters = 100)
+sol = solve(ensembleprob, EnsembleThreads(); trajectories = 4, maxiters = 100)
-sol = solve(ensembleprob, EnsembleDistributed(), trajectories = 4, maxiters = 100)
\ No newline at end of file
+sol = solve(ensembleprob, EnsembleDistributed(); trajectories = 4, maxiters = 100)
diff --git a/test/downstream/ensemble_stats.jl b/test/downstream/ensemble_stats.jl
index cec72f05ea..a083ad1113 100644
--- a/test/downstream/ensemble_stats.jl
+++ b/test/downstream/ensemble_stats.jl
@@ -1,13 +1,13 @@
using OrdinaryDiffEq
using Test
-f(u,p,t) = 1.01*u
-u0=1/2
-tspan = (0.0,1.0)
-prob = ODEProblem(f,u0,tspan)
+f(u, p, t) = 1.01 * u
+u0 = 1 / 2
+tspan = (0.0, 1.0)
+prob = ODEProblem(f, u0, tspan)
function prob_func(prob, i, repeat)
- remake(prob, u0 = rand() * prob.u0)
+ remake(prob; u0 = rand() * prob.u0)
end
-ensemble_prob = EnsembleProblem(prob, prob_func = prob_func)
-sim = solve(ensemble_prob, Tsit5(), EnsembleThreads(), trajectories = 10)
-@test sim.stats.nf == mapreduce(x -> x.stats.nf, +, sim.u)
\ No newline at end of file
+ensemble_prob = EnsembleProblem(prob; prob_func = prob_func)
+sim = solve(ensemble_prob, Tsit5(), EnsembleThreads(); trajectories = 10)
+@test sim.stats.nf == mapreduce(x -> x.stats.nf, +, sim.u)
diff --git a/test/downstream/ensemble_zero_length.jl b/test/downstream/ensemble_zero_length.jl
index 917348d563..0150ba78bc 100644
--- a/test/downstream/ensemble_zero_length.jl
+++ b/test/downstream/ensemble_zero_length.jl
@@ -1,24 +1,31 @@
using OrdinaryDiffEq, Test, LinearAlgebra
-prob = ODEProblem((u, p, t) -> 1.01u, 0.5, (0.0, 1.0), save_start = false, save_end = false)
+prob = ODEProblem((u, p, t) -> 1.01u, 0.5, (0.0, 1.0); save_start = false, save_end = false)
function prob_func(prob, i, repeat)
- remake(prob, u0 = rand() * prob.u0)
+ remake(prob; u0 = rand() * prob.u0)
end
-ensemble_prob = EnsembleProblem(prob, prob_func = prob_func)
-sim = solve(ensemble_prob, Tsit5(), EnsembleThreads(), trajectories = 10,
- save_everystep = false)
+ensemble_prob = EnsembleProblem(prob; prob_func = prob_func)
+sim = solve(ensemble_prob,
+ Tsit5(),
+ EnsembleThreads();
+ trajectories = 10,
+ save_everystep = false,)
@test ndims(sim) == 2
@test length(sim) == 10
@test eltype(sim.u) <: ODESolution
ts = 0.0:0.1:1.0
using SciMLBase.EnsembleAnalysis
-sim = solve(ensemble_prob, Tsit5(), EnsembleThreads(), trajectories = 10, saveat = 0.1)
+sim = solve(ensemble_prob, Tsit5(), EnsembleThreads(); trajectories = 10, saveat = 0.1)
timeseries_point_meancov(sim, ts)
timeseries_point_median(sim, ts)
function prob_sol(_p)
- prob = ODEProblem((u, p, t) -> p .* u, _p, (0.0, 1.0), _p, save_start = false,
- save_end = false)
+ prob = ODEProblem((u, p, t) -> p .* u,
+ _p,
+ (0.0, 1.0),
+ _p;
+ save_start = false,
+ save_end = false,)
sim = solve(prob, Tsit5())
end
mapres = SciMLBase.responsible_map(prob_sol, [0.5, diagm([1.0, 1.0])])
diff --git a/test/downstream/integer_idxs.jl b/test/downstream/integer_idxs.jl
index e9bed47d34..e3c9bb7eaf 100644
--- a/test/downstream/integer_idxs.jl
+++ b/test/downstream/integer_idxs.jl
@@ -10,4 +10,4 @@ tspan = (0.0, 100.0)
prob = ODEProblem(lorenz!, u0, tspan)
sol = solve(prob, CVODE_Adams())
-sol(90:1:100, idxs = 1).u isa Array{Float64}
+sol(90:1:100; idxs = 1).u isa Array{Float64}
diff --git a/test/downstream/integrator_indexing.jl b/test/downstream/integrator_indexing.jl
index 6ab1557325..8bf238df88 100644
--- a/test/downstream/integrator_indexing.jl
+++ b/test/downstream/integrator_indexing.jl
@@ -6,8 +6,7 @@ using ModelingToolkit, OrdinaryDiffEq, RecursiveArrayTools, StochasticDiffEq, Te
@variables s1(t) s2(t)
D = Differential(t)
-eqs = [D(s1) ~ a * s1 / (1 + s1 + s2) - b * s1,
- D(s2) ~ +c * s2 / (1 + s1 + s2) - d * s2]
+eqs = [D(s1) ~ a * s1 / (1 + s1 + s2) - b * s1, D(s2) ~ +c * s2 / (1 + s1 + s2) - d * s2]
@named population_model = ODESystem(eqs)
@@ -51,8 +50,7 @@ integrator[:s1] = 1.0
@test integrator[s1] == integrator[population_model.s1] == integrator[:s1] == 1.0
# Tests on SDEProblem
-noiseeqs = [0.1 * s1,
- 0.1 * s2]
+noiseeqs = [0.1 * s1, 0.1 * s2]
@named noisy_population_model = SDESystem(population_model, noiseeqs)
sprob = SDEProblem(noisy_population_model, u0, (0.0, 100.0), p)
integrator = init(sprob, ImplicitEM())
@@ -84,35 +82,36 @@ integrator[:s1] = 1.0
@variables x(t) y(t) z(t)
D = Differential(t)
-eqs = [D(x) ~ σ * (y - x),
- D(y) ~ x * (ρ - z) - y,
- D(z) ~ x * y - β * z]
+eqs = [D(x) ~ σ * (y - x), D(y) ~ x * (ρ - z) - y, D(z) ~ x * y - β * z]
@named lorenz1 = ODESystem(eqs)
@named lorenz2 = ODESystem(eqs)
@parameters γ
@variables a(t) α(t)
-connections = [0 ~ lorenz1.x + lorenz2.y + a * γ,
- α ~ 2lorenz1.x + a * γ]
-@named sys = ODESystem(connections, t, [a, α], [γ], systems = [lorenz1, lorenz2])
+connections = [0 ~ lorenz1.x + lorenz2.y + a * γ, α ~ 2lorenz1.x + a * γ]
+@named sys = ODESystem(connections, t, [a, α], [γ]; systems = [lorenz1, lorenz2])
sys_simplified = structural_simplify(sys)
-u0 = [lorenz1.x => 1.0,
+u0 = [
+ lorenz1.x => 1.0,
lorenz1.y => 0.0,
lorenz1.z => 0.0,
lorenz2.x => 0.0,
lorenz2.y => 1.0,
lorenz2.z => 0.0,
- a => 2.0]
+ a => 2.0,
+]
-p = [lorenz1.σ => 10.0,
+p = [
+ lorenz1.σ => 10.0,
lorenz1.ρ => 28.0,
lorenz1.β => 8 / 3,
lorenz2.σ => 10.0,
lorenz2.ρ => 28.0,
lorenz2.β => 8 / 3,
- γ => 2.0]
+ γ => 2.0,
+]
tspan = (0.0, 100.0)
prob = ODEProblem(sys_simplified, u0, tspan, p)
diff --git a/test/downstream/nllsopt.jl b/test/downstream/nllsopt.jl
index e70ed8ac25..a7d5204ac6 100644
--- a/test/downstream/nllsopt.jl
+++ b/test/downstream/nllsopt.jl
@@ -25,4 +25,4 @@ optprob = OptimizationProblem(optf, prob_oop.u0, prob_oop.p)
@time sol = solve(optprob, NLopt.LD_LBFGS(); maxiters = 10000, abstol = 1e-8)
optprob = OptimizationProblem(prob_oop, AutoForwardDiff())
-@time sol = solve(optprob, NLopt.LD_LBFGS(); maxiters = 10000, abstol = 1e-8)
\ No newline at end of file
+@time sol = solve(optprob, NLopt.LD_LBFGS(); maxiters = 10000, abstol = 1e-8)
diff --git a/test/downstream/problem_interface.jl b/test/downstream/problem_interface.jl
index 3e4cb9cef7..2335515db3 100644
--- a/test/downstream/problem_interface.jl
+++ b/test/downstream/problem_interface.jl
@@ -4,27 +4,20 @@ using ModelingToolkit, OrdinaryDiffEq, Test
@variables t x(t) y(t) z(t)
D = Differential(t)
-eqs = [D(D(x)) ~ σ * (y - x),
- D(y) ~ x * (ρ - z) - y,
- D(z) ~ x * y - β * z]
+eqs = [D(D(x)) ~ σ * (y - x), D(y) ~ x * (ρ - z) - y, D(z) ~ x * y - β * z]
@named sys = ODESystem(eqs)
sys = structural_simplify(sys)
-u0 = [D(x) => 2.0,
- x => 1.0,
- y => 0.0,
- z => 0.0]
+u0 = [D(x) => 2.0, x => 1.0, y => 0.0, z => 0.0]
-p = [σ => 28.0,
- ρ => 10.0,
- β => 8 / 3]
+p = [σ => 28.0, ρ => 10.0, β => 8 / 3]
tspan = (0.0, 100.0)
# ODEProblem.
-oprob = ODEProblem(sys, u0, tspan, p, jac = true)
+oprob = ODEProblem(sys, u0, tspan, p; jac = true)
@test oprob[σ] == oprob[sys.σ] == oprob[:σ] == 28.0
@test oprob[ρ] == oprob[sys.ρ] == oprob[:ρ] == 10.0
@@ -49,9 +42,7 @@ oprob[:z] = 1.0
@test oprob[z] == oprob[sys.z] == oprob[:z] == 1.0
# SDEProblem.
-noiseeqs = [0.1 * x,
- 0.1 * y,
- 0.1 * z]
+noiseeqs = [0.1 * x, 0.1 * y, 0.1 * z]
@named noise_sys = SDESystem(sys, noiseeqs)
sprob = SDEProblem(noise_sys, u0, (0.0, 100.0), p)
u0
diff --git a/test/downstream/remake_autodiff.jl b/test/downstream/remake_autodiff.jl
index a83c3fe68d..72fbe711d2 100644
--- a/test/downstream/remake_autodiff.jl
+++ b/test/downstream/remake_autodiff.jl
@@ -5,25 +5,25 @@ D = Differential(t)
function lotka_volterra(; name = name)
states = @variables x(t)=1.0 y(t)=1.0 o(t)
params = @parameters p1=1.5 p2=1.0 p3=3.0 p4=1.0
- eqs = [
- D(x) ~ p1 * x - p2 * x * y,
- D(y) ~ -p3 * y + p4 * x * y,
- o ~ x * y,
- ]
+ eqs = [D(x) ~ p1 * x - p2 * x * y, D(y) ~ -p3 * y + p4 * x * y, o ~ x * y]
return ODESystem(eqs, t, states, params; name = name)
end
@named lotka_volterra_sys = lotka_volterra()
lotka_volterra_sys = structural_simplify(lotka_volterra_sys)
prob = ODEProblem(lotka_volterra_sys, [], (0.0, 10.0), [])
-sol = solve(prob, Tsit5(), reltol = 1e-6, abstol = 1e-6)
+sol = solve(prob, Tsit5(); reltol = 1e-6, abstol = 1e-6)
u0 = [1.0 1.0]
p = [1.5 1.0 1.0 1.0]
function sum_of_solution(u0, p)
- _prob = remake(prob, u0 = u0, p = p)
- sum(solve(_prob, Tsit5(), reltol = 1e-6, abstol = 1e-6, saveat = 0.1,
- sensealg = BacksolveAdjoint(autojacvec = ZygoteVJP())))
+ _prob = remake(prob; u0 = u0, p = p)
+ sum(solve(_prob,
+ Tsit5();
+ reltol = 1e-6,
+ abstol = 1e-6,
+ saveat = 0.1,
+ sensealg = BacksolveAdjoint(; autojacvec = ZygoteVJP()),))
end
du01, dp1 = Zygote.gradient(sum_of_solution, u0, p)
@@ -32,18 +32,26 @@ du01, dp1 = Zygote.gradient(sum_of_solution, u0, p)
# package exentsions do not exist before 1.9, so they cannot work.
if VERSION >= v"1.9"
function symbolic_indexing(u0, p)
- _prob = remake(prob, u0 = u0, p = p)
- soln = solve(_prob, Tsit5(), reltol = 1e-6, abstol = 1e-6, saveat = 0.1,
- sensealg = BacksolveAdjoint(autojacvec = ZygoteVJP()))
+ _prob = remake(prob; u0 = u0, p = p)
+ soln = solve(_prob,
+ Tsit5();
+ reltol = 1e-6,
+ abstol = 1e-6,
+ saveat = 0.1,
+ sensealg = BacksolveAdjoint(; autojacvec = ZygoteVJP()),)
sum(soln[x])
end
du01, dp1 = Zygote.gradient(symbolic_indexing, u0, p)
function symbolic_indexing_observed(u0, p)
- _prob = remake(prob, u0 = u0, p = p)
- soln = solve(_prob, Tsit5(), reltol = 1e-6, abstol = 1e-6, saveat = 0.1,
- sensealg = BacksolveAdjoint(autojacvec = ZygoteVJP()))
+ _prob = remake(prob; u0 = u0, p = p)
+ soln = solve(_prob,
+ Tsit5();
+ reltol = 1e-6,
+ abstol = 1e-6,
+ saveat = 0.1,
+ sensealg = BacksolveAdjoint(; autojacvec = ZygoteVJP()),)
sum(soln[o, i] for i in 1:length(soln))
end
diff --git a/test/downstream/solution_interface.jl b/test/downstream/solution_interface.jl
index 11dcc7a14f..338bd8ac5f 100644
--- a/test/downstream/solution_interface.jl
+++ b/test/downstream/solution_interface.jl
@@ -6,8 +6,7 @@ using ModelingToolkit, OrdinaryDiffEq, RecursiveArrayTools, StochasticDiffEq, Te
@variables s1(t) s2(t)
D = Differential(t)
-eqs = [D(s1) ~ a * s1 / (1 + s1 + s2) - b * s1,
- D(s2) ~ +c * s2 / (1 + s1 + s2) - d * s2]
+eqs = [D(s1) ~ a * s1 / (1 + s1 + s2) - b * s1, D(s2) ~ +c * s2 / (1 + s1 + s2) - d * s2]
@named population_model = ODESystem(eqs)
@@ -23,8 +22,7 @@ sol = solve(oprob, Rodas4())
@test sol[s1][end] ≈ 1.0
# Tests on SDEProblem
-noiseeqs = [0.1 * s1,
- 0.1 * s2]
+noiseeqs = [0.1 * s1, 0.1 * s2]
@named noisy_population_model = SDESystem(population_model, noiseeqs)
sprob = SDEProblem(noisy_population_model, u0, (0.0, 100.0), p)
sol = solve(sprob, ImplicitEM())
@@ -38,35 +36,36 @@ sol = solve(sprob, ImplicitEM())
@variables x(t) y(t) z(t)
D = Differential(t)
-eqs = [D(x) ~ σ * (y - x),
- D(y) ~ x * (ρ - z) - y,
- D(z) ~ x * y - β * z]
+eqs = [D(x) ~ σ * (y - x), D(y) ~ x * (ρ - z) - y, D(z) ~ x * y - β * z]
@named lorenz1 = ODESystem(eqs)
@named lorenz2 = ODESystem(eqs)
@parameters γ
@variables a(t) α(t)
-connections = [0 ~ lorenz1.x + lorenz2.y + a * γ,
- α ~ 2lorenz1.x + a * γ]
-@named sys = ODESystem(connections, t, [a, α], [γ], systems = [lorenz1, lorenz2])
+connections = [0 ~ lorenz1.x + lorenz2.y + a * γ, α ~ 2lorenz1.x + a * γ]
+@named sys = ODESystem(connections, t, [a, α], [γ]; systems = [lorenz1, lorenz2])
sys_simplified = structural_simplify(sys)
-u0 = [lorenz1.x => 1.0,
+u0 = [
+ lorenz1.x => 1.0,
lorenz1.y => 0.0,
lorenz1.z => 0.0,
lorenz2.x => 0.0,
lorenz2.y => 1.0,
lorenz2.z => 0.0,
- a => 2.0]
+ a => 2.0,
+]
-p = [lorenz1.σ => 10.0,
+p = [
+ lorenz1.σ => 10.0,
lorenz1.ρ => 28.0,
lorenz1.β => 8 / 3,
lorenz2.σ => 10.0,
lorenz2.ρ => 28.0,
lorenz2.β => 8 / 3,
- γ => 2.0]
+ γ => 2.0,
+]
tspan = (0.0, 100.0)
prob = ODEProblem(sys_simplified, u0, tspan, p)
diff --git a/test/downstream/symbol_indexing.jl b/test/downstream/symbol_indexing.jl
index 83a6ec815e..62e51ff1d9 100644
--- a/test/downstream/symbol_indexing.jl
+++ b/test/downstream/symbol_indexing.jl
@@ -5,35 +5,36 @@ using Optimization, OptimizationOptimJL
@variables x(t) y(t) z(t)
D = Differential(t)
-eqs = [D(x) ~ σ * (y - x),
- D(y) ~ x * (ρ - z) - y,
- D(z) ~ x * y - β * z]
+eqs = [D(x) ~ σ * (y - x), D(y) ~ x * (ρ - z) - y, D(z) ~ x * y - β * z]
@named lorenz1 = ODESystem(eqs)
@named lorenz2 = ODESystem(eqs)
@parameters γ
@variables a(t) α(t)
-connections = [0 ~ lorenz1.x + lorenz2.y + a * γ,
- α ~ 2lorenz1.x + a * γ]
-@named sys = ODESystem(connections, t, [a, α], [γ], systems = [lorenz1, lorenz2])
+connections = [0 ~ lorenz1.x + lorenz2.y + a * γ, α ~ 2lorenz1.x + a * γ]
+@named sys = ODESystem(connections, t, [a, α], [γ]; systems = [lorenz1, lorenz2])
sys_simplified = structural_simplify(sys)
-u0 = [lorenz1.x => 1.0,
+u0 = [
+ lorenz1.x => 1.0,
lorenz1.y => 0.0,
lorenz1.z => 0.0,
lorenz2.x => 0.0,
lorenz2.y => 1.0,
lorenz2.z => 0.0,
- a => 2.0]
+ a => 2.0,
+]
-p = [lorenz1.σ => 10.0,
+p = [
+ lorenz1.σ => 10.0,
lorenz1.ρ => 28.0,
lorenz1.β => 8 / 3,
lorenz2.σ => 10.0,
lorenz2.ρ => 28.0,
lorenz2.β => 8 / 3,
- γ => 2.0]
+ γ => 2.0,
+]
tspan = (0.0, 100.0)
prob = ODEProblem(sys_simplified, u0, tspan, p)
@@ -119,22 +120,22 @@ sol2 = sol(0.1)
@test length(sol2) == length(states(sys_simplified))
@test first(sol2) isa Real
-sol3 = sol(0.0:1.0:10.0, idxs = [lorenz1.x, lorenz2.x])
+sol3 = sol(0.0:1.0:10.0; idxs = [lorenz1.x, lorenz2.x])
@test sol3.u isa Vector
@test first(sol3.u) isa Vector
@test length(sol3.u) == 11
@test length(sol3.t) == 11
@test collect(sol3[t]) ≈ sol3.t
@test collect(sol3[t, 1:5]) ≈ sol3.t[1:5]
-@test sol(0.0:1.0:10.0, idxs = [lorenz1.x, 1]) isa RecursiveArrayTools.DiffEqArray
+@test sol(0.0:1.0:10.0; idxs = [lorenz1.x, 1]) isa RecursiveArrayTools.DiffEqArray
-sol4 = sol(0.1, idxs = [lorenz1.x, lorenz2.x])
+sol4 = sol(0.1; idxs = [lorenz1.x, lorenz2.x])
@test sol4 isa Vector
@test length(sol4) == 2
@test first(sol4) isa Real
-@test sol(0.1, idxs = [lorenz1.x, 1]) isa Vector{Real}
+@test sol(0.1; idxs = [lorenz1.x, 1]) isa Vector{Real}
-sol5 = sol(0.0:1.0:10.0, idxs = lorenz1.x)
+sol5 = sol(0.0:1.0:10.0; idxs = lorenz1.x)
@test sol5.u isa Vector
@test first(sol5.u) isa Real
@test length(sol5.u) == 11
@@ -143,11 +144,11 @@ sol5 = sol(0.0:1.0:10.0, idxs = lorenz1.x)
@test collect(sol5[t, 1:5]) ≈ sol3.t[1:5]
@test_throws Any sol(0.0:1.0:10.0, idxs = 1.2)
-sol6 = sol(0.1, idxs = lorenz1.x)
+sol6 = sol(0.1; idxs = lorenz1.x)
@test sol6 isa Real
@test_throws Any sol(0.1, idxs = 1.2)
-sol7 = sol(0.0:1.0:10.0, idxs = [2, 1])
+sol7 = sol(0.0:1.0:10.0; idxs = [2, 1])
@test sol7.u isa Vector
@test first(sol7.u) isa Vector
@test length(sol7.u) == 11
@@ -155,12 +156,12 @@ sol7 = sol(0.0:1.0:10.0, idxs = [2, 1])
@test collect(sol7[t]) ≈ sol3.t
@test collect(sol7[t, 1:5]) ≈ sol3.t[1:5]
-sol8 = sol(0.1, idxs = [2, 1])
+sol8 = sol(0.1; idxs = [2, 1])
@test sol8 isa Vector
@test length(sol8) == 2
@test first(sol8) isa Real
-sol9 = sol(0.0:1.0:10.0, idxs = 2)
+sol9 = sol(0.0:1.0:10.0; idxs = 2)
@test sol9.u isa Vector
@test first(sol9.u) isa Real
@test length(sol9.u) == 11
@@ -168,7 +169,7 @@ sol9 = sol(0.0:1.0:10.0, idxs = 2)
@test collect(sol9[t]) ≈ sol3.t
@test collect(sol9[t, 1:5]) ≈ sol3.t[1:5]
-sol10 = sol(0.1, idxs = 2)
+sol10 = sol(0.1; idxs = 2)
@test sol10 isa Real
#=
@@ -215,7 +216,7 @@ sol = solve(prob, Tsit5())
y => 2.0]
p = [a => 1.0
b => 100.0]
- prob = OptimizationProblem(sys, u0, p, grad = true, hess = true)
+ prob = OptimizationProblem(sys, u0, p; grad = true, hess = true)
sol = solve(prob, GradientDescent())
@test sol[x]≈1 atol=1e-3
@test sol[y]≈1 atol=1e-3
diff --git a/test/downstream/unitful_interpolations.jl b/test/downstream/unitful_interpolations.jl
index 00b7725d70..6a66fa0be8 100644
--- a/test/downstream/unitful_interpolations.jl
+++ b/test/downstream/unitful_interpolations.jl
@@ -8,5 +8,5 @@ xU0 = 1.0u"g/L"
tspanU = (0.0u"hr", 5.0u"hr")
probU = ODEProblem(rhsU, xU0, tspanU, pU)
solU = solve(probU, Tsit5())
-tU = range(tspanU[1], tspanU[2], length = 20)
+tU = range(tspanU[1], tspanU[2]; length = 20)
xU = solU(tU)
diff --git a/test/existence_functions.jl b/test/existence_functions.jl
index 9c959fc992..9333648d19 100644
--- a/test/existence_functions.jl
+++ b/test/existence_functions.jl
@@ -1,8 +1,21 @@
using Test, SciMLBase
-using SciMLBase: __has_jac, __has_tgrad, __has_Wfact, __has_Wfact_t,
- __has_paramjac, __has_syms, __has_analytic, __has_colorvec, has_jac,
+using SciMLBase:
+ __has_jac,
+ __has_tgrad,
+ __has_Wfact,
+ __has_Wfact_t,
+ __has_paramjac,
+ __has_syms,
+ __has_analytic,
+ __has_colorvec,
+ has_jac,
has_tgrad,
- has_Wfact, has_Wfact_t, has_paramjac, has_syms, has_analytic, has_colorvec,
+ has_Wfact,
+ has_Wfact_t,
+ has_paramjac,
+ has_syms,
+ has_analytic,
+ has_colorvec,
AbstractDiffEqFunction
struct Foo <: AbstractDiffEqFunction{false}
diff --git a/test/function_building_error_messages.jl b/test/function_building_error_messages.jl
index 486fde70f3..10b5f4df97 100644
--- a/test/function_building_error_messages.jl
+++ b/test/function_building_error_messages.jl
@@ -9,7 +9,8 @@ function test_num_args()
numpar2 = SciMLBase.numargs(g) # [2]
numpar3 = SciMLBase.numargs(sqrt ∘ g) # [2]
@show numpar, minimum(numpar) == 1, maximum(numpar) == 2
- minimum(numpar) == 1 && maximum(numpar) == 2 &&
+ minimum(numpar) == 1 &&
+ maximum(numpar) == 2 &&
maximum(numpar2) == 2 &&
only(numpar3) == 2
end
@@ -21,7 +22,7 @@ end
struct Foo{T} end
f = Foo{1}()
-(this::Foo{T})(args...) where T=1
+(this::Foo{T})(args...) where {T} = 1
@test SciMLBase.isinplace(Foo{Int}(), 4)
## Problem argument tests
@@ -55,10 +56,10 @@ jac(u, t) = [1.0]
@test_throws SciMLBase.TooFewArgumentsError ODEFunction(foop, jac = jac)
jac(u, p, t) = [1.0]
@test_throws SciMLBase.NonconformingFunctionsError ODEFunction(fiip, jac = jac)
-ODEFunction(foop, jac = jac)
+ODEFunction(foop; jac = jac)
jac(du, u, p, t) = [1.0]
-ODEFunction(fiip, jac = jac)
-ODEFunction(foop, jac = jac)
+ODEFunction(fiip; jac = jac)
+ODEFunction(foop; jac = jac)
Wfact(u, t) = [1.0]
@test_throws SciMLBase.TooFewArgumentsError ODEFunction(fiip, Wfact = Wfact)
@@ -68,10 +69,10 @@ Wfact(u, p, t) = [1.0]
@test_throws SciMLBase.TooFewArgumentsError ODEFunction(foop, Wfact = Wfact)
Wfact(u, p, gamma, t) = [1.0]
@test_throws SciMLBase.NonconformingFunctionsError ODEFunction(fiip, Wfact = Wfact)
-ODEFunction(foop, Wfact = Wfact)
+ODEFunction(foop; Wfact = Wfact)
Wfact(du, u, p, gamma, t) = [1.0]
-ODEFunction(fiip, Wfact = Wfact)
-ODEFunction(foop, Wfact = Wfact)
+ODEFunction(fiip; Wfact = Wfact)
+ODEFunction(foop; Wfact = Wfact)
Wfact_t(u, t) = [1.0]
@test_throws SciMLBase.TooFewArgumentsError ODEFunction(fiip, Wfact_t = Wfact_t)
@@ -81,50 +82,50 @@ Wfact_t(u, p, t) = [1.0]
@test_throws SciMLBase.TooFewArgumentsError ODEFunction(foop, Wfact_t = Wfact_t)
Wfact_t(u, p, gamma, t) = [1.0]
@test_throws SciMLBase.NonconformingFunctionsError ODEFunction(fiip, Wfact_t = Wfact_t)
-ODEFunction(foop, Wfact_t = Wfact_t)
+ODEFunction(foop; Wfact_t = Wfact_t)
Wfact_t(du, u, p, gamma, t) = [1.0]
-ODEFunction(fiip, Wfact_t = Wfact_t)
-ODEFunction(foop, Wfact_t = Wfact_t)
+ODEFunction(fiip; Wfact_t = Wfact_t)
+ODEFunction(foop; Wfact_t = Wfact_t)
tgrad(u, t) = [1.0]
@test_throws SciMLBase.TooFewArgumentsError ODEFunction(fiip, tgrad = tgrad)
@test_throws SciMLBase.TooFewArgumentsError ODEFunction(foop, tgrad = tgrad)
tgrad(u, p, t) = [1.0]
@test_throws SciMLBase.NonconformingFunctionsError ODEFunction(fiip, tgrad = tgrad)
-ODEFunction(foop, tgrad = tgrad)
+ODEFunction(foop; tgrad = tgrad)
tgrad(du, u, p, t) = [1.0]
-ODEFunction(fiip, tgrad = tgrad)
-ODEFunction(foop, tgrad = tgrad)
+ODEFunction(fiip; tgrad = tgrad)
+ODEFunction(foop; tgrad = tgrad)
paramjac(u, t) = [1.0]
@test_throws SciMLBase.TooFewArgumentsError ODEFunction(fiip, paramjac = paramjac)
@test_throws SciMLBase.TooFewArgumentsError ODEFunction(foop, paramjac = paramjac)
paramjac(u, p, t) = [1.0]
@test_throws SciMLBase.NonconformingFunctionsError ODEFunction(fiip, paramjac = paramjac)
-ODEFunction(foop, paramjac = paramjac)
+ODEFunction(foop; paramjac = paramjac)
paramjac(du, u, p, t) = [1.0]
-ODEFunction(fiip, paramjac = paramjac)
-ODEFunction(foop, paramjac = paramjac)
+ODEFunction(fiip; paramjac = paramjac)
+ODEFunction(foop; paramjac = paramjac)
jvp(u, p, t) = [1.0]
@test_throws SciMLBase.TooFewArgumentsError ODEFunction(fiip, jvp = jvp)
@test_throws SciMLBase.TooFewArgumentsError ODEFunction(foop, jvp = jvp)
jvp(u, v, p, t) = [1.0]
@test_throws SciMLBase.NonconformingFunctionsError ODEFunction(fiip, jvp = jvp)
-ODEFunction(foop, jvp = jvp)
+ODEFunction(foop; jvp = jvp)
jvp(du, u, v, p, t) = [1.0]
-ODEFunction(fiip, jvp = jvp)
-ODEFunction(foop, jvp = jvp)
+ODEFunction(fiip; jvp = jvp)
+ODEFunction(foop; jvp = jvp)
vjp(u, p, t) = [1.0]
@test_throws SciMLBase.TooFewArgumentsError ODEFunction(fiip, vjp = vjp)
@test_throws SciMLBase.TooFewArgumentsError ODEFunction(foop, vjp = vjp)
vjp(u, v, p, t) = [1.0]
@test_throws SciMLBase.NonconformingFunctionsError ODEFunction(fiip, vjp = vjp)
-ODEFunction(foop, vjp = vjp)
+ODEFunction(foop; vjp = vjp)
vjp(du, u, v, p, t) = [1.0]
-ODEFunction(fiip, vjp = vjp)
-ODEFunction(foop, vjp = vjp)
+ODEFunction(fiip; vjp = vjp)
+ODEFunction(foop; vjp = vjp)
# SDE
@@ -153,10 +154,10 @@ sjac(u, t) = [1.0]
@test_throws SciMLBase.TooFewArgumentsError SDEFunction(foop, goop, jac = sjac)
sjac(u, p, t) = [1.0]
@test_throws SciMLBase.NonconformingFunctionsError SDEFunction(fiip, giip, jac = sjac)
-SDEFunction(foop, goop, jac = sjac)
+SDEFunction(foop, goop; jac = sjac)
sjac(du, u, p, t) = [1.0]
-SDEFunction(fiip, giip, jac = sjac)
-SDEFunction(foop, goop, jac = sjac)
+SDEFunction(fiip, giip; jac = sjac)
+SDEFunction(foop, goop; jac = sjac)
sWfact(u, t) = [1.0]
@test_throws SciMLBase.TooFewArgumentsError SDEFunction(fiip, giip, Wfact = sWfact)
@@ -168,10 +169,10 @@ sWfact(u, p, t) = [1.0]
sWfact(u, p, gamma, t) = [1.0]
@test_throws SciMLBase.NonconformingFunctionsError SDEFunction(fiip, giip, Wfact = sWfact)
@test_throws SciMLBase.NonconformingFunctionsError SDEFunction(fiip, goop, Wfact = sWfact)
-SDEFunction(foop, goop, Wfact = sWfact)
+SDEFunction(foop, goop; Wfact = sWfact)
sWfact(du, u, p, gamma, t) = [1.0]
-SDEFunction(fiip, giip, Wfact = sWfact)
-SDEFunction(foop, goop, Wfact = sWfact)
+SDEFunction(fiip, giip; Wfact = sWfact)
+SDEFunction(foop, goop; Wfact = sWfact)
sWfact_t(u, t) = [1.0]
@test_throws SciMLBase.TooFewArgumentsError SDEFunction(fiip, giip, Wfact_t = sWfact_t)
@@ -180,53 +181,55 @@ sWfact_t(u, p, t) = [1.0]
@test_throws SciMLBase.TooFewArgumentsError SDEFunction(fiip, giip, Wfact_t = sWfact_t)
@test_throws SciMLBase.TooFewArgumentsError SDEFunction(foop, giip, Wfact_t = sWfact_t)
sWfact_t(u, p, gamma, t) = [1.0]
-@test_throws SciMLBase.NonconformingFunctionsError SDEFunction(fiip, giip,
+@test_throws SciMLBase.NonconformingFunctionsError SDEFunction(fiip,
+ giip,
Wfact_t = sWfact_t)
-SDEFunction(foop, goop, Wfact_t = sWfact_t)
+SDEFunction(foop, goop; Wfact_t = sWfact_t)
sWfact_t(du, u, p, gamma, t) = [1.0]
-SDEFunction(fiip, giip, Wfact_t = sWfact_t)
-SDEFunction(foop, goop, Wfact_t = sWfact_t)
+SDEFunction(fiip, giip; Wfact_t = sWfact_t)
+SDEFunction(foop, goop; Wfact_t = sWfact_t)
stgrad(u, t) = [1.0]
@test_throws SciMLBase.TooFewArgumentsError SDEFunction(fiip, giip, tgrad = stgrad)
@test_throws SciMLBase.TooFewArgumentsError SDEFunction(foop, goop, tgrad = stgrad)
stgrad(u, p, t) = [1.0]
@test_throws SciMLBase.NonconformingFunctionsError SDEFunction(fiip, giip, tgrad = stgrad)
-SDEFunction(foop, goop, tgrad = stgrad)
+SDEFunction(foop, goop; tgrad = stgrad)
stgrad(du, u, p, t) = [1.0]
-SDEFunction(fiip, giip, tgrad = stgrad)
-SDEFunction(foop, goop, tgrad = stgrad)
+SDEFunction(fiip, giip; tgrad = stgrad)
+SDEFunction(foop, goop; tgrad = stgrad)
sparamjac(u, t) = [1.0]
@test_throws SciMLBase.TooFewArgumentsError SDEFunction(fiip, giip, paramjac = sparamjac)
@test_throws SciMLBase.TooFewArgumentsError SDEFunction(foop, goop, paramjac = sparamjac)
sparamjac(u, p, t) = [1.0]
-@test_throws SciMLBase.NonconformingFunctionsError SDEFunction(fiip, giip,
+@test_throws SciMLBase.NonconformingFunctionsError SDEFunction(fiip,
+ giip,
paramjac = sparamjac)
-SDEFunction(foop, goop, paramjac = sparamjac)
+SDEFunction(foop, goop; paramjac = sparamjac)
sparamjac(du, u, p, t) = [1.0]
-SDEFunction(fiip, giip, paramjac = sparamjac)
-SDEFunction(foop, goop, paramjac = sparamjac)
+SDEFunction(fiip, giip; paramjac = sparamjac)
+SDEFunction(foop, goop; paramjac = sparamjac)
sjvp(u, p, t) = [1.0]
@test_throws SciMLBase.TooFewArgumentsError SDEFunction(fiip, giip, jvp = sjvp)
@test_throws SciMLBase.TooFewArgumentsError SDEFunction(foop, goop, jvp = sjvp)
sjvp(u, v, p, t) = [1.0]
@test_throws SciMLBase.NonconformingFunctionsError SDEFunction(fiip, giip, jvp = sjvp)
-SDEFunction(foop, goop, jvp = sjvp)
+SDEFunction(foop, goop; jvp = sjvp)
sjvp(du, u, v, p, t) = [1.0]
-SDEFunction(fiip, giip, jvp = sjvp)
-SDEFunction(foop, goop, jvp = sjvp)
+SDEFunction(fiip, giip; jvp = sjvp)
+SDEFunction(foop, goop; jvp = sjvp)
svjp(u, p, t) = [1.0]
@test_throws SciMLBase.TooFewArgumentsError SDEFunction(fiip, giip, vjp = svjp)
@test_throws SciMLBase.TooFewArgumentsError SDEFunction(foop, goop, vjp = svjp)
svjp(u, v, p, t) = [1.0]
@test_throws SciMLBase.NonconformingFunctionsError SDEFunction(fiip, giip, vjp = svjp)
-SDEFunction(foop, goop, vjp = svjp)
+SDEFunction(foop, goop; vjp = svjp)
svjp(du, u, v, p, t) = [1.0]
-SDEFunction(fiip, giip, vjp = svjp)
-SDEFunction(foop, goop, vjp = svjp)
+SDEFunction(fiip, giip; vjp = svjp)
+SDEFunction(foop, goop; vjp = svjp)
# RODEFunction
@@ -252,7 +255,8 @@ end
@test_nowarn RODEFunction(frode)
@test_nowarn RODEFunction(frode, analytic = rode_analytic)
@test_nowarn RODEFunction(frode, analytic = rode_analytic!, analytic_full = true)
-@test_throws MethodError RODEFunction(frode, analytic = rode_analytic!,
+@test_throws MethodError RODEFunction(frode,
+ analytic = rode_analytic!,
analytic_full = nothing)
# DAEFunction
@@ -278,10 +282,10 @@ djac(du, u, p, t) = [1.0]
@test_throws SciMLBase.TooFewArgumentsError DAEFunction(dfoop, jac = djac)
djac(du, u, p, gamma, t) = [1.0]
@test_throws SciMLBase.NonconformingFunctionsError DAEFunction(dfiip, jac = djac)
-DAEFunction(dfoop, jac = djac)
+DAEFunction(dfoop; jac = djac)
djac(res, du, u, p, gamma, t) = [1.0]
-DAEFunction(dfiip, jac = djac)
-DAEFunction(dfoop, jac = djac)
+DAEFunction(dfiip; jac = djac)
+DAEFunction(dfoop; jac = djac)
djvp(u, t) = [1.0]
@test_throws SciMLBase.TooFewArgumentsError DAEFunction(dfiip, jvp = djvp)
@@ -297,10 +301,10 @@ djvp(du, u, v, p, t) = [1.0]
@test_throws SciMLBase.TooFewArgumentsError DAEFunction(dfoop, jvp = djvp)
djvp(du, u, v, p, gamma, t) = [1.0]
@test_throws SciMLBase.NonconformingFunctionsError DAEFunction(dfiip, jvp = djvp)
-DAEFunction(dfoop, jvp = djvp)
+DAEFunction(dfoop; jvp = djvp)
djvp(res, du, u, v, p, gamma, t) = [1.0]
-DAEFunction(dfiip, jvp = djvp)
-DAEFunction(dfoop, jvp = djvp)
+DAEFunction(dfiip; jvp = djvp)
+DAEFunction(dfoop; jvp = djvp)
dvjp(u, t) = [1.0]
@test_throws SciMLBase.TooFewArgumentsError DAEFunction(dfiip, vjp = dvjp)
@@ -316,11 +320,11 @@ dvjp(du, u, v, p, t) = [1.0]
@test_throws SciMLBase.TooFewArgumentsError DAEFunction(dfoop, vjp = dvjp)
dvjp(du, u, v, p, gamma, t) = [1.0]
@test_throws SciMLBase.NonconformingFunctionsError DAEFunction(dfiip, vjp = dvjp)
-DAEFunction(dfoop, vjp = dvjp)
+DAEFunction(dfoop; vjp = dvjp)
dvjp(res, du, u, v, p, gamma, t) = [1.0]
-DAEFunction(dfiip, vjp = dvjp)
-DAEFunction(dfoop, vjp = dvjp)
-DAEFunction{true, SciMLBase.NoSpecialize}(dfiip, observed = 1)
+DAEFunction(dfiip; vjp = dvjp)
+DAEFunction(dfoop; vjp = dvjp)
+DAEFunction{true, SciMLBase.NoSpecialize}(dfiip; observed = 1)
# DDEFunction
@@ -339,10 +343,10 @@ ddejac(u, h, t) = [1.0]
@test_throws SciMLBase.TooFewArgumentsError DDEFunction(ddefoop, jac = ddejac)
ddejac(u, h, p, t) = [1.0]
@test_throws SciMLBase.NonconformingFunctionsError DDEFunction(ddefiip, jac = ddejac)
-DDEFunction(ddefoop, jac = ddejac)
+DDEFunction(ddefoop; jac = ddejac)
ddejac(du, u, h, p, t) = [1.0]
-DDEFunction(ddefiip, jac = ddejac)
-DDEFunction(ddefoop, jac = ddejac)
+DDEFunction(ddefiip; jac = ddejac)
+DDEFunction(ddefoop; jac = ddejac)
ddeWfact(u, h, t) = [1.0]
@test_throws SciMLBase.TooFewArgumentsError DDEFunction(ddefiip, Wfact = ddeWfact)
@@ -352,10 +356,10 @@ ddeWfact(u, h, p, t) = [1.0]
@test_throws SciMLBase.TooFewArgumentsError DDEFunction(ddefoop, Wfact = ddeWfact)
ddeWfact(u, h, p, gamma, t) = [1.0]
@test_throws SciMLBase.NonconformingFunctionsError DDEFunction(ddefiip, Wfact = ddeWfact)
-DDEFunction(ddefoop, Wfact = ddeWfact)
+DDEFunction(ddefoop; Wfact = ddeWfact)
ddeWfact(du, u, h, p, gamma, t) = [1.0]
-DDEFunction(ddefiip, Wfact = ddeWfact)
-DDEFunction(ddefoop, Wfact = ddeWfact)
+DDEFunction(ddefiip; Wfact = ddeWfact)
+DDEFunction(ddefoop; Wfact = ddeWfact)
ddeWfact_t(u, h, t) = [1.0]
@test_throws SciMLBase.TooFewArgumentsError DDEFunction(ddefiip, Wfact_t = ddeWfact_t)
@@ -366,20 +370,20 @@ ddeWfact_t(u, h, p, t) = [1.0]
ddeWfact_t(u, h, p, gamma, t) = [1.0]
@test_throws SciMLBase.NonconformingFunctionsError DDEFunction(ddefiip,
Wfact_t = ddeWfact_t)
-DDEFunction(ddefoop, Wfact_t = Wfact_t)
+DDEFunction(ddefoop; Wfact_t = ddeWfact_t)
ddeWfact_t(du, u, h, p, gamma, t) = [1.0]
-DDEFunction(ddefiip, Wfact_t = ddeWfact_t)
-DDEFunction(ddefoop, Wfact_t = ddeWfact_t)
+DDEFunction(ddefiip; Wfact_t = ddeWfact_t)
+DDEFunction(ddefoop; Wfact_t = ddeWfact_t)
ddetgrad(u, h, t) = [1.0]
@test_throws SciMLBase.TooFewArgumentsError DDEFunction(ddefiip, tgrad = ddetgrad)
@test_throws SciMLBase.TooFewArgumentsError DDEFunction(ddefoop, tgrad = ddetgrad)
ddetgrad(u, h, p, t) = [1.0]
@test_throws SciMLBase.NonconformingFunctionsError DDEFunction(ddefiip, tgrad = ddetgrad)
-DDEFunction(ddefoop, tgrad = ddetgrad)
+DDEFunction(ddefoop; tgrad = ddetgrad)
ddetgrad(du, u, h, p, t) = [1.0]
-DDEFunction(ddefiip, tgrad = ddetgrad)
-DDEFunction(ddefoop, tgrad = ddetgrad)
+DDEFunction(ddefiip; tgrad = ddetgrad)
+DDEFunction(ddefoop; tgrad = ddetgrad)
ddeparamjac(u, h, t) = [1.0]
@test_throws SciMLBase.TooFewArgumentsError DDEFunction(ddefiip, paramjac = ddeparamjac)
@@ -387,30 +391,30 @@ ddeparamjac(u, h, t) = [1.0]
ddeparamjac(u, h, p, t) = [1.0]
@test_throws SciMLBase.NonconformingFunctionsError DDEFunction(ddefiip,
paramjac = ddeparamjac)
-DDEFunction(ddefoop, paramjac = paramjac)
+DDEFunction(ddefoop; paramjac = ddeparamjac)
ddeparamjac(du, u, h, p, t) = [1.0]
-DDEFunction(ddefiip, paramjac = ddeparamjac)
-DDEFunction(ddefoop, paramjac = ddeparamjac)
+DDEFunction(ddefiip; paramjac = ddeparamjac)
+DDEFunction(ddefoop; paramjac = ddeparamjac)
ddejvp(u, h, p, t) = [1.0]
@test_throws SciMLBase.TooFewArgumentsError DDEFunction(ddefiip, jvp = ddejvp)
@test_throws SciMLBase.TooFewArgumentsError DDEFunction(ddefoop, jvp = ddejvp)
ddejvp(u, v, h, p, t) = [1.0]
@test_throws SciMLBase.NonconformingFunctionsError DDEFunction(ddefiip, jvp = ddejvp)
-DDEFunction(ddefoop, jvp = ddejvp)
+DDEFunction(ddefoop; jvp = ddejvp)
ddejvp(du, u, v, h, p, t) = [1.0]
-DDEFunction(ddefiip, jvp = ddejvp)
-DDEFunction(ddefoop, jvp = ddejvp)
+DDEFunction(ddefiip; jvp = ddejvp)
+DDEFunction(ddefoop; jvp = ddejvp)
ddevjp(u, h, p, t) = [1.0]
@test_throws SciMLBase.TooFewArgumentsError DDEFunction(ddefiip, vjp = ddevjp)
@test_throws SciMLBase.TooFewArgumentsError DDEFunction(ddefoop, vjp = ddevjp)
ddevjp(u, v, h, p, t) = [1.0]
@test_throws SciMLBase.NonconformingFunctionsError DDEFunction(ddefiip, vjp = ddevjp)
-DDEFunction(ddefoop, vjp = ddevjp)
+DDEFunction(ddefoop; vjp = ddevjp)
ddevjp(du, u, v, h, p, t) = [1.0]
-DDEFunction(ddefiip, vjp = ddevjp)
-DDEFunction(ddefoop, vjp = ddevjp)
+DDEFunction(ddefiip; vjp = ddevjp)
+DDEFunction(ddefoop; vjp = ddevjp)
# NonlinearFunction
@@ -429,10 +433,10 @@ njac(u) = [1.0]
@test_throws SciMLBase.TooFewArgumentsError NonlinearFunction(nfoop, jac = njac)
njac(u, p) = [1.0]
@test_throws SciMLBase.NonconformingFunctionsError NonlinearFunction(nfiip, jac = njac)
-NonlinearFunction(nfoop, jac = njac)
+NonlinearFunction(nfoop; jac = njac)
njac(du, u, p) = [1.0]
-NonlinearFunction(nfiip, jac = njac)
-NonlinearFunction(nfoop, jac = njac)
+NonlinearFunction(nfiip; jac = njac)
+NonlinearFunction(nfoop; jac = njac)
njvp(u) = [1.0]
@test_throws SciMLBase.TooFewArgumentsError NonlinearFunction(nfiip, jvp = njvp)
@@ -442,10 +446,10 @@ njvp(u, p) = [1.0]
@test_throws SciMLBase.TooFewArgumentsError NonlinearFunction(nfoop, jvp = njvp)
njvp(u, v, p) = [1.0]
@test_throws SciMLBase.NonconformingFunctionsError NonlinearFunction(nfiip, jvp = njvp)
-NonlinearFunction(nfoop, jvp = njvp)
+NonlinearFunction(nfoop; jvp = njvp)
njvp(du, u, v, p) = [1.0]
-NonlinearFunction(nfiip, jvp = njvp)
-NonlinearFunction(nfoop, jvp = njvp)
+NonlinearFunction(nfiip; jvp = njvp)
+NonlinearFunction(nfoop; jvp = njvp)
nvjp(u) = [1.0]
@test_throws SciMLBase.TooFewArgumentsError NonlinearFunction(nfiip, vjp = nvjp)
@@ -455,10 +459,10 @@ nvjp(u, p) = [1.0]
@test_throws SciMLBase.TooFewArgumentsError NonlinearFunction(nfoop, vjp = nvjp)
nvjp(u, v, p) = [1.0]
@test_throws SciMLBase.NonconformingFunctionsError NonlinearFunction(nfiip, vjp = nvjp)
-NonlinearFunction(nfoop, vjp = nvjp)
+NonlinearFunction(nfoop; vjp = nvjp)
nvjp(du, u, v, p) = [1.0]
-NonlinearFunction(nfiip, vjp = nvjp)
-NonlinearFunction(nfoop, vjp = nvjp)
+NonlinearFunction(nfiip; vjp = nvjp)
+NonlinearFunction(nfoop; vjp = nvjp)
# Integrals
intf(u) = 1.0
@@ -535,10 +539,10 @@ bcjac(u, p, t) = [1.0]
bciip,
jac = bjac,
bcjac = bcjac)
-BVPFunction(bfoop, bcoop, jac = bjac)
+BVPFunction(bfoop, bcoop; jac = bjac)
bjac(du, u, p, t) = [1.0]
bcjac(du, u, p, t) = [1.0]
-BVPFunction(bfiip, bciip, jac = bjac, bcjac = bcjac)
+BVPFunction(bfiip, bciip; jac = bjac, bcjac = bcjac)
@test_throws SciMLBase.NonconformingFunctionsError BVPFunction(bfoop,
bciip,
jac = bjac,
@@ -547,7 +551,7 @@ BVPFunction(bfiip, bciip, jac = bjac, bcjac = bcjac)
bcoop,
jac = bjac,
bcjac = bcjac)
-BVPFunction(bfoop, bcoop, jac = bjac, bcjac = bcjac)
+BVPFunction(bfoop, bcoop; jac = bjac, bcjac = bcjac)
bWfact(u, t) = [1.0]
@test_throws SciMLBase.TooFewArgumentsError BVPFunction(bfiip, bciip, Wfact = bWfact)
@@ -559,7 +563,7 @@ bWfact(u, p, gamma, t) = [1.0]
@test_throws SciMLBase.NonconformingFunctionsError BVPFunction(bfiip, bciip, Wfact = bWfact)
@test_throws SciMLBase.NonconformingFunctionsError BVPFunction(bfoop, bciip, Wfact = bWfact)
bWfact(du, u, p, gamma, t) = [1.0]
-BVPFunction(bfiip, bciip, Wfact = bWfact)
+BVPFunction(bfiip, bciip; Wfact = bWfact)
@test_throws SciMLBase.NonconformingFunctionsError BVPFunction(bfoop, bciip, Wfact = bWfact)
bWfact_t(u, t) = [1.0]
@@ -576,7 +580,7 @@ bWfact_t(u, p, gamma, t) = [1.0]
bciip,
Wfact_t = bWfact_t)
bWfact_t(du, u, p, gamma, t) = [1.0]
-BVPFunction(bfiip, bciip, Wfact_t = bWfact_t)
+BVPFunction(bfiip, bciip; Wfact_t = bWfact_t)
@test_throws SciMLBase.NonconformingFunctionsError BVPFunction(bfoop,
bciip,
Wfact_t = bWfact_t)
@@ -588,7 +592,7 @@ btgrad(u, p, t) = [1.0]
@test_throws SciMLBase.NonconformingFunctionsError BVPFunction(bfiip, bciip, tgrad = btgrad)
@test_throws SciMLBase.NonconformingFunctionsError BVPFunction(bfoop, bciip, tgrad = btgrad)
btgrad(du, u, p, t) = [1.0]
-BVPFunction(bfiip, bciip, tgrad = btgrad)
+BVPFunction(bfiip, bciip; tgrad = btgrad)
@test_throws SciMLBase.NonconformingFunctionsError BVPFunction(bfoop, bciip, tgrad = btgrad)
bparamjac(u, t) = [1.0]
@@ -602,7 +606,7 @@ bparamjac(u, p, t) = [1.0]
bciip,
paramjac = bparamjac)
bparamjac(du, u, p, t) = [1.0]
-BVPFunction(bfiip, bciip, paramjac = bparamjac)
+BVPFunction(bfiip, bciip; paramjac = bparamjac)
@test_throws SciMLBase.NonconformingFunctionsError BVPFunction(bfoop,
bciip,
paramjac = bparamjac)
@@ -614,7 +618,7 @@ bjvp(u, v, p, t) = [1.0]
@test_throws SciMLBase.NonconformingFunctionsError BVPFunction(bfiip, bciip, jvp = bjvp)
@test_throws SciMLBase.NonconformingFunctionsError BVPFunction(bfoop, bciip, jvp = bjvp)
bjvp(du, u, v, p, t) = [1.0]
-BVPFunction(bfiip, bciip, jvp = bjvp)
+BVPFunction(bfiip, bciip; jvp = bjvp)
@test_throws SciMLBase.NonconformingFunctionsError BVPFunction(bfoop, bciip, jvp = bjvp)
bvjp(u, p, t) = [1.0]
@@ -624,7 +628,7 @@ bvjp(u, v, p, t) = [1.0]
@test_throws SciMLBase.NonconformingFunctionsError BVPFunction(bfiip, bciip, vjp = bvjp)
@test_throws SciMLBase.NonconformingFunctionsError BVPFunction(bfoop, bciip, vjp = bvjp)
bvjp(du, u, v, p, t) = [1.0]
-BVPFunction(bfiip, bciip, vjp = bvjp)
+BVPFunction(bfiip, bciip; vjp = bvjp)
@test_throws SciMLBase.NonconformingFunctionsError BVPFunction(bfoop, bciip, vjp = bvjp)
@@ -652,9 +656,9 @@ bi1(u) = u
bitoo(y, u, p, a) = y .= p .* u
BatchIntegralFunction(boop)
-BatchIntegralFunction(boop, max_batch = 20)
+BatchIntegralFunction(boop; max_batch = 20)
BatchIntegralFunction(biip, Float64[])
-BatchIntegralFunction(biip, Float64[], max_batch = 20)
+BatchIntegralFunction(biip, Float64[]; max_batch = 20)
@test_throws SciMLBase.IntegrandMismatchFunctionError BatchIntegralFunction(boop, Float64[])
@test_throws SciMLBase.IntegrandMismatchFunctionError BatchIntegralFunction(biip)
diff --git a/test/integrator_tests.jl b/test/integrator_tests.jl
index 23fa9fb8f1..508127527e 100644
--- a/test/integrator_tests.jl
+++ b/test/integrator_tests.jl
@@ -14,7 +14,13 @@ mutable struct DummyIntegrator{Alg, IIP, U, T} <: SciMLBase.DEIntegrator{Alg, II
sol::DummySolution
function DummyIntegrator()
- new{Bool, Bool, Vector{Float64}, Float64}([0.0], 0, [0.0], 0, 1, 1, [],
+ new{Bool, Bool, Vector{Float64}, Float64}([0.0],
+ 0,
+ [0.0],
+ 0,
+ 1,
+ 1,
+ [],
DummySolution(ReturnCode.Default))
end
end
diff --git a/test/python/pythoncall.jl b/test/python/pythoncall.jl
index 7f0758be1c..e9d0b3526a 100644
--- a/test/python/pythoncall.jl
+++ b/test/python/pythoncall.jl
@@ -1,7 +1,7 @@
# PyCall and PythonCall must use the same Python interpreter. This environment variable
# tells PythonCall to use the same Python interpreter as PyCall. See
# https://github.com/JuliaPy/PythonCall.jl/blob/5f56a9b96b867a9f6742ab1d1e2361abd844e19f/docs/src/pycall.md#tips
-ENV["JULIA_PYTHONCALL_EXE"]="@PyCall"
+ENV["JULIA_PYTHONCALL_EXE"] = "@PyCall"
using DifferentialEquations, PythonCall
@@ -18,7 +18,8 @@ using DifferentialEquations, PythonCall
tspan = (0., 1.)
prob = de.ODEProblem(f, u0, tspan)
sol = de.solve(prob)
- """, @__MODULE__)
+ """,
+ @__MODULE__)
@test pyconvert(Any, pyeval("sol", @__MODULE__)) isa ODESolution
pyexec("""
@@ -32,7 +33,8 @@ using DifferentialEquations, PythonCall
p = [10.0,28.0,8/3]
prob = de.ODEProblem(f, u0, tspan, p)
sol = de.solve(prob,saveat=0.01)
- """, @__MODULE__)
+ """,
+ @__MODULE__)
@test pyconvert(Any, pyeval("sol", @__MODULE__)) isa ODESolution
# Test that the types and shapes of sol.t and de.transpose(de.stack(sol.u)) are
@@ -44,7 +46,8 @@ using DifferentialEquations, PythonCall
sol.t[0] == 0 and \
sol.t[-1] == 100 and \
type(u2[4123, 2]) == float
- """, @__MODULE__)
+ """,
+ @__MODULE__)
@test pyconvert(Any, pyeval("ok", @__MODULE__))
@pyexec """
@@ -75,10 +78,12 @@ using DifferentialEquations, PythonCall
tspan = (0.0,1.0)
prob = de.SDEProblem(f,g,u0,tspan)
sol = de.solve(prob,reltol=1e-3,abstol=1e-3)
- """, @__MODULE__)
+ """,
+ @__MODULE__)
end
@testset "promotion" begin
- _u0 = pyconvert(Any, pyeval("""de.SciMLBase.prepare_initial_state([1.0, 0, 0])""", @__MODULE__))
+ _u0 = pyconvert(Any,
+ pyeval("""de.SciMLBase.prepare_initial_state([1.0, 0, 0])""", @__MODULE__))
@test _u0 isa Vector{Float64}
end
diff --git a/test/runtests.jl b/test/runtests.jl
index 0c58fb8416..9c87139b19 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -11,13 +11,13 @@ const is_APPVEYOR = (Sys.iswindows() && haskey(ENV, "APPVEYOR"))
function activate_downstream_env()
Pkg.activate("downstream")
- Pkg.develop(PackageSpec(path = dirname(@__DIR__)))
+ Pkg.develop(PackageSpec(; path = dirname(@__DIR__)))
Pkg.instantiate()
end
function activate_python_env()
Pkg.activate("python")
- Pkg.develop(PackageSpec(path = dirname(@__DIR__)))
+ Pkg.develop(PackageSpec(; path = dirname(@__DIR__)))
Pkg.instantiate()
end
diff --git a/test/solution_interface.jl b/test/solution_interface.jl
index 798aa31d76..bece6a62ff 100644
--- a/test/solution_interface.jl
+++ b/test/solution_interface.jl
@@ -6,10 +6,10 @@ using Test, SciMLBase
end
@testset "plot ODE solution" begin
- f = ODEFunction((u, p, t) -> -u, analytic = (u0, p, t) -> u0 * exp(-t))
+ f = ODEFunction((u, p, t) -> -u; analytic = (u0, p, t) -> u0 * exp(-t))
ode = ODEProblem(f, 1.0, (0.0, 1.0))
sol = SciMLBase.build_solution(ode, :NoAlgorithm, [ode.tspan[begin]], [ode.u0])
- for t in Iterators.drop(range(ode.tspan..., length = 5), 1)
+ for t in Iterators.drop(range(ode.tspan...; length = 5), 1)
push!(sol.t, t)
push!(sol.u, ode.u0)
end
diff --git a/test/solver_missing_error_messages.jl b/test/solver_missing_error_messages.jl
index 813b5cf5de..89b4218c06 100644
--- a/test/solver_missing_error_messages.jl
+++ b/test/solver_missing_error_messages.jl
@@ -5,18 +5,19 @@ prob = OptimizationProblem((x, p) -> sum(x), zeros(2))
struct OptAlg end
SciMLBase.allowscallback(::OptAlg) = false
-@test_throws SciMLBase.IncompatibleOptimizerError solve(prob, OptAlg(),
+@test_throws SciMLBase.IncompatibleOptimizerError solve(prob,
+ OptAlg(),
callback = (args...) -> false)
SciMLBase.requiresbounds(::OptAlg) = true
@test_throws SciMLBase.IncompatibleOptimizerError solve(prob, OptAlg())
SciMLBase.requiresbounds(::OptAlg) = false
-prob = OptimizationProblem((x, p) -> sum(x), zeros(2), lb = [-1.0, -1.0], ub = [1.0, 1.0])
+prob = OptimizationProblem((x, p) -> sum(x), zeros(2); lb = [-1.0, -1.0], ub = [1.0, 1.0])
@test_throws SciMLBase.IncompatibleOptimizerError solve(prob, OptAlg()) #by default allowsbounds is false
cons = (res, x, p) -> (res .= [x[1]^2 + x[2]^2])
-optf = OptimizationFunction((x, p) -> sum(x), SciMLBase.NoAD(), cons = cons)
+optf = OptimizationFunction((x, p) -> sum(x), SciMLBase.NoAD(); cons = cons)
prob = OptimizationProblem(optf, zeros(2))
@test_throws SciMLBase.IncompatibleOptimizerError solve(prob, OptAlg()) #by default allowsconstraints is false