diff --git a/dev/.documenter-siteinfo.json b/dev/.documenter-siteinfo.json index dd4908a88..0790800e6 100644 --- a/dev/.documenter-siteinfo.json +++ b/dev/.documenter-siteinfo.json @@ -1 +1 @@ -{"documenter":{"julia_version":"1.10.0","generation_timestamp":"2024-01-11T16:47:52","documenter_version":"1.2.1"}} \ No newline at end of file +{"documenter":{"julia_version":"1.10.0","generation_timestamp":"2024-01-14T04:01:46","documenter_version":"1.2.1"}} \ No newline at end of file diff --git a/dev/assets/Manifest.toml b/dev/assets/Manifest.toml index 62c3ef125..a5980697d 100644 --- a/dev/assets/Manifest.toml +++ b/dev/assets/Manifest.toml @@ -108,9 +108,9 @@ version = "0.5.1" [[deps.ChainRulesCore]] deps = ["Compat", "LinearAlgebra"] -git-tree-sha1 = "2118cb2765f8197b08e5958cdd17c165427425ee" +git-tree-sha1 = "c1deebd76f7a443d527fc0430d5758b8b2112ed8" uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" -version = "1.19.0" +version = "1.19.1" weakdeps = ["SparseArrays"] [deps.ChainRulesCore.extensions] @@ -146,9 +146,9 @@ version = "0.3.0" [[deps.Compat]] deps = ["TOML", "UUIDs"] -git-tree-sha1 = "ed2ebb1ff7550226ddb584ba8352facf8d9ffb22" +git-tree-sha1 = "75bd5b6fc5089df449b5d35fa501c846c9b6549b" uuid = "34da2185-b29b-5c13-b0c7-acf172513d20" -version = "4.11.0" +version = "4.12.0" weakdeps = ["Dates", "LinearAlgebra"] [deps.Compat.extensions] @@ -465,9 +465,9 @@ version = "1.3.0" [[deps.Git_jll]] deps = ["Artifacts", "Expat_jll", "JLLWrappers", "LibCURL_jll", "Libdl", "Libiconv_jll", "OpenSSL_jll", "PCRE2_jll", "Zlib_jll"] -git-tree-sha1 = "bb8f7cc77ec1152414b2af6db533d9471cfbb2d1" +git-tree-sha1 = "b30c473c97fcc1e1e44fab8f3e88fd1b89c9e9d1" uuid = "f8c6e375-362e-5223-8a59-34ff63f689eb" -version = "2.42.0+0" +version = "2.43.0+0" [[deps.Glob]] git-tree-sha1 = "97285bbd5230dd766e9ef6749b80fc617126d496" @@ -1016,9 +1016,9 @@ uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7" [[deps.QuadGK]] deps = ["DataStructures", "LinearAlgebra"] -git-tree-sha1 = "9ebcd48c498668c7fa0e97a9cae873fbee7bfee1" +git-tree-sha1 = "9b23c31e76e333e6fb4c1595ae6afa74966a729e" uuid = "1fd47b50-473d-5c70-9696-f719f8f3bcdc" -version = "2.9.1" +version = "2.9.4" [[deps.REPL]] deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"] @@ -1120,7 +1120,7 @@ version = "0.6.42" deps = ["ADTypes", "ArrayInterface", "CommonSolve", "ConstructionBase", "Distributed", "DocStringExtensions", "EnumX", "FillArrays", "FunctionWrappersWrappers", "IteratorInterfaceExtensions", "LinearAlgebra", "Logging", "Markdown", "PrecompileTools", "Preferences", "Printf", "RecipesBase", "RecursiveArrayTools", "Reexport", "RuntimeGeneratedFunctions", "SciMLOperators", "StaticArraysCore", "Statistics", "SymbolicIndexingInterface", "Tables", "TruncatedStacktraces"] path = "/home/runner/work/SciMLBase.jl/SciMLBase.jl" uuid = "0bca4576-84f4-4d90-8ffe-ffa030f20462" -version = "2.18.0" +version = "2.19.0" [deps.SciMLBase.extensions] SciMLBaseChainRulesCoreExt = "ChainRulesCore" @@ -1160,9 +1160,9 @@ uuid = "1a1011a3-84de-559e-8e89-a11a2f7dc383" [[deps.SimpleNonlinearSolve]] deps = ["ADTypes", "ArrayInterface", "ConcreteStructs", "DiffEqBase", "FiniteDiff", "ForwardDiff", "LinearAlgebra", "MaybeInplace", "PrecompileTools", "Reexport", "SciMLBase", "StaticArraysCore"] -git-tree-sha1 = "8d672bd91dc432fb286b6d4bcf1a5dc417e932a3" +git-tree-sha1 = "06dc9a74cd2b667b921c20e53631d36ea42be912" uuid = "727e6d20-b764-4bd8-a329-72de5adea6c7" -version = "1.2.0" +version = "1.2.1" [deps.SimpleNonlinearSolve.extensions] SimpleNonlinearSolvePolyesterForwardDiffExt = 
"PolyesterForwardDiff" @@ -1246,9 +1246,9 @@ weakdeps = ["OffsetArrays", "StaticArrays"] [[deps.StaticArrays]] deps = ["LinearAlgebra", "PrecompileTools", "Random", "StaticArraysCore"] -git-tree-sha1 = "4e17a790909b17f7bf1496e3aec138cf01b60b3b" +git-tree-sha1 = "f68dd04d131d9a8a8eb836173ee8f105c360b0c5" uuid = "90137ffa-7385-5640-81b9-e52037218182" -version = "1.9.0" +version = "1.9.1" weakdeps = ["ChainRulesCore", "Statistics"] [deps.StaticArrays.extensions] diff --git a/dev/fundamentals/FAQ/index.html b/dev/fundamentals/FAQ/index.html index 263f4085a..0bcca1a38 100644 --- a/dev/fundamentals/FAQ/index.html +++ b/dev/fundamentals/FAQ/index.html @@ -1,2 +1,2 @@ -Frequently Asked Questions · SciMLBase.jl

Frequently Asked Questions

What are the code styling rules for SciML?

All SciML libraries are supposed to follow SciMLStyle. Any deviation from that style is something to be fixed.

Where do I find more information on the internals of some packages?

The SciML Developer Documentation describes the internals of some of the larger solver libraries at length.

What are the community practices that SciML developers should use?

See ColPrac: Contributor's Guide on Collaborative Practices for Community Packages

Are there developer programs to help fund parties interested in helping develop SciML?

Yes! See the SciML Developer Programs webpage.

diff --git a/dev/index.html b/dev/index.html index dff37bc44..5a694edd0 100644 --- a/dev/index.html +++ b/dev/index.html @@ -2,7 +2,7 @@ Home · SciMLBase.jl

The SciML Common Interface for Julia Equation Solvers

The SciML common interface ties together the numerical solvers of the Julia package ecosystem into a single unified interface. It is designed for maximal efficiency and parallelism, while incorporating essential features for large-scale scientific machine learning such as differentiability, composability, and sparsity.

This documentation pools together the docs of the various SciML libraries to paint the overarching picture, establish development norms, and document the shared/common functionality.

Domains of SciML

The SciML common interface covers the following domains:

  • Linear systems (LinearProblem)
    • Direct methods for dense and sparse
    • Iterative solvers with preconditioning
  • Nonlinear Systems (NonlinearProblem)
    • Rootfinding for systems of nonlinear equations
  • Interval Nonlinear Systems
    • Bracketing rootfinders for nonlinear equations with interval bounds
  • Integrals (quadrature) (IntegralProblem)
  • Differential Equations
    • Discrete equations (function maps, discrete stochastic (Gillespie/Markov) simulations) (DiscreteProblem)
    • Ordinary differential equations (ODEs) (ODEProblem)
    • Split and Partitioned ODEs (Symplectic integrators, IMEX Methods) (SplitODEProblem)
    • Stochastic ordinary differential equations (SODEs or SDEs) (SDEProblem)
    • Stochastic differential-algebraic equations (SDAEs) (SDEProblem with mass matrices)
    • Random differential equations (RODEs or RDEs) (RODEProblem)
    • Differential algebraic equations (DAEs) (DAEProblem and ODEProblem with mass matrices)
    • Delay differential equations (DDEs) (DDEProblem)
    • Neutral, retarded, and algebraic delay differential equations (NDDEs, RDDEs, and DDAEs)
    • Stochastic delay differential equations (SDDEs) (SDDEProblem)
    • Experimental support for stochastic neutral, retarded, and algebraic delay differential equations (SNDDEs, SRDDEs, and SDDAEs)
    • Mixed discrete and continuous equations (Hybrid Equations, Jump Diffusions) (AbstractDEProblems with callbacks)
  • Optimization (OptimizationProblem)
    • Nonlinear (constrained) optimization
  • (Stochastic/Delay/Differential-Algebraic) Partial Differential Equations (PDESystem)
    • Finite difference and finite volume methods
    • Interfaces to finite element methods
    • Physics-Informed Neural Networks (PINNs)
    • Integro-Differential Equations
    • Fractional Differential Equations

The SciML common interface also includes ModelingToolkit.jl for defining such systems symbolically, allowing for optimizations like automated generation of parallel code, symbolic simplification, and generation of sparsity patterns.

Extended SciML Domain

In addition to the purely numerical representations of mathematical objects, there are also sets of problem types associated with common mathematical algorithms. These are:

  • Data-driven modeling
    • Discrete-time data-driven dynamical systems (DiscreteDataDrivenProblem)
    • Continuous-time data-driven dynamical systems (ContinuousDataDrivenProblem)
    • Symbolic regression (DirectDataDrivenProblem)
  • Uncertainty quantification and expected values (ExpectationProblem)

Inverse Problems, Parameter Estimation, and Structural Identification

We note that parameter estimation and inverse problems are solved directly on their constituent problem types using tools like DiffEqFlux.jl. Thus for example, there is no ODEInverseProblem, and instead ODEProblem is used to find the parameters p that solve the inverse problem.

Common Interface High Level

The SciML interface is common in the sense that the usage of arguments is standardized across all of the problem domains. The underlying high-level ideas include:

  • All domains use the same interface of defining an AbstractSciMLProblem which is then solved via solve(prob,alg;kwargs...), where alg is an AbstractSciMLAlgorithm. The keyword argument names are standardized across the organization (a short sketch follows this list).
  • AbstractSciMLProblems are generally defined by a SciMLFunction which can define extra details about a model function, such as its analytical Jacobian, its sparsity patterns and so on.
  • There is an organization-wide method for defining linear and nonlinear solvers used within other solvers, giving maximum control of performance to the user.
  • Types used within the packages are defined by the input types. For example, packages attempt to internally use the type of the initial condition as the type for the state within differential equation solvers.
  • solve calls should be thread-safe and parallel-safe.
  • init(prob,alg;kwargs...) returns an iterator which allows for directly iterating over the solution process.
  • High performance is key. Any performance that is not at the top level is considered a bug and should be reported as such.
  • All functions have an in-place and out-of-place form, where the in-place form is made to utilize mutation for high performance on large-scale problems and the out-of-place form is for compatibility with tooling like static arrays and some reverse-mode automatic differentiation systems.
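
As a brief illustration of the points above, the following sketch applies the same solve/init pattern to a linear, a nonlinear, and an ODE problem. It assumes the solver packages LinearSolve.jl, NonlinearSolve.jl, and OrdinaryDiffEq.jl are installed; the functions and values are placeholders.

using LinearSolve, NonlinearSolve, OrdinaryDiffEq

# LinearProblem: solve A*x = b with the default linear solver
A = rand(4, 4); b = rand(4)
linsol = solve(LinearProblem(A, b))

# NonlinearProblem: find u such that f(u, p) = 0
f(u, p) = u .* u .- p
nlsol = solve(NonlinearProblem(f, [1.0, 1.0], 2.0), NewtonRaphson())

# ODEProblem: identical solve(prob, alg; kwargs...) pattern, plus the iterator form
odeprob = ODEProblem((u, p, t) -> 1.01u, 0.5, (0.0, 1.0))
odesol = solve(odeprob, Tsit5(); abstol = 1e-8, reltol = 1e-8)
integrator = init(odeprob, Tsit5())   # iterate the solution process directly
step!(integrator)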

User-Facing Solver Libraries

  • DifferentialEquations.jl
    • Multi-package interface of high performance numerical solvers of differential equations
  • ModelingToolkit.jl
    • The symbolic modeling package which implements the SciML symbolic common interface.
  • LinearSolve.jl
    • Multi-package interface for specifying linear solvers (direct, sparse, and iterative), along with tools for caching and preconditioners for use in large-scale modeling.
  • NonlinearSolve.jl
    • High performance numerical solving of nonlinear systems.
  • Integrals.jl
    • Multi-package interface for high performance, batched, and parallelized numerical quadrature.
  • Optimization.jl
    • Multi-package interface for numerical solving of optimization problems.
  • NeuralPDE.jl
    • Physics-Informed Neural Network (PINN) package for transforming partial differential equations into optimization problems.
  • DiffEqOperators.jl
    • Automated finite difference method (FDM) package for transforming partial differential equations into nonlinear problems and ordinary differential equations.
  • DiffEqFlux.jl
    • High level package for scientific machine learning applications, such as neural and universal differential equations, solving of inverse problems, parameter estimation, nonlinear optimal control, and more.
  • DataDrivenDiffEq.jl
    • Multi-package interface for data-driven modeling, Koopman dynamic mode decomposition, symbolic regression/sparsification, and automated model discovery.
  • SciMLExpectations.jl
    • Extension to the dynamical modeling tools for calculating expectations.

Interface Implementation Libraries

  • SciMLBase.jl
    • The core package defining the interface which is consumed by the modeling and solver packages.
  • DiffEqBase.jl
    • The core package defining the extended interface which is consumed by the differential equation solver packages.
  • SciMLSensitivity.jl
    • A package which pools together the definition of derivative overloads to define the common sensealg automatic differentiation interface.
  • DiffEqNoiseProcess.jl
    • A package which defines the stochastic AbstractNoiseProcess interface for the SciML ecosystem.
  • RecursiveArrayTools.jl
    • A package which defines the underlying AbstractVectorOfArray structure used as the output for all time series results.
  • ArrayInterface.jl
    • The package which defines the extended AbstractArray interface employed throughout the SciML ecosystem.

User-Facing Modeling Libraries

There are too many to name here and this will be populated when there is time!

Flowchart Example for PDE-Constrained Optimal Control

The following example showcases how the pieces of the common interface connect to solve a problem that mixes inference, symbolics, and numerics.

External Binding Libraries

  • diffeqr
    • Solving differential equations in R using DifferentialEquations.jl with ModelingToolkit for JIT compilation and GPU-acceleration
  • diffeqpy
    • Solving differential equations in Python using DifferentialEquations.jl

Solver Libraries

There are too many to name here. Check out the SciML Organization Github Page for details.

Contributing

Reproducibility

The documentation of this SciML package was built using these direct dependencies,
Status `~/work/SciMLBase.jl/SciMLBase.jl/docs/Project.toml`
   [e30172f5] Documenter v1.2.1
   [961ee093] ModelingToolkit v8.75.0
-  [0bca4576] SciMLBase v2.18.0 `~/work/SciMLBase.jl/SciMLBase.jl`
+  [0bca4576] SciMLBase v2.19.0 `~/work/SciMLBase.jl/SciMLBase.jl`
and using this machine and Julia version.
Julia Version 1.10.0
 Commit 3120989f39b (2023-12-25 18:01 UTC)
 Build Info:
   Official https://julialang.org/ release
@@ -25,13 +25,13 @@
   [2a0fbf3d] CPUSummary v0.2.4
   [00ebfdb7] CSTParser v3.4.0
   [49dc2e85] Calculus v0.5.1
-  [d360d2e6] ChainRulesCore v1.19.0
+  [d360d2e6] ChainRulesCore v1.19.1
   [fb6a15b2] CloseOpenIntervals v0.1.12
   [861a8166] Combinatorics v1.0.2
   [a80b9123] CommonMark v0.8.12
   [38540f10] CommonSolve v0.2.4
   [bbf7d656] CommonSubexpressions v0.3.0
-  [34da2185] Compat v4.11.0
+  [34da2185] Compat v4.12.0
   [b152e2b5] CompositeTypes v0.1.3
   [2569d6c7] ConcreteStructs v0.2.3
   [187b0558] ConstructionBase v1.5.4
@@ -124,7 +124,7 @@
   [d236fae5] PreallocationTools v0.4.17
   [aea7be01] PrecompileTools v1.2.0
   [21216c6a] Preferences v1.4.1
-  [1fd47b50] QuadGK v2.9.1
+  [1fd47b50] QuadGK v2.9.4
   [e6cf234a] RandomNumbers v1.5.3
   [3cdcf5f2] RecipesBase v1.3.4
   [731186ca] RecursiveArrayTools v3.5.2
@@ -136,10 +136,10 @@
   [7e49a35a] RuntimeGeneratedFunctions v0.5.12
   [94e857df] SIMDTypes v0.1.0
   [476501e8] SLEEFPirates v0.6.42
-  [0bca4576] SciMLBase v2.18.0 `~/work/SciMLBase.jl/SciMLBase.jl`
+  [0bca4576] SciMLBase v2.19.0 `~/work/SciMLBase.jl/SciMLBase.jl`
   [c0aeaf25] SciMLOperators v0.3.7
   [efcf1570] Setfield v1.1.1
-  [727e6d20] SimpleNonlinearSolve v1.2.0
+  [727e6d20] SimpleNonlinearSolve v1.2.1
   [699a6c99] SimpleTraits v0.9.4
   [ce78b400] SimpleUnPack v1.1.0
   [a2af1166] SortingAlgorithms v1.2.1
@@ -148,7 +148,7 @@
   [276daf66] SpecialFunctions v2.3.1
   [aedffcd0] Static v0.8.8
   [0d7ed370] StaticArrayInterface v1.5.0
-  [90137ffa] StaticArrays v1.9.0
+  [90137ffa] StaticArrays v1.9.1
   [1e83bf80] StaticArraysCore v1.4.2
   [82ae8749] StatsAPI v1.7.0
   [2913bbd2] StatsBase v0.34.2
@@ -172,7 +172,7 @@
   [3d5dd08c] VectorizationBase v0.21.65
   [19fa3120] VertexSafeGraphs v0.2.0
   [2e619515] Expat_jll v2.5.0+0
-  [f8c6e375] Git_jll v2.42.0+0
+  [f8c6e375] Git_jll v2.43.0+0
   [1d5cc7b8] IntelOpenMP_jll v2024.0.2+0
   [94ce4f54] Libiconv_jll v1.17.0+0
   [856f044c] MKL_jll v2024.0.0+0
@@ -227,4 +227,4 @@
   [8e850b90] libblastrampoline_jll v5.8.0+1
   [8e850ede] nghttp2_jll v1.52.0+1
   [3f19e933] p7zip_jll v17.4.0+2
-Info Packages marked with ⌃ and ⌅ have new versions available. Those with ⌃ may be upgradable, but those with ⌅ are restricted by compatibility constraints from upgrading. To see why use `status --outdated -m`

+Info Packages marked with ⌃ and ⌅ have new versions available. Those with ⌃ may be upgradable, but those with ⌅ are restricted by compatibility constraints from upgrading. To see why use `status --outdated -m`

You can also download the manifest file and the project file.

diff --git a/dev/interfaces/Algorithms/index.html b/dev/interfaces/Algorithms/index.html index 697b0ebcb..4507374c4 100644 --- a/dev/interfaces/Algorithms/index.html +++ b/dev/interfaces/Algorithms/index.html @@ -1,2 +1,2 @@ -SciMLAlgorithms · SciMLBase.jl

SciMLAlgorithms

Definition of the AbstractSciMLAlgorithm Interface

SciMLAlgorithms are defined as types which have dispatches to the function signature:

CommonSolve.solve(prob::AbstractSciMLProblem,alg::AbstractSciMLAlgorithm;kwargs...)
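
As a rough sketch of what this means for a solver author, the following defines a hypothetical fixed-step method (MyFixedEuler is illustrative, not a real solver). In practice, solver packages hook in through SciMLBase.__solve so that the common solve entry point can add its high-level handling, as described on the init and solve page; build_solution is used here as one way to assemble the returned solution.

using SciMLBase

# Hypothetical algorithm type; not a real solver
struct MyFixedEuler <: SciMLBase.AbstractODEAlgorithm end

function SciMLBase.__solve(prob::SciMLBase.AbstractODEProblem, alg::MyFixedEuler; dt, kwargs...)
    ts = collect(prob.tspan[1]:dt:prob.tspan[2])
    us = [copy(prob.u0)]
    for i in 2:length(ts)
        push!(us, us[end] + dt * prob.f(us[end], prob.p, ts[i - 1]))  # out-of-place f(u,p,t)
    end
    return SciMLBase.build_solution(prob, alg, ts, us)
end

A direct call such as SciMLBase.__solve(ODEProblem((u, p, t) -> -u, 1.0, (0.0, 1.0)), MyFixedEuler(); dt = 0.1) exercises this dispatch; the user-facing solve adds common error checking and keyword handling on top.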

Algorithm-Specific Arguments

Note that because the keyword arguments of solve are designed to be common across the whole problem type, algorithms should have the algorithm-specific keyword arguments defined as part of the algorithm constructor. For example, Rodas5 has a choice of autodiff::Bool which is not common across all ODE solvers, and thus autodiff is an algorithm-specific keyword argument handled via Rodas5(autodiff=true).
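
For instance, assuming OrdinaryDiffEq.jl (the package that provides Rodas5) is installed, the common tolerance keywords go to solve while autodiff goes to the algorithm constructor:

using OrdinaryDiffEq

prob = ODEProblem((du, u, p, t) -> (du .= -u), [1.0], (0.0, 1.0))
sol = solve(prob, Rodas5(autodiff = false); abstol = 1e-8, reltol = 1e-8)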

Remake

Note that remake is applicable to AbstractSciMLAlgorithm types, but this is not used in the public API. It's used for solvers to swap out components like ForwardDiff chunk sizes.

Common Algorithm Keyword Arguments

Commonly used algorithm keyword arguments are:

Traits

SciMLBase.isautodifferentiable — Function

isautodifferentiable(alg::AbstractDEAlgorithm)

Trait declaration for whether an algorithm is compatible with direct automatic differentiation, i.e. can have algorithms like ForwardDiff or ReverseDiff attempt to differentiate directly through the solver.

Defaults to false as only pure-Julia algorithms can have this be true.

source
SciMLBase.allows_arbitrary_number_types — Function

allows_arbitrary_number_types(alg::AbstractDEAlgorithm)

Trait declaration for whether an algorithm is compatible with arbitrary number types, i.e. can solve problems whose state and time values use generic number types such as BigFloat or unitful quantities.

Defaults to false as only pure-Julia algorithms can have this be true.

source
SciMLBase.allowscomplex — Function

allowscomplex(alg::AbstractDEAlgorithm)

Trait declaration for whether an algorithm is compatible with having complex numbers as the state variables.

Defaults to false.

source
SciMLBase.isadaptive — Function

isadaptive(alg::AbstractDEAlgorithm)

Trait declaration for whether an algorithm uses adaptivity, i.e. has a non-quasi-static compute graph.

Defaults to true.

source
is_integrator_adaptive(i::DEIntegrator)

Checks if the integrator is adaptive

source
SciMLBase.isdiscrete — Function

isdiscrete(alg::AbstractDEAlgorithm)

Trait declaration for whether an algorithm allows for discrete state values, such as integers.

Defaults to false.

source
SciMLBase.forwarddiffs_model — Function

forwarddiffs_model(alg::AbstractDEAlgorithm)

Trait declaration for whether an algorithm uses ForwardDiff.jl on the model function, i.e. whether the model function is called with ForwardDiff.jl Dual numbers.

Defaults to false as only pure-Julia algorithms can have this be true.

source
SciMLBase.forwarddiffs_model_time — Function

forwarddiffs_model_time(alg::AbstractDEAlgorithm)

Trait declaration for whether an algorithm uses ForwardDiff.jl on the model function f(u,p,t) in time, i.e. whether f is called with ForwardDiff.jl Dual numbers in the t argument.

Defaults to false, as only a few pure-Julia algorithms (Rosenbrock methods) have this as true.

source
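
For a hypothetical pure-Julia algorithm type (MyFixedEuler is illustrative only and re-declared here so the snippet is self-contained), the traits above are declared by adding one-line methods:

using SciMLBase

struct MyFixedEuler <: SciMLBase.AbstractODEAlgorithm end

SciMLBase.isautodifferentiable(::MyFixedEuler) = true  # pure Julia, so AD can go through it
SciMLBase.allowscomplex(::MyFixedEuler) = true         # nothing in the stepping assumes real numbers
SciMLBase.isadaptive(::MyFixedEuler) = false           # fixed-step method
SciMLBase.isdiscrete(::MyFixedEuler) = false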

Abstract SciML Algorithms

diff --git a/dev/interfaces/Array_and_Number/index.html b/dev/interfaces/Array_and_Number/index.html index 891075d55..59181ff05 100644 --- a/dev/interfaces/Array_and_Number/index.html +++ b/dev/interfaces/Array_and_Number/index.html @@ -1,2 +1,2 @@ -SciML Container (Array) and Number Interfaces · SciMLBase.jl

SciML Container (Array) and Number Interfaces

We live in a society, and therefore there are rules. In this tutorial we outline the rules which are required on container and number types which are allowable in SciML tools.

Warn

In general as of 2023, strict adherence to this interface is an early work-in-progress. If anything does not conform to the documented interface, please open an issue.

Note

There are many types which can work with a specific solver even though they do not satisfy this interface. Many times as part of prototyping you may want to side-step the high level interface checks in order to simply test whether a new type is working. To do this, set interface_checks = false as a keyword argument to init/solve to bypass any of the internal interface checks. This means you will no longer get a nice high-level error message and instead it will attempt to use the type without restrictions. Note that not every problem/solver has implemented this new keyword argument as of 2023.

Note About Wrapped Solvers

Due to limitations of wrapped solvers, any solver that is a wrapped solver from an existing C/Fortran code is inherently limited to Float64 and Vector{Float64} for its operations. This includes packages like Sundials.jl, LSODA.jl, DASKR.jl, MINPACK.jl, and many more. This is fundamental to these solvers and it is not expected that they will allow the full set of SciML types in the future. If more abstract number/container definitions are required, then these are not the appropriate solvers to use.

SciML Number Types

The number types are the types used to define the dependent variables (i.e. u0) and the independent variables (t or tspan). These two types can be different, and can have different restrictions depending on the type of solver which is employed. The following rules for a Number type are held in general:

  • Number types can be used in SciML directly or in containers. If a problem defines a value like u0 using a Number type, the out-of-place form must be used for the problem definition.
  • x::T + y::T = z::T
  • x::T * y::T = z::T
  • oneunit(x::T)::T
  • one(x::T) * oneunit(x::T) = z::T
  • t::T2 * x::T + y::T = z::T for T2 a time type and T the dependent variable type (this includes the muladd equivalent form).

Additionally, the following rules apply to subsets of uses:

Adaptive Number Types

  • x::T / y::T = z::T
  • Default choices of norms can assume sqrt(x::T)::T exists. If internalnorm is overridden then this may not be required (for example, changing the norm to inf-norm).
  • x::T ^ y::T = z::T

Time Types (Independent Variables)

  • If a solver is time adaptive, the time type must be a floating point number. Rational is only allowed for non-adaptive solves.
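
As a small sketch of these rules (assuming OrdinaryDiffEq.jl is installed; the equation is a toy), a Number state requires the out-of-place problem form, and a higher-precision dependent-variable type can be mixed with a Float64 time type:

using OrdinaryDiffEq

f(u, p, t) = 1.01u                            # out-of-place form, required for Number states
prob = ODEProblem(f, big"0.5", (0.0, 1.0))    # u0::BigFloat, tspan elements are Float64
sol = solve(prob, Tsit5(); abstol = 1e-20, reltol = 1e-20)
eltype(sol.u)                                 # BigFloat: the state type is preserved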

SciML Container (Array) Types

Container types are types which hold number types. They can be used to define objects like the state vector (u0) of a problem. The following operations are required in a container type to be used with SciML solvers:

Note

"eltype(x::T)::T2 is a compatible Number type" excludes nested containers such as Array{Array{T}}. However, recursive vectors can be conformed to the interface with zero overhead using tools from RecursiveArrayTools.jl such as VectorOfArray(x). Since this greatly simplifies the interfaces and the ability to check for correctness, doing this wrapping is highly recommended and there are no plans to relax this requirement.

Additionally, the following rules apply to subsets of uses:

SciML Mutable Array Types

  • similar(x::T)::T
  • zero(x::T)::T
  • z::T .= x::T .+ y::T is defined
  • z::T .= x::T .* y::T is defined
  • z::T .= t::T2 .* x::T where T2 is the time type (a Number) and T is the container type.
  • (Optional) Base.resize!(x,i) is required for resize!(integrator,i) to be supported.
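
An informal way to sanity-check a candidate mutable container type against the operations listed above is simply to run them; the sketch below uses a plain Vector, but the same checks apply to a custom type (this is not an official verification routine):

x = rand(4); y = rand(4); t = 0.5

@assert similar(x) isa typeof(x)
@assert zero(x) isa typeof(x)
z = similar(x)
z .= x .+ y      # broadcasted addition into the container
z .= x .* y      # broadcasted multiplication
z .= t .* x      # scaling by the time type
resize!(z, 6)    # optional, only needed for resize!(integrator, i)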

SciML Matrix (Operator) Type

Note that the matrix type may not match the type of the initial container u0. An example is ComponentMatrix as the matrix structure corresponding to a ComponentArray. However, the following actions are assumed to hold on the resulting matrix type:

  • solve(LinearProblem(A::T,b::T2),linsolve) must be defined for a solver to work on a given SciML matrix type T.
  • If the matrix is an operator, i.e. a lazy construct, it should conform to the SciMLOperators interface.
  • If not a SciMLOperator, diagind(W::T) should be defined, and @view(A[idxs]) .= @view(A[idxs]) .+ λ should be valid for updating the diagonal (where idxs = diagind(A) and λ is a scalar).
diff --git a/dev/interfaces/Common_Keywords/index.html b/dev/interfaces/Common_Keywords/index.html index 8373c1d34..ae0f540ec 100644 --- a/dev/interfaces/Common_Keywords/index.html +++ b/dev/interfaces/Common_Keywords/index.html @@ -1,2 +1,2 @@ -Common Keyword Arguments · SciMLBase.jl

Common Keyword Arguments

The following defines the keyword arguments which are meant to be preserved throughout all of the AbstractSciMLProblem cases (where applicable).

Default Algorithm Hinting

To help choose the default algorithm, the keyword argument alg_hints is provided to solve. alg_hints is a Vector{Symbol} which describes the problem at a high level to the solver. The options are:

This functionality is derived via the benchmarks in SciMLBenchmarks.jl

Currently this is only implemented for the differential equation solvers.

Output Control

These arguments control the output behavior of the solvers. The defaults give maximum output for the best interactive user experience, but output can be reduced all the way down to only saving the solution at the final time point.

The following options are all related to output control. See the "Examples" section at the end of this page for some example usage, and the short sketch after this list.

  • dense: Denotes whether to save the extra pieces required for dense (continuous) output. Default is save_everystep && !isempty(saveat) for algorithms which have the ability to produce dense output, i.e. by default it's true unless the user has turned off saving on steps or has chosen a saveat value. If dense=false, the solution still acts like a function, and sol(t) is a linear interpolation between the saved time points.
  • saveat: Denotes specific times to save the solution at, during the solving phase. The solver will save at each of the timepoints in this array in the most efficient manner available to the solver. If only saveat is given, then the arguments save_everystep and dense are false by default. If saveat is given a number, then it will automatically expand to tspan[1]:saveat:tspan[2]. For methods where interpolation is not possible, saveat may be equivalent to tstops. The default value is [].
  • save_idxs: Denotes the indices for the components of the equation to save. Defaults to saving all indices. For example, if you are solving a 3-dimensional ODE, and given save_idxs = [1, 3], only the first and third components of the solution will be outputted. Notice that of course in this case the outputted solution will be two-dimensional.
  • tstops: Denotes extra times that the timestepping algorithm must step to. This should be used to help the solver deal with discontinuities and singularities, since stepping exactly at the time of the discontinuity will improve accuracy. If a method cannot change timesteps (fixed timestep multistep methods), then tstops will use an interpolation, matching the behavior of saveat. If a method cannot change timesteps and also cannot interpolate, then tstops must be a multiple of dt or else an error will be thrown. Default is [].
  • d_discontinuities: Denotes locations of discontinuities in low order derivatives. This will force FSAL algorithms which assume derivative continuity to re-evaluate the derivatives at the point of discontinuity. The default is [].
  • save_everystep: Saves the result at every step. Default is true if isempty(saveat).
  • save_on: Denotes whether intermediate solutions are saved. This overrides the settings of dense, saveat and save_everystep and is used by some applications to manually turn off saving temporarily. Everyday use of the solvers should leave this unchanged. Defaults to true.
  • save_start: Denotes whether the initial condition should be included in the solution type as the first timepoint. Defaults to true.
  • save_end: Denotes whether the final timepoint is forced to be saved, regardless of the other saving settings. Defaults to true.
  • initialize_save: Denotes whether to save after the callback initialization phase (when u_modified=true). Defaults to true.

Note that dense requires save_everystep=true and saveat=false.
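
A short sketch of some of the options above on an ODE solve (assuming OrdinaryDiffEq.jl; the problem is a placeholder):

using OrdinaryDiffEq

prob = ODEProblem((du, u, p, t) -> (du .= -u), [1.0, 2.0, 3.0], (0.0, 1.0))
sol = solve(prob, Tsit5();
            saveat = 0.1,         # save every 0.1 time units (turns off save_everystep and dense)
            save_idxs = [1, 3],   # keep only the first and third components
            save_start = false)   # do not store the initial condition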

Stepsize Control

These arguments control the timestepping routines.

Basic Stepsize Control

  • adaptive: Turns on adaptive timestepping for appropriate methods. Default is true.
  • abstol: Absolute tolerance in adaptive timestepping. This is the tolerance on local error estimates, not necessarily the global error (though these quantities are related).
  • reltol: Relative tolerance in adaptive timestepping. This is the tolerance on local error estimates, not necessarily the global error (though these quantities are related).
  • dt: Sets the initial stepsize. This is also the stepsize for fixed timestep methods. Defaults to an automatic choice if the method is adaptive.
  • dtmax: Maximum dt for adaptive timestepping. Defaults are package-dependent.
  • dtmin: Minimum dt for adaptive timestepping. Defaults are package-dependent.

Fixed Stepsize Usage

Note that if a method does not have adaptivity, the following rules apply (see the sketch after this list):

  • If dt is set, then the algorithm will step with size dt each iteration.
  • If tstops and dt are both set, then the algorithm will step with either a size dt, or use a smaller step to hit the tstops point.
  • If tstops is set without dt, then the algorithm will step directly to each value in tstops
  • If neither dt nor tstops are set, the solver will throw an error.
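
For example, with the non-adaptive Euler method (assuming OrdinaryDiffEq.jl is installed), dt must be supplied, and tstops forces a shortened step onto a specific time:

using OrdinaryDiffEq

prob = ODEProblem((u, p, t) -> -u, 1.0, (0.0, 1.0))
sol = solve(prob, Euler(); dt = 0.01, tstops = [0.255])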

Memory Optimizations

  • alias_u0: allows the solver to alias the initial condition array that is contained in the problem struct. Defaults to false.
  • cache: pass a solver cache to decrease the construction time. This is not implemented for any of the problem interfaces at this moment.

Miscellaneous

  • maxiters: Maximum number of iterations before stopping.
  • callback: Specifies a callback function that is called between iterations.
  • verbose: Toggles whether warnings are thrown when the solver exits early. Defaults to true.

Progress Monitoring

These arguments control the usage of the progressbar in the logger.

  • progress: Turns on/off the Juno progressbar. Default is false.
  • progress_steps: Numbers of steps between updates of the progress bar. Default is 1000.
  • progress_name: Controls the name of the progressbar. Default is the name of the problem type.
  • progress_message: Controls the message with the progressbar. Defaults to showing dt, t, the maximum of u.

The progress bars all use the Julia Logging interface in order to be generic to the IDE or programming tool that is used. For more information on how this is all put together, see this discussion.

Error Calculations

If you are using the test problems (i.e. SciMLFunctions where f.analytic is defined), then options control the errors which are calculated. By default, any cheap error estimates are always calculated. Extra keyword arguments include:

  • timeseries_errors
  • dense_errors

for specifying more expensive errors.

Automatic Differentiation Control

See the Automatic Differentiation page for a full description of sensealg

diff --git a/dev/interfaces/Differentiation/index.html b/dev/interfaces/Differentiation/index.html index 08d91b3c7..37d8192d2 100644 --- a/dev/interfaces/Differentiation/index.html +++ b/dev/interfaces/Differentiation/index.html @@ -25,4 +25,4 @@ function _concrete_solve_forward(args...; kwargs...) error("No sensitivity rules exist. Check that you added `using DiffEqSensitivity`") -end

The sensitivity mechanism is kept in a separate package because of the high dependency and load time cost introduced by the automatic differentiation libraries. Different choices of automatic differentiation are then selected by the sensealg keyword argument in solve, which is made into a positional argument in the _solve_adjoint and other functions in order to allow dispatch.
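
From the user side this looks like the sketch below (assuming OrdinaryDiffEq.jl, SciMLSensitivity.jl, and Zygote.jl are loaded; the loss function is a toy): sensealg is passed as a keyword to solve and only takes effect when the solve call is being differentiated.

using OrdinaryDiffEq, SciMLSensitivity, Zygote

prob = ODEProblem((u, p, t) -> p .* u, [1.0], (0.0, 1.0), [1.5])
loss(p) = sum(Array(solve(prob, Tsit5(); p = p, saveat = 0.1,
                          sensealg = InterpolatingAdjoint())))
grad = Zygote.gradient(loss, [1.5])   # adjoint chosen via sensealg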

SensitivityADPassThrough

The special sensitivity algorithm SensitivityADPassThrough is used to ignore the internal sensitivity dispatches and instead do automatic differentiation directly through the solver. Generally this sensealg is only used internally.

Note about ForwardDiff

ForwardDiff does not use ChainRules.jl and thus it completely ignores the special handling.

diff --git a/dev/interfaces/Init_Solve/index.html b/dev/interfaces/Init_Solve/index.html index ae350f40b..2d2d4c1d7 100644 --- a/dev/interfaces/Init_Solve/index.html +++ b/dev/interfaces/Init_Solve/index.html @@ -1,3 +1,3 @@ The SciML init and solve Functions · SciMLBase.jl

The SciML init and solve Functions

The solve function has the default definition

solve(args...; kwargs...) = solve!(init(args...; kwargs...))

The interface for the three functions is as follows:

init(::ProblemType, args...; kwargs...) :: IteratorType
-solve!(::IteratorType) :: SolutionType

where ProblemType, IteratorType, and SolutionType are the types defined in your package.
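
A minimal sketch of this pattern, with all types hypothetical: a problem type, a cache (iterator) type returned by init, and a solution type returned by solve!. The default solve(args...; kwargs...) = solve!(init(args...; kwargs...)) shown above then works with no further definitions.

import CommonSolve: init, solve!, solve

struct ScalarLinearProblem{T}   # encodes a*x = b
    a::T
    b::T
end
mutable struct ScalarLinearCache{T}
    prob::ScalarLinearProblem{T}
    x::T
end
struct ScalarLinearSolution{T}
    x::T
end

init(prob::ScalarLinearProblem, args...; kwargs...) = ScalarLinearCache(prob, zero(prob.b))
function solve!(cache::ScalarLinearCache)
    cache.x = cache.prob.b / cache.prob.a
    return ScalarLinearSolution(cache.x)
end

solve(ScalarLinearProblem(2.0, 3.0))   # ScalarLinearSolution(1.5), via solve!(init(...))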

To avoid method ambiguity, the first argument of solve, solve!, and init must be dispatched on the type defined in your package. For example, do not define a method such as

init(::AbstractVector, ::AlgorithmType)

init and the Iterator Interface

init's return gives an IteratorType which is designed to allow the user to have more direct handling over the internal solving process. Because of this internal nature, the IteratorType has a less unified interface across problem types than other portions like ProblemType and SolutionType. For example, for differential equations this is the Integrator Interface designed for mutating solutions in a manner for callback implementation, which is distinctly different from the LinearSolve init interface which is designed for caching efficiency with reusing factorizations.

__solve and High-Level Handling

While init and solve are the common entry points for users, solver packages will mostly define dispatches on SciMLBase.__init and SciMLBase.__solve. The reason is that this allows SciMLBase.init and SciMLBase.solve to have common implementations across all solvers for doing things such as checking for common errors and throwing high-level messages. Solvers can opt out of the high-level error handling by directly defining SciMLBase.init and SciMLBase.solve instead, though this is not recommended, in order to allow for uniformity of the error messages.

diff --git a/dev/interfaces/PDE/index.html b/dev/interfaces/PDE/index.html index 920973e28..9e6d075ed 100644 --- a/dev/interfaces/PDE/index.html +++ b/dev/interfaces/PDE/index.html @@ -23,4 +23,4 @@ @named pde_system = PDESystem(eq,bcs,domains,[t,x],[u])source

Domains (WIP)

Domains are specified by saying indepvar in domain, where indepvar is a single or a collection of independent variables, and domain is the chosen domain type. A 2-tuple can be used to indicate an Interval. Thus, forms for the indepvar can be like:

t ∈ (0.0,1.0)
 (t,x) ∈ UnitDisk()
-[v,w,x,y,z] ∈ VectorUnitBall(5)

Domain Types (WIP)

Domain types are imported from DomainSets.jl, but a 2-tuple can be used instead to indicate an Interval.

discretize and symbolic_discretize

The only functions which act on a PDESystem are the following:

Boundary Conditions (WIP)

Transformations

Analyses

Discretizer Ecosystem

NeuralPDE.jl: PhysicsInformedNN

NeuralPDE.jl defines the PhysicsInformedNN discretizer which uses a DiffEqFlux.jl neural network to solve the differential equation.

MethodOfLines.jl: MOLFiniteDifference (WIP)

MethodOfLines.jl defines the MOLFiniteDifference discretizer which performs a finite difference discretization using the DiffEqOperators.jl stencils. These stencils make use of NNlib.jl for fast operations on semi-linear domains.
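
As a rough sketch of how a discretizer consumes a PDESystem (assuming ModelingToolkit.jl, MethodOfLines.jl, and OrdinaryDiffEq.jl are installed, and reusing the pde_system, t, and x from the construction shown earlier on this page):

using ModelingToolkit, MethodOfLines, OrdinaryDiffEq

discretization = MOLFiniteDifference([x => 0.1], t)   # finite difference in x, keep t continuous
odeprob = discretize(pde_system, discretization)      # lower the PDESystem to an ODEProblem
sol = solve(odeprob, Tsit5())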

diff --git a/dev/interfaces/Problems/index.html b/dev/interfaces/Problems/index.html index 0bd1ab005..c9afb28b7 100644 --- a/dev/interfaces/Problems/index.html +++ b/dev/interfaces/Problems/index.html @@ -1,19 +1,19 @@ SciMLProblems · SciMLBase.jl

SciMLProblems

The cornerstone of the SciML common interface is the problem type definition. These definitions are the encoding of mathematical problems into a numerically computable form.

Note About Symbolics and ModelingToolkit

The symbolic analog to the problem interface is the ModelingToolkit AbstractSystem. For example, ODESystem is the symbolic analog to ODEProblem. Each of these system types has a method for constructing the associated problem and function types.

Definition of the AbstractSciMLProblem Interface

The following standard principles should be adhered to across all AbstractSciMLProblem instantiations.

In-place Specification

Each AbstractSciMLProblem type can be called with an "is inplace" (iip) choice. For example:

ODEProblem(f,u0,tspan,p)
-ODEProblem{iip}(f,u0,tspan,p)

where iip is a Boolean for whether the function is in the in-place form (mutating to change the first value). This is automatically determined using the methods table, but note that for full type-inferability of the AbstractSciMLProblem this iip-ness should be specified.
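
A brief sketch of both forms for an ODEProblem (the right-hand-side functions are placeholders):

using SciMLBase

f!(du, u, p, t) = (du .= p .* u)   # in-place: mutates du
f(u, p, t) = p .* u                # out-of-place: returns the derivative

prob_auto = ODEProblem(f!, [1.0], (0.0, 1.0), 2.0)          # iip detected from the methods table
prob_iip  = ODEProblem{true}(f!, [1.0], (0.0, 1.0), 2.0)    # iip stated explicitly (type-inferable)
prob_oop  = ODEProblem{false}(f, [1.0], (0.0, 1.0), 2.0)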

Additionally, the functions are fully specialized to reduce the runtimes. If one would instead like to not specialize on the functions to reduce compile time, then one can set recompile to false.

Specialization Levels

Specialization levels in problem definitions are used to control the amount of compilation specialization that is performed on the model functions, in order to trade off between runtime performance, simplicity, and compile-time performance. The default choice of specialization is AutoSpecialize, which seeks to allow for using fully precompiled solvers in common scenarios but falls back to a runtime-optimal approach when further customization is used.

Specialization levels are given as the second type parameter in AbstractSciMLProblem constructors. For example, this is done via:

ODEProblem{iip,specialization}(f,u0,tspan,p)

Note that iip choice is required for specialization choices to be made.
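
For example (a sketch; the function is a placeholder), the specialization level can be requested explicitly alongside the iip choice:

using SciMLBase

f!(du, u, p, t) = (du .= -u)
prob_auto = ODEProblem{true, SciMLBase.AutoSpecialize}(f!, [1.0], (0.0, 1.0))
prob_full = ODEProblem{true, SciMLBase.FullSpecialize}(f!, [1.0], (0.0, 1.0))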

Specialization Choices

SciMLBase.AbstractSpecialization — Type
abstract type AbstractSpecialization

Supertype for the specialization types. Controls the compilation and function specialization behavior of SciMLFunctions, ultimately controlling the runtime vs compile-time trade-off.

source
SciMLBase.AutoSpecialize — Type
struct AutoSpecialize <: SciMLBase.AbstractSpecialization

The default specialization level for problem functions. AutoSpecialize works by applying a function wrapper just-in-time before the solve process to disable just-in-time re-specialization of the solver to the specific choice of model f, and thus allow for using a cached solver compilation from a different f. This wrapping process can lead to a small decrease in runtime performance, with the benefit of a greatly decreased compile time.

Note About Benchmarking and Runtime Optimality

It is recommended that AutoSpecialize is not used in any benchmarking due to the potential effect of function wrapping on runtimes. AutoSpecialize's use case is targeted at decreased latency for REPL performance and not for cases where where top runtime performance is required (such as in optimization loops). Generally, for non-stiff equations the cost will be minimal and potentially not even measurable. For stiff equations, function wrapping has the limitation that only chunk sized 1 Dual numbers are allowed, which can decrease Jacobian construction performance.

Limitations of AutoSpecialize

The following limitations are not fundamental to the implementation of AutoSpecialize, but are instead chosen as a compromise between default precompilation times and ease of maintenance. Please open an issue to discuss lifting any potential limitations.

  • AutoSpecialize is only setup to wrap the functions from in-place ODEs. Other cases are excluded for the time being due to time limitations.
  • AutoSpecialize will only lead to compilation reuse if the ODEFunction's other functions (such as jac and tgrad) are the default nothing. These could be JIT wrapped as well in a future version.
  • AutoSpecialize'd functions are only compatible with Jacobian calculations performed with chunk size 1, and only with tag DiffEqBase.OrdinaryDiffEqTag(). Thus ODE solvers written on the common interface must be careful to detect the AutoSpecialize case and perform differentiation under these constraints, use finite differencing, or manually unwrap before solving. This will lead to decreased runtime performance for sufficiently large Jacobians.
  • AutoSpecialize only wraps on Julia v1.8 and higher.
  • AutoSpecialize does not handle cases with units. If unitful values are detected, wrapping is automatically disabled.
  • AutoSpecialize only wraps cases for which promote_rule is defined between u0 and dual numbers, u0 and t, and for which ArrayInterface.promote_eltype is defined on u0 to dual numbers.
  • AutoSpecialize only wraps cases for which f.mass_matrix isa UniformScaling, the default.
  • AutoSpecialize does not wrap cases where f isa AbstractSciMLOperator
  • By default, only the u0 isa Vector{Float64}, eltype(tspan) isa Float64, and typeof(p) isa Union{Vector{Float64},SciMLBase.NullParameters} are specialized by the solver libraries. Other forms can be specialized with AutoSpecialize, but must be done in the precompilation of downstream libraries.
  • AutoSpecialized functions are manually unwrapped in adjoint methods in SciMLSensitivity.jl in order to allow compiler support for automatic differentiation. Improved versions of adjoints which decrease the recompilation surface will come in non-breaking updates.

Cases where automatic wrapping is disabled are equivalent to FullSpecialize.

Example

f(du,u,p,t) = (du .= u)
ODEProblem{iip}(f,u0,tspan,p)

where iip is a boolean specifying whether the function is in the in-place form (mutating to change the first value). This is automatically determined using the methods table, but note that for full type-inferability of the AbstractSciMLProblem, this iip-ness should be specified explicitly.

Additionally, the functions are fully specialized to reduce the runtimes. If one would instead like to not specialize on the functions to reduce compile time, then one can set recompile to false.
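For concreteness, here is a small sketch of constructing both forms (assuming SciMLBase is loaded and a one-dimensional vector state):

using SciMLBase

f_iip(du, u, p, t) = (du .= p .* u)            # in-place: mutates du
prob_iip = ODEProblem{true}(f_iip, [1.0], (0.0, 1.0), [2.0])

f_oop(u, p, t) = p .* u                        # out-of-place: returns the derivative
prob_oop = ODEProblem{false}(f_oop, [1.0], (0.0, 1.0), [2.0])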

Specialization Levels

Specialization levels in problem definitions are used to control how much compilation specialization is performed on the model functions, in order to trade off between runtime performance, simplicity, and compile-time performance. The default choice of specialization is AutoSpecialize, which seeks to allow for using fully precompiled solvers in common scenarios but falls back to a runtime-optimal approach when further customization is used.

Specialization levels are given as the second type parameter in AbstractSciMLProblem constructors. For example, this is done via:

ODEProblem{iip,specialization}(f,u0,tspan,p)

Note that iip choice is required for specialization choices to be made.

Specialization Choices

SciMLBase.AbstractSpecializationType
abstract type AbstractSpecialization

Supertype for the specialization types. Controls the compilation and function specialization behavior of SciMLFunctions, ultimately controlling the runtime vs compile-time trade-off.

source
SciMLBase.AutoSpecializeType
struct AutoSpecialize <: SciMLBase.AbstractSpecialization

The default specialization level for problem functions. AutoSpecialize works by applying a function wrap just-in-time before the solve process to disable just-in-time re-specialization of the solver to the specific choice of model f, and thus allow for using a cached solver compilation from a different f. This wrapping process can lead to a small decrease in runtime performance, with the benefit of a greatly decreased compile time.

Note About Benchmarking and Runtime Optimality

It is recommended that AutoSpecialize is not used in any benchmarking due to the potential effect of function wrapping on runtimes. AutoSpecialize's use case is targeted at decreased latency for REPL performance and not for cases where top runtime performance is required (such as in optimization loops). Generally, for non-stiff equations the cost will be minimal and potentially not even measurable. For stiff equations, function wrapping has the limitation that only chunk size 1 Dual numbers are allowed, which can decrease Jacobian construction performance.

Limitations of AutoSpecialize

The following limitations are not fundamental to the implementation of AutoSpecialize, but are instead chosen as a compromise between default precompilation times and ease of maintenance. Please open an issue to discuss lifting any potential limitations.

  • AutoSpecialize is only set up to wrap the functions from in-place ODEs. Other cases are excluded for the time being due to time limitations.
  • AutoSpecialize will only lead to compilation reuse if the ODEFunction's other functions (such as jac and tgrad) are the default nothing. These could be JIT wrapped as well in a future version.
  • AutoSpecialize'd functions are only compatible with Jacobian calculations performed with chunk size 1, and only with tag DiffEqBase.OrdinaryDiffEqTag(). Thus ODE solvers written on the common interface must be careful to detect the AutoSpecialize case and perform differentiation under these constraints, use finite differencing, or manually unwrap before solving. This will lead to decreased runtime performance for sufficiently large Jacobians.
  • AutoSpecialize only wraps on Julia v1.8 and higher.
  • AutoSpecialize does not handle cases with units. If unitful values are detected, wrapping is automatically disabled.
  • AutoSpecialize only wraps cases for which promote_rule is defined between u0 and dual numbers, u0 and t, and for which ArrayInterface.promote_eltype is defined on u0 to dual numbers.
  • AutoSpecialize only wraps cases for which f.mass_matrix isa UniformScaling, the default.
  • AutoSpecialize does not wrap cases where f isa AbstractSciMLOperator
  • By default, only u0 isa Vector{Float64}, eltype(tspan) isa Float64, and p isa Union{Vector{Float64},SciMLBase.NullParameters} are specialized by the solver libraries. Other forms can be specialized with AutoSpecialize, but must be done in the precompilation of downstream libraries.
  • AutoSpecialized functions are manually unwrapped in adjoint methods in SciMLSensitivity.jl in order to allow compiler support for automatic differentiation. Improved versions of adjoints which decrease the recompilation surface will come in non-breaking updates.

Cases where automatic wrapping is disabled are equivalent to FullSpecialize.

Example

f(du,u,p,t) = (du .= u)
 
 # Note this is the same as ODEProblem(f, [1.0], (0.0,1.0))
 # If no preferences are set
ODEProblem{true, SciMLBase.AutoSpecialize}(f, [1.0], (0.0,1.0))
source
SciMLBase.NoSpecializeType
struct NoSpecialize <: SciMLBase.AbstractSpecialization

NoSpecialize forces SciMLFunctions to not specialize on the types of functions wrapped within it. This ultimately contributes to a form such that every prob.f type is the same, meaning compilation caches are fully reused, with the downside of losing runtime performance. NoSpecialize is the form that most fully trades off runtime for compile time. Unlike AutoSpecialize, NoSpecialize can be used with any SciMLFunction.

Example

f(du,u,p,t) = (du .= u)
ODEProblem{true, SciMLBase.NoSpecialize}(f, [1.0], (0.0,1.0))
source
SciMLBase.FunctionWrapperSpecializeType
struct FunctionWrapperSpecialize <: SciMLBase.AbstractSpecialization

FunctionWrapperSpecialize is an eager wrapping choice which performs a function wrapping during the ODEProblem construction. This performs the function wrapping at the earliest possible point, giving the best compile-time vs runtime performance, but with the difficulty that any usage of prob.f needs to account for the function wrapper's presence. While optimal in a performance sense, this method has many usability issues with nonstandard solvers and analyses, as it requires unwrapping before re-wrapping for any type changes. Thus this method is not used by default. Given that the compile-time difference is almost undetectable compared to AutoSpecialize, this method is mostly used as a benchmarking reference for the speed-of-light performance of AutoSpecialize.

Limitations of FunctionWrapperSpecialize

FunctionWrapperSpecialize has all of the limitations of AutoSpecialize, but also includes the limitations:

  • prob.f is directly specialized to the types of (u,p,t), and any usage of prob.f on other types first requires using SciMLBase.unwrapped_f(prob.f) to remove the function wrapper.
  • FunctionWrapperSpecialize can only be used by the ODEProblem constructor. If an ODEFunction is being constructed, the user must manually use DiffEqBase.wrap_iip on f before calling ODEFunction{true,FunctionWrapperSpecialize}(f). This is a fundamental limitation of the approach as the types of (u,p,t) are required in the construction process and not accessible in the AbstractSciMLFunction constructors.

Example

f(du,u,p,t) = (du .= u)
ODEProblem{true, SciMLBase.FunctionWrapperSpecialize}(f, [1.0], (0.0,1.0))
source
SciMLBase.FullSpecializeType
struct FullSpecialize <: SciMLBase.AbstractSpecialization

FullSpecialize is an eager specialization choice which directly types the AbstractSciMLFunction struct to match the type of the model f. This forces recompilation of the solver on each new function type f, leading to the highest compile times with the benefit of having the best runtime performance.

FullSpecialize should be used in all cases where top runtime performance is required, such as in long-running simulations and benchmarking.

Example

f(du,u,p,t) = (du .= u)
ODEProblem{true, SciMLBase.FullSpecialize}(f, [1.0], (0.0,1.0))
source
Note

The specialization level must be precompile snooped in the appropriate solver package in order to enable the full precompilation and system image generation for zero-latency usage. By default, this is only done with AutoSpecialize and on types u isa Vector{Float64}, eltype(tspan) isa Float64, and p isa Union{Vector{Float64}, SciMLBase.NullParameters}. Precompilation snooping in the solvers can be done using the Preferences.jl setup on the appropriate solver. See the solver library's documentation for more details.

Default Parameters

By default, AbstractSciMLProblem types use the SciMLBase.NullParameters() singleton to denote the absence of parameters. The reason is that this throws an informative error if the parameters are used or accessed within the user's function: for example, p[1] will throw an informative error about forgetting to pass parameters.
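For example, a small sketch of the default behavior:

f(du, u, p, t) = (du .= u)
prob = ODEProblem(f, [1.0], (0.0, 1.0))        # no parameters passed
prob.p isa SciMLBase.NullParameters            # true; accessing p[1] inside f would error informatively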

Keyword Argument Splatting

All AbstractSciMLProblem types allow for passing keyword arguments that will get forwarded to the solver. The reason for this is that in many cases, like in EnsembleProblem usage, an AbstractSciMLProblem might be associated with some solver configuration, such as a callback or tolerance. Thus, for flexibility, the extra keyword arguments to the AbstractSciMLProblem are carried to the solver.
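For example (a sketch reusing the f defined above; saveat is a standard solver keyword argument):

prob = ODEProblem(f, [1.0], (0.0, 1.0); saveat = 0.1)
prob.kwargs                                    # stores the extra keyword argument, forwarded to solve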

problem_type

AbstractSciMLProblem types include a non-public API definition of problem_type which holds a trait type corresponding to the way the AbstractSciMLProblem was constructed. For example, if a SecondOrderODEProblem constructor is used, the returned problem is simply an ODEProblem for interoperability with any ODEProblem algorithm. However, in this case the problem_type will be populated with the SecondOrderODEProblem type, indicating the original definition and extra structure.
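A rough sketch of this behavior (assuming the in-place second-order signature f(ddu, du, u, p, t) and the SecondOrderODEProblem(f, du0, u0, tspan) argument order):

f2(ddu, du, u, p, t) = (ddu .= -u)             # u'' = -u
prob2 = SecondOrderODEProblem(f2, [0.0], [1.0], (0.0, 1.0))
prob2 isa ODEProblem                           # true: stored as a plain ODEProblem
prob2.problem_type                             # records the SecondOrderODEProblem construction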

Remake

SciMLBase.remakeFunction
remake(thing; <keyword arguments>)

Re-construct thing with new field values specified by the keyword arguments.

source
remake(prob::ODEProblem; f = missing, u0 = missing, tspan = missing,
       p = missing, kwargs = missing, _kwargs...)

Remake the given ODEProblem. If u0 or p are given as symbolic maps, ModelingToolkit.jl has to be loaded.

source
remake(prob::BVProblem; f = missing, u0 = missing, tspan = missing,
       p = missing, kwargs = missing, problem_type = missing, _kwargs...)

Remake the given BVProblem.

source
remake(prob::SDEProblem; f = missing, u0 = missing, tspan = missing,
        p = missing, noise = missing, noise_rate_prototype = missing,
       seed = missing, kwargs = missing, _kwargs...)

Remake the given SDEProblem.

source
remake(prob::OptimizationProblem; f = missing, u0 = missing, p = missing,
     lb = missing, ub = missing, int = missing, lcons = missing, ucons = missing,
     sense = missing, kwargs = missing, _kwargs...)

Remake the given OptimizationProblem. If u0 or p are given as symbolic maps, ModelingToolkit.jl has to be loaded.

source
remake(prob::NonlinearProblem; f = missing, u0 = missing, p = missing,
    problem_type = missing, kwargs = missing, _kwargs...)

Remake the given NonlinearProblem. If u0 or p are given as symbolic maps, ModelingToolkit.jl has to be loaded.

source
remake(prob::NonlinearLeastSquaresProblem; f = missing, u0 = missing, p = missing,
    kwargs = missing, _kwargs...)

Remake the given NonlinearLeastSquaresProblem.

source
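A typical remake usage looks like this (a sketch with an ODEProblem; other problem types work the same way):

prob = ODEProblem((du, u, p, t) -> (du .= p[1] .* u), [1.0], (0.0, 1.0), [2.0])
prob_new = remake(prob; u0 = [3.0], p = [5.0])  # same f and tspan, new u0 and p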

Problem Traits

SciMLBase.isinplaceMethod
isinplace(prob::AbstractSciMLProblem)

Determine whether the function of the given problem operates in place or not.

source
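For example (a small sketch; in-place-ness is detected from the function's argument count):

prob_ip = ODEProblem((du, u, p, t) -> (du .= u), [1.0], (0.0, 1.0))
prob_oop = ODEProblem((u, p, t) -> u, [1.0], (0.0, 1.0))
SciMLBase.isinplace(prob_ip)     # true
SciMLBase.isinplace(prob_oop)    # false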
SciMLBase.is_diagonal_noiseFunction
is_diagonal_noise(prob::AbstractSciMLProblem)
source

AbstractSciMLProblem API

Defaults and Preferences

The SpecializationLevel preference on SciMLBase can be used to set the default specialization level. The following shows how to set the default to FullSpecialize:

using Preferences, UUIDs
set_preferences!(UUID("0bca4576-84f4-4d90-8ffe-ffa030f20462"), "SpecializationLevel" => "FullSpecialize")

The default is AutoSpecialize.

Abstract SciMLProblems

SciMLBase.AbstractSciMLProblemType
abstract type AbstractSciMLProblem
source
SciMLBase.AbstractDEProblemType
abstract type AbstractDEProblem <: SciMLBase.AbstractSciMLProblem

Base type for all DifferentialEquations.jl problems. Concrete subtypes of AbstractDEProblem contain the necessary information to fully define a differential equation of the corresponding type.

source
SciMLBase.AbstractLinearProblemType
abstract type AbstractLinearProblem{bType, isinplace} <: SciMLBase.AbstractSciMLProblem

Base for types which define linear systems.

source
SciMLBase.AbstractNonlinearProblemType
abstract type AbstractNonlinearProblem{uType, isinplace} <: SciMLBase.AbstractDEProblem

Base for types which define nonlinear solve problems (f(u)=0).

source
SciMLBase.AbstractIntegralProblemType
abstract type AbstractIntegralProblem{isinplace} <: SciMLBase.AbstractSciMLProblem

Base for types which define integrals suitable for quadrature.

source
SciMLBase.AbstractOptimizationProblemType
abstract type AbstractOptimizationProblem{isinplace} <: SciMLBase.AbstractSciMLProblem

Base for types which define equations for optimization.

source
SciMLBase.AbstractNoiseProblemType
abstract type AbstractNoiseProblem <: SciMLBase.AbstractDEProblem
source
SciMLBase.AbstractODEProblemType
abstract type AbstractODEProblem{uType, tType, isinplace} <: SciMLBase.AbstractDEProblem

Base for types which define ODE problems.

source
SciMLBase.AbstractDiscreteProblemType
abstract type AbstractDiscreteProblem{uType, tType, isinplace} <: SciMLBase.AbstractODEProblem{uType, tType, isinplace}

Base for types which define discrete problems.

source
SciMLBase.AbstractAnalyticalProblemType
abstract type AbstractAnalyticalProblem{uType, tType, isinplace} <: SciMLBase.AbstractODEProblem{uType, tType, isinplace}
source
SciMLBase.AbstractRODEProblemType
abstract type AbstractRODEProblem{uType, tType, isinplace, ND} <: SciMLBase.AbstractDEProblem

Base for types which define RODE problems.

source
SciMLBase.AbstractSDEProblemType
abstract type AbstractSDEProblem{uType, tType, isinplace, ND} <: SciMLBase.AbstractRODEProblem{uType, tType, isinplace, ND}

Base for types which define SDE problems.

source
SciMLBase.AbstractDAEProblemType
abstract type AbstractDAEProblem{uType, duType, tType, isinplace} <: SciMLBase.AbstractDEProblem

Base for types which define DAE problems.

source
SciMLBase.AbstractDDEProblemType
abstract type AbstractDDEProblem{uType, tType, lType, isinplace} <: SciMLBase.AbstractDEProblem

Base for types which define DDE problems.

source
SciMLBase.AbstractConstantLagDDEProblemType
abstract type AbstractConstantLagDDEProblem{uType, tType, lType, isinplace} <: SciMLBase.AbstractDDEProblem{uType, tType, lType, isinplace}
source
SciMLBase.AbstractSecondOrderODEProblemType
abstract type AbstractSecondOrderODEProblem{uType, tType, isinplace} <: SciMLBase.AbstractODEProblem{uType, tType, isinplace}
source
SciMLBase.AbstractBVProblemType
abstract type AbstractBVProblem{uType, tType, isinplace} <: SciMLBase.AbstractODEProblem{uType, tType, isinplace}

Base for types which define BVP problems.

source
SciMLBase.AbstractJumpProblemType
abstract type AbstractJumpProblem{P, J} <: SciMLBase.AbstractDEProblem

Base for types which define jump problems.

source
SciMLBase.AbstractSDDEProblemType
abstract type AbstractSDDEProblem{uType, tType, lType, isinplace, ND} <: SciMLBase.AbstractDEProblem

Base for types which define SDDE problems.

source
SciMLBase.AbstractConstantLagSDDEProblemType
abstract type AbstractConstantLagSDDEProblem{uType, tType, lType, isinplace, ND} <: SciMLBase.AbstractSDDEProblem{uType, tType, lType, isinplace, ND}
source
SciMLBase.AbstractPDEProblemType
abstract type AbstractPDEProblem <: SciMLBase.AbstractDEProblem

Base for types which define PDE problems.

source
diff --git a/dev/interfaces/SciMLFunctions/index.html b/dev/interfaces/SciMLFunctions/index.html index b0e76f767..89d24d44e 100644 --- a/dev/interfaces/SciMLFunctions/index.html +++ b/dev/interfaces/SciMLFunctions/index.html @@ -4,4 +4,4 @@
f = (du,u,p,t) -> du .= t .* u
jac = (J,u,p,t) -> (J[1,1] = t; J[2,2] = t; J)
jp = Diagonal(zeros(2))

fun = ODEFunction(f; jac=jac, jac_prototype=jp)

Note that the integrators will always make a deep copy of fun.jac_prototype, so there's no worry of aliasing.

In general, the Jacobian prototype can be anything that has mul! defined, in particular sparse matrices or custom lazy types that support mul!. A special case is when the jac_prototype is an AbstractSciMLOperator, in which case you do not need to supply jac as it is automatically set to update_coefficients!. Refer to the SciMLOperators section for more information on setting up time/parameter dependent operators.

Sparsity Handling

The solver libraries internally use packages such as FiniteDiff.jl and SparseDiffTools.jl for high performance calculation of sparse Jacobians and Hessians, along with matrix-free calculations of Jacobian-Vector products (Jv), vector-Jacobian products (v'J), and Hessian-vector products (H*v). The SciML interface gives users the ability to control these connections in order to allow for top notch performance.

The key argument in the SciMLFunctions is the prototype, which is an object that will be used as the underlying Jacobian/Hessian. Thus, if one wants to use a sparse Jacobian, one should specify jac_prototype to be a sparse matrix. The sparsity pattern used in the differentiation scheme is defined by sparsity. By default, sparsity=jac_prototype, meaning that the sparse automatic differentiation scheme specializes on the sparsity pattern given by the prototype. This can be overridden to, say, perform partial matrix coloring approximations. Additionally, the color vector for the sparse differentiation directions can be specified directly via colorvec. For more information on how these arguments control the differentiation process, see the aforementioned differentiation library documentations.
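As a sketch, a sparse prototype could be supplied like this (assuming a two-state system whose Jacobian is lower triangular):

using SparseArrays, SciMLBase

f_sparse(du, u, p, t) = (du[1] = -u[1]; du[2] = u[1] - u[2])
jp_sparse = sparse([1.0 0.0; 1.0 1.0])                         # assumed sparsity pattern of df/du
fun_sparse = ODEFunction(f_sparse; jac_prototype = jp_sparse)  # sparsity defaults to jac_prototype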

Traits


AbstractSciMLFunction API

Abstract SciML Functions

SciMLBase.AbstractDiffEqFunctionType
abstract type AbstractDiffEqFunction{iip} <: SciMLBase.AbstractSciMLFunction{iip}

Base for types defining differential equation functions.

source
SciMLBase.AbstractODEFunctionType
abstract type AbstractODEFunction{iip} <: SciMLBase.AbstractDiffEqFunction{iip}
source
SciMLBase.AbstractSDEFunctionType
abstract type AbstractSDEFunction{iip} <: SciMLBase.AbstractDiffEqFunction{iip}
source
SciMLBase.AbstractDDEFunctionType
abstract type AbstractDDEFunction{iip} <: SciMLBase.AbstractDiffEqFunction{iip}
source
SciMLBase.AbstractDAEFunctionType
abstract type AbstractDAEFunction{iip} <: SciMLBase.AbstractDiffEqFunction{iip}
source
SciMLBase.AbstractRODEFunctionType
abstract type AbstractRODEFunction{iip} <: SciMLBase.AbstractDiffEqFunction{iip}
source
SciMLBase.AbstractDiscreteFunctionType
abstract type AbstractDiscreteFunction{iip} <: SciMLBase.AbstractDiffEqFunction{iip}
source
SciMLBase.AbstractSDDEFunctionType
abstract type AbstractSDDEFunction{iip} <: SciMLBase.AbstractDiffEqFunction{iip}
source
SciMLBase.AbstractNonlinearFunctionType
abstract type AbstractNonlinearFunction{iip} <: SciMLBase.AbstractSciMLFunction{iip}
source
diff --git a/dev/interfaces/Solutions/index.html b/dev/interfaces/Solutions/index.html index 7395b6ca8..8f1f9b0b9 100644 --- a/dev/interfaces/Solutions/index.html +++ b/dev/interfaces/Solutions/index.html @@ -1,2 +1,2 @@

SciMLSolutions

Definition of the AbstractSciMLSolution Interface

All AbstractSciMLSolution types are a subset of some AbstractArray. Types with time series (like ODESolution) are subtypes of RecursiveArrayTools.AbstractVectorOfArray and RecursiveArrayTools.AbstractDiffEqArray where appropriate. Types without a time series (like OptimizationSolution) are directly subsets of AbstractArray.

Array Interface

Instead of working on the Vector{uType} directly, we can use the provided array interface.

sol[j]

to access the value at timestep j (if the timeseries was saved), and

sol.t[j]

to access the value of t at timestep j. For multi-dimensional systems, this will address first by component and lastly by time, and thus

sol[i,j]

will be the ith component at timestep j. Hence, sol[j][i] == sol[i, j]. This is done because Julia is column-major, so the leading dimension should be contiguous in memory. If the solution values have shape (for example, u is a matrix), then i is the linear index. We can also access solutions with shape:

sol[i,k,j]

gives the [i,k] component of the system at timestep j. The colon operator is supported, meaning that

sol[i,:]

gives the timeseries for the ith component.
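For example, given a two-component ODESolution sol (a sketch):

sol[1, :]        # timeseries of the first component
sol[2, end]      # second component at the final timestep
sol.t[end]       # final time
sol[end]         # full state at the final timestep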

Common Field Names

  • u: the solution values
  • t: the independent variable values, matching the length of the solution, if applicable
  • resid: the residual of the solution, if applicable
  • original: the solution object from the original solver, if it's a wrapper algorithm
  • retcode: see the documentation section on return codes
  • prob: the problem that was solved
  • alg: the algorithm used to solve the problem

Return Codes (RetCodes)

The solution types have a retcode field which returns a SciMLBase.ReturnCode.T (from EnumX.jl, see that package for the semantics of handling EnumX types) signifying the error or satisfaction state of the solution.

SciMLBase.ReturnCodeModule

SciML.ReturnCode

SciML.ReturnCode is the standard return code enum interface for the SciML interface. Return codes are notes given by the solvers to indicate the state of the solution, for example whether it successfully solved the equations, whether it failed to solve the equations, and importantly, why it exited.

Using SciML.ReturnCode

SciML.ReturnCode uses the interface of EnumX.jl and thus inherits all of the behaviors of being an EnumX. This includes the Enum type itself being referred to as SciML.ReturnCode.T, and each of the constituent enum states being referred to via getproperty, i.e. SciML.ReturnCode.Success.

Note About Success Checking

Previous iterations of the interface suggested using sol.retcode == :Success; however, that is no longer advised and should instead be replaced with SciMLBase.successful_retcode(sol). The reason is that there are many different codes that can be interpreted as successful, such as ReturnCode.Terminated, which means terminate!(integrator) was successfully used to end an integration at a user-specified condition. As such, successful_retcode is the most general way to query whether the solver exited without error.

Properties

  • successful_retcode(retcode::ReturnCode.T): Determines whether the output enum is considered a success state of the solver, i.e. the solver successfully solved the equations. ReturnCode.Success is the most basic form, simply declaring that it was successful, but many more informative success return codes exist as well.
source

Return Code Traits

SciMLBase.successful_retcodeFunction

successful_retcode(retcode::ReturnCode.T)::Bool successful_retcode(sol::AbstractSciMLSolution)::Bool

Returns a boolean for whether a return code should be interpreted as a form of success.

source
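A typical success-checking pattern looks like this (a sketch, where prob and alg stand in for any problem and solver choice):

sol = solve(prob, alg)
if SciMLBase.successful_retcode(sol)
    # use sol
else
    @warn "Solver exited with return code $(sol.retcode)"
end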

Specific Return Codes

SciMLBase.ReturnCode.DefaultConstant

ReturnCode.Default

The default state of the solver. If this return code is given, then the solving process is either still in progress or the solver library has not been set up with the return code interface, and thus the return code is undetermined.

Common Reasons for Seeing this Return Code

  • A common reason for Default return codes is that a solver is a non-SciML solver which does not fully conform to the interface. Please open an issue if this is seen and it will be improved.
  • Another common reason for a Default return code is if the solver is probed internally before the solving process is done, such as through the callback interface. Return codes are set to Default to start and are changed to Success and other return codes upon finishing the solving process or hitting a numerical difficulty.

Properties

  • successful_retcode = false
source
SciMLBase.ReturnCode.SuccessConstant

ReturnCode.Success

The success state of the solver. If this return code is given, then the solving process was successful, but no extra information about that success is given.

Common Reasons for Seeing this Return Code

  • This is the most common return code and most solvers will give this return code if the solving process went as expected without any errors or detected numerical issues.

Properties

  • successful_retcode = true
source
SciMLBase.ReturnCode.TerminatedConstant

ReturnCode.Terminated

The successful termination state of the solver. If this return code is given, then the solving process was successful at terminating the solve, usually through a callback affect! via terminate!(integrator).

Common Reasons for Seeing this Return Code

  • The most common reason for seeing this return code is if a user calls a callback which uses terminate!(integrator) to halt the integration at a user-chosen stopping point.
  • Another common reason for this return code is due to implicit terminate! statements in some library callbacks. For example, SteadyStateCallback uses terminate! internally, so solutions which reach steady state will have a ReturnCode.Terminated state instead of a ReturnCode.Success state. Similarly, problems solved via SteadyStateDiffEq.jl will have this ReturnCode.Terminated state if a timestepping method is used to solve to steady state.

Properties

  • successful_retcode = true
source
SciMLBase.ReturnCode.DtNaNConstant

ReturnCode.DtNaN

A failure exit state of the solver. If this return code is given, then the solving process was unsuccessful and exited early because the dt of the integration was determined to be NaN and thus the solver could not continue.

Common Reasons for Seeing this Return Code

  • The most common reason for seeing this return code is because the automatic dt selection algorithm is used but the starting derivative has a NaN or Inf derivative term. Double check that the f(u0,p,t0) term is well-defined without NaN or Inf values.
  • Another common reason for this return code is a user-set dt which evaluates to NaN. If using solve(prob,alg,dt=x), double check that x is not NaN.

Properties

  • successful_retcode = false
source
SciMLBase.ReturnCode.MaxItersConstant

ReturnCode.MaxIters

A failure exit state of the solver. If this return code is given, then the solving process was unsuccessful and exited early because the solver's iterations hit the maxiters either set by default or by the user in the solve/init command.

Note about Nonlinear Optimization

In nonlinear optimization, many solvers (such as OptimizationOptimisers.Adam) do not have an exit criterion other than iters == maxiters. In this case, the solvers will iterate until maxiters and exit with a Success return code, as that is a successful run of the solver and not considered to be an error state. Solvers with early termination criteria, such as Optim.BFGS exiting when the gradient is sufficiently close to zero, will give ReturnCode.MaxIters on exits which hit the maximum iteration count.

Common Reasons for Seeing this Return Code

  • This commonly occurs in ODE solving if a non-stiff method (e.g. Tsit5) is used in an algorithm choice for a stiff ODE. It is recommended that in such cases, one tries a stiff ODE solver.
  • This commonly occurs in optimization and nonlinear solvers if the tolerance of the solve is set too low and cannot be achieved due to floating point error or the condition number of the solver matrix. Double check that the chosen tolerance is numerically possible.

Properties

  • successful_retcode = false
source
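
A minimal sketch of the first bullet, assuming OrdinaryDiffEq.jl; the van der Pol right-hand side and the stiffness parameter are made-up illustrations:

    using OrdinaryDiffEq
    vdp(u, p, t) = [u[2], p * ((1 - u[1]^2) * u[2] - u[1])]   # stiff van der Pol for large p
    prob = ODEProblem(vdp, [2.0, 0.0], (0.0, 6.0), 1e6)
    sol = solve(prob, Tsit5())        # non-stiff method: may exit with ReturnCode.MaxIters
    sol = solve(prob, Rodas5())       # a stiff solver typically finishes well within maxiters

If the iteration budget itself is the issue, it can be raised with the maxiters keyword, e.g. solve(prob, alg; maxiters = 10^6), though in the stiff case above switching solvers is the better fix.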
SciMLBase.ReturnCode.DtLessThanMinConstant

ReturnCode.DtLessThanMin

A failure exit state of the solver. If this return code is given, then the solving process was unsuccessful and exited early because the dt of the integration was made to be less than dtmin, i.e. dt < dtmin.

Common Reasons for Seeing this Return Code

  • The most common reason for seeing this return code is that the integration is going unstable. As f(u,p,t) -> infinity, the time step required by the solver to accurately handle the dynamics decreases. When dt gets sufficiently small, i.e. below dtmin, an exit is thrown as the solution is likely unstable. dtmin is also chosen to be around the value where floating point issues cause t + dt == t, and thus a dt of that size is impossible at floating point precision.
  • Another common reason for this return code is that domain constraints are set, such as via isoutofdomain, but the domain constraint is incorrect. For example, when solving the ODE f(u,p,t) = -u - 1, one may think "but I want a solution with u > 0 and thus I will set isoutofdomain(u,p,t) = u < 0". However, the true solution of this ODE is not positive, and thus the solver will try to decrease dt until it can give an accurate solution that is positive. As this is impossible, it will continue to shrink dt until dt < dtmin and then exit with this return code (see the sketch after this entry).

Properties

  • successful_retcode = false
source
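
A minimal sketch of the isoutofdomain example from the second bullet, assuming OrdinaryDiffEq.jl:

    using OrdinaryDiffEq
    f(u, p, t) = -u - 1                     # true solution eventually goes negative
    prob = ODEProblem(f, 0.5, (0.0, 10.0))
    sol = solve(prob, Tsit5(); isoutofdomain = (u, p, t) -> u < 0)
    sol.retcode                             # ReturnCode.DtLessThanMin once dt shrinks below dtmin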
SciMLBase.ReturnCode.UnstableConstant

ReturnCode.Unstable

A failure exit state of the solver. If this return code is given, then the solving process was unsuccessful and exited early because the unstable_check function, as given by the unstable_check common keyword argument (or its default), returned true at the current state.

Common Reasons for Seeing this Return Code

  • The most common reason for seeing this return code is because u contains a NaN or Inf value. The default unstable_check only checks for these values.

Properties

  • successful_retcode = false
source
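
A minimal sketch of a custom unstable_check, assuming OrdinaryDiffEq.jl and the (dt, u, p, t) signature documented for the common solver options; the growth ODE and the 1e6 threshold are made-up illustrations:

    using OrdinaryDiffEq
    f(u, p, t) = 2u                                       # simple exponential growth
    prob = ODEProblem(f, 1.0, (0.0, 20.0))
    # The default check only flags NaN/Inf values in u; here we also flag "too large".
    sol = solve(prob, Tsit5(); unstable_check = (dt, u, p, t) -> u > 1e6)
    sol.retcode                                           # ReturnCode.Unstable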
SciMLBase.ReturnCode.InitialFailureConstant

ReturnCode.InitialFailure

A failure exit state of the solver. If this return code is given, then the solving process was unsuccessful because the initialization process failed.

Common Reasons for Seeing this Return Code

  • The most common reason for seeing this return code is because the initialization process of a DAE solver failed to find consistent initial conditions, which can occur if the differentiation index of the DAE solver is too high. Most DAE solvers only allow for index-1 DAEs, and so an index-2 DAE will fail during this initialization. To solve this kind of problem, use ModelingToolkit.jl and its structural_simplify method to reduce the index of the DAE.
  • Another common reason for this return code is if the initial condition was not suitable for the numerical solve. For example, the initial point had a NaN or Inf. Or in optimization, this can occur if the initial point is outside of the bound constraints given by the user.

Properties

  • successful_retcode = false
source
SciMLBase.ReturnCode.ConvergenceFailureConstant

ReturnCode.ConvergenceFailure

A failure exit state of the solver. If this return code is given, then the solving process was unsuccessful because internal nonlinear solver iterations failed to converge.

Common Reasons for Seeing this Return Code

  • The most common reason for seeing this return code is that an inappropriate nonlinear solver was chosen. For example, fixed-point (functional) iteration avoids the cost of building a Jacobian, but it is not stable on stiff problems, so using it inside a stiff ODE solver causes the internal nonlinear iterations to fail to converge.
  • For nonlinear solvers, this can occur if a certain threshold is exceeded. For example, in approximate Jacobian solvers like Broyden, Klement, etc., this return code is given if the number of Jacobian resets exceeds the threshold.

Properties

  • successful_retcode = false
source
SciMLBase.ReturnCode.FailureConstant

ReturnCode.Failure

A failure exit state of the solver. If this return code is given, then the solving process was unsuccessful but no extra information is given.

Common Reasons for Seeing this Return Code

  • The most common reason for seeing this return code is because the solver is a wrapped solver (i.e. a Fortran code) which does not provide any extra information about its exit state. If this is from a Julia-based solver, please open an issue.

Properties

  • successful_retcode = false
source
SciMLBase.ReturnCode.ExactSolutionLeftConstant

ReturnCode.ExactSolutionLeft

The success state of the solver. If this return code is given, then the solving process was successful, and the left solution was given.

Common Reasons for Seeing this Return Code

  • The most common reason for this return code is a bracketing nonlinear solver, such as bisection, which when iterated to convergence cannot produce an exact f(x)=0 solution due to floating point precision, and thus returns the first floating point value to the left of the root for x.

Properties

  • successful_retcode = true
source
SciMLBase.ReturnCode.ExactSolutionRightConstant

ReturnCode.ExactSolutionRight

The success state of the solver. If this return code is given, then the solving process was successful, and the right solution was given.

Common Reasons for Seeing this Return Code

  • The most common reason for this return code is a bracketing nonlinear solver, such as bisection, which when iterated to convergence cannot produce an exact f(x)=0 solution due to floating point precision, and thus returns the first floating point value to the right of the root for x.

Properties

  • successful_retcode = true
source
SciMLBase.ReturnCode.FloatingPointLimitConstant

ReturnCode.FloatingPointLimit

The success state of the solver. If this return code is given, then the solving process was successful, and the closest floating point value to the solution was given.

Common Reasons for Seeing this Return Code

  • The most common reason for this return code is a bracketing nonlinear solver, such as Falsi, which when iterated to convergence cannot produce an exact f(x)=0 solution due to floating point precision, and thus returns the closest floating point value to the true solution for x (see the sketch after this entry).

Properties

  • successful_retcode = true
source
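
The three bracketing-related codes above (ExactSolutionLeft, ExactSolutionRight, FloatingPointLimit) can show up in a simple interval root find. This sketch assumes SimpleNonlinearSolve.jl and its Bisection method; the quadratic is a made-up illustration, and which of the codes is returned depends on how the bracket closes:

    using SciMLBase, SimpleNonlinearSolve
    f(u, p) = u^2 - p                                  # root at sqrt(2), inexact in Float64
    prob = IntervalNonlinearProblem(f, (1.0, 2.0), 2.0)
    sol = solve(prob, Bisection())
    sol.retcode        # e.g. ExactSolutionLeft, ExactSolutionRight, or FloatingPointLimit
    SciMLBase.successful_retcode(sol)                  # true for all of these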

Solution Traits

AbstractSciMLSolution API

Abstract SciML Solutions

diff --git a/dev/search_index.js b/dev/search_index.js index afdfb5bf3..066dc673a 100644 --- a/dev/search_index.js +++ b/dev/search_index.js @@ -1,3 +1,3 @@ var documenterSearchIndex = {"docs": -[{"location":"interfaces/SciMLFunctions/#scimlfunctions","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"","category":"section"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"The SciML ecosystem provides an extensive interface for declaring extra functions associated with the differential equation's data. In traditional libraries there is usually only one option: the Jacobian. However, we allow for a large array of pre-computed functions to speed up the calculations. This is offered via the SciMLFunction types which can be passed to the problems.","category":"page"},{"location":"interfaces/SciMLFunctions/#Definition-of-the-AbstractSciMLFunction-Interface","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"Definition of the AbstractSciMLFunction Interface","text":"","category":"section"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"The following standard principles should be adhered to across all AbstractSciMLFunction instantiations.","category":"page"},{"location":"interfaces/SciMLFunctions/#Common-Function-Choice-Definitions","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"Common Function Choice Definitions","text":"","category":"section"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"The full interface available to the solvers is as follows:","category":"page"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"jac: The Jacobian of the differential equation with respect to the state variable u at a time t with parameters p.\nparamjac: The Jacobian of the differential equation with respect to p at state u at time t.\nanalytic: Defines an analytical solution using u0 at time t with p which will cause the solvers to return errors. Used for testing.\nsyms: Allows you to name your variables for automatic names in plots and other output.\njac_prototype: Defines the type to be used for any internal Jacobians within the solvers.\nsparsity: Defines the sparsity pattern to be used for the sparse differentiation schemes. By default this is equal to jac_prototype. See the sparsity handling portion of this page for more information.\ncolorvec: The coloring pattern used by the sparse differentiator. 
See the sparsity handling portion of this page for more information.\nobserved: A function which allows for generating other observables from a solution.","category":"page"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"Each function type additionally has some specific arguments, refer to their documentation for details.","category":"page"},{"location":"interfaces/SciMLFunctions/#In-place-Specification-and-No-Recompile-Mode","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"In-place Specification and No-Recompile Mode","text":"","category":"section"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"Each SciMLFunction type can be called with an \"is inplace\" (iip) choice.","category":"page"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"ODEFunction(f)\nODEFunction{iip}(f)","category":"page"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"which is a boolean for whether the function is in the inplace form (mutating to change the first value). This is automatically determined using the methods table but note that for full type-inferability of the AbstractSciMLProblem this iip-ness should be specified.","category":"page"},{"location":"interfaces/SciMLFunctions/#Specialization-Choices","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"Specialization Choices","text":"","category":"section"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"Each SciMLFunction type allows for specialization choices","category":"page"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"ODEFunction{iip,specialization}(f)","category":"page"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"which designates how the compiler should specialize on the model function f. For more details on specialization choices, see the SciMLProblems page.","category":"page"},{"location":"interfaces/SciMLFunctions/#Specifying-Jacobian-Types","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"Specifying Jacobian Types","text":"","category":"section"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"The jac field of an inplace style SciMLFunction has the signature jac(J,u,p,t), which updates the Jacobian J in-place. The intended type for J can sometimes be inferred (e.g. when it is just a dense Matrix), but not in general. 
To supply the type information, you can provide a jac_prototype in the function's constructor.","category":"page"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"The following example creates an inplace ODEFunction whose Jacobian is a Diagonal:","category":"page"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"using LinearAlgebra\nf = (du,u,p,t) -> du .= t .* u\njac = (J,u,p,t) -> (J[1,1] = t; J[2,2] = t; J)\njp = Diagonal(zeros(2))\nfun = ODEFunction(f; jac=jac, jac_prototype=jp)","category":"page"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"Note that the integrators will always make a deep copy of fun.jac_prototype, so there's no worry of aliasing.","category":"page"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"In general the jacobian prototype can be anything that has mul! defined, in particular sparse matrices or custom lazy types that support mul!. A special case is when the jac_prototype is a AbstractSciMLOperator, in which case you do not need to supply jac as it is automatically set to update_coefficients!. Refer to the SciMLOperators section for more information on setting up time/parameter dependent operators.","category":"page"},{"location":"interfaces/SciMLFunctions/#Sparsity-Handling","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"Sparsity Handling","text":"","category":"section"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"The solver libraries internally use packages such as FiniteDiff.jl and SparseDiffTools.jl for high performance calculation of sparse Jacobians and Hessians, along with matrix-free calculations of Jacobian-Vector products (Jv), vector-Jacobian products (v'J), and Hessian-vector products (H*v). The SciML interface gives users the ability to control these connections in order to allow for top notch performance.","category":"page"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"The key arguments in the SciMLFunction is the prototype, which is an object that will be used as the underlying Jacobian/Hessian. Thus if one wants to use a sparse Jacobian, one should specify jac_prototype to be a sparse matrix. The sparsity pattern used in the differentiation scheme is defined by sparsity. By default, sparsity=jac_prototype, meaning that the sparse automatic differentiation scheme should specialize on the sparsity pattern given by the actual sparsity pattern. This can be overridden to say perform partial matrix coloring approximations. Additionally, the color vector for the sparse differentiation directions can be specified directly via colorvec. 
For more information on how these arguments control the differentiation process, see the aforementioned differentiation library documentations.","category":"page"},{"location":"interfaces/SciMLFunctions/#Traits","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"Traits","text":"","category":"section"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"SciMLBase.isinplace(f::SciMLBase.AbstractSciMLFunction)","category":"page"},{"location":"interfaces/SciMLFunctions/#AbstractSciMLFunction-API","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"AbstractSciMLFunction API","text":"","category":"section"},{"location":"interfaces/SciMLFunctions/#Abstract-SciML-Functions","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"Abstract SciML Functions","text":"","category":"section"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"SciMLBase.AbstractDiffEqFunction\nSciMLBase.AbstractODEFunction\nSciMLBase.AbstractSDEFunction\nSciMLBase.AbstractDDEFunction\nSciMLBase.AbstractDAEFunction\nSciMLBase.AbstractRODEFunction\nSciMLBase.AbstractDiscreteFunction\nSciMLBase.AbstractSDDEFunction\nSciMLBase.AbstractNonlinearFunction","category":"page"},{"location":"interfaces/SciMLFunctions/#SciMLBase.AbstractDiffEqFunction","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLBase.AbstractDiffEqFunction","text":"abstract type AbstractDiffEqFunction{iip} <: SciMLBase.AbstractSciMLFunction{iip}\n\nBase for types defining differential equation functions.\n\n\n\n\n\n","category":"type"},{"location":"interfaces/SciMLFunctions/#SciMLBase.AbstractODEFunction","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLBase.AbstractODEFunction","text":"abstract type AbstractODEFunction{iip} <: SciMLBase.AbstractDiffEqFunction{iip}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/SciMLFunctions/#SciMLBase.AbstractSDEFunction","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLBase.AbstractSDEFunction","text":"abstract type AbstractSDEFunction{iip} <: SciMLBase.AbstractDiffEqFunction{iip}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/SciMLFunctions/#SciMLBase.AbstractDDEFunction","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLBase.AbstractDDEFunction","text":"abstract type AbstractDDEFunction{iip} <: SciMLBase.AbstractDiffEqFunction{iip}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/SciMLFunctions/#SciMLBase.AbstractDAEFunction","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLBase.AbstractDAEFunction","text":"abstract type AbstractDAEFunction{iip} <: SciMLBase.AbstractDiffEqFunction{iip}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/SciMLFunctions/#SciMLBase.AbstractRODEFunction","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLBase.AbstractRODEFunction","text":"abstract type AbstractRODEFunction{iip} <: SciMLBase.AbstractDiffEqFunction{iip}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/SciMLFunctions/#SciMLBase.AbstractDiscreteFunction","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLBase.AbstractDiscreteFunction","text":"abstract type AbstractDiscreteFunction{iip} <: SciMLBase.AbstractDiffEqFunction{iip}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/SciMLFunctions/#SciMLBase.AbstractSDDEFunction","page":"SciMLFunctions 
(Jacobians, Sparsity, Etc.)","title":"SciMLBase.AbstractSDDEFunction","text":"abstract type AbstractSDDEFunction{iip} <: SciMLBase.AbstractDiffEqFunction{iip}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/SciMLFunctions/#SciMLBase.AbstractNonlinearFunction","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLBase.AbstractNonlinearFunction","text":"abstract type AbstractNonlinearFunction{iip} <: SciMLBase.AbstractSciMLFunction{iip}\n\n\n\n\n\n","category":"type"},{"location":"fundamentals/FAQ/#Frequently-Asked-Questions","page":"Frequently Asked Questions","title":"Frequently Asked Questions","text":"","category":"section"},{"location":"fundamentals/FAQ/#What-are-the-code-styling-rules-for-SciML?","page":"Frequently Asked Questions","title":"What are the code styling rules for SciML?","text":"","category":"section"},{"location":"fundamentals/FAQ/","page":"Frequently Asked Questions","title":"Frequently Asked Questions","text":"All SciML libraries are supposed to follow SciMLStyle. Any deviation from that style is something to be fixed.","category":"page"},{"location":"fundamentals/FAQ/#Where-do-I-find-more-information-on-the-internals-of-some-packages?","page":"Frequently Asked Questions","title":"Where do I find more information on the internals of some packages?","text":"","category":"section"},{"location":"fundamentals/FAQ/","page":"Frequently Asked Questions","title":"Frequently Asked Questions","text":"The SciML Developer Documentation describes the internals of some of the larger solver libraries at length.","category":"page"},{"location":"fundamentals/FAQ/#What-are-the-community-practices-that-SciML-developers-should-use?","page":"Frequently Asked Questions","title":"What are the community practices that SciML developers should use?","text":"","category":"section"},{"location":"fundamentals/FAQ/","page":"Frequently Asked Questions","title":"Frequently Asked Questions","text":"See ColPrac: Contributor's Guide on Collaborative Practices for Community Packages","category":"page"},{"location":"fundamentals/FAQ/#Are-there-developer-programs-to-help-fund-parties-interested-in-helping-develop-SciML?","page":"Frequently Asked Questions","title":"Are there developer programs to help fund parties interested in helping develop SciML?","text":"","category":"section"},{"location":"fundamentals/FAQ/","page":"Frequently Asked Questions","title":"Frequently Asked Questions","text":"Yes! See the SciML Developer Programs webpage.","category":"page"},{"location":"interfaces/Problems/#scimlproblems","page":"SciMLProblems","title":"SciMLProblems","text":"","category":"section"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"The cornerstone of the SciML common interface is the problem type definition. These definitions are the encoding of mathematical problems into a numerically computable form.","category":"page"},{"location":"interfaces/Problems/#Note-About-Symbolics-and-ModelingToolkit","page":"SciMLProblems","title":"Note About Symbolics and ModelingToolkit","text":"","category":"section"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"The symbolic analog to the problem interface is the ModelingToolkit AbstractSystem. For example, ODESystem is the symbolic analog to ODEProblem. 
Each of these system types have a method for constructing the associated problem and function types.","category":"page"},{"location":"interfaces/Problems/#Definition-of-the-AbstractSciMLProblem-Interface","page":"SciMLProblems","title":"Definition of the AbstractSciMLProblem Interface","text":"","category":"section"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"The following standard principles should be adhered to across all AbstractSciMLProblem instantiations.","category":"page"},{"location":"interfaces/Problems/#In-place-Specification","page":"SciMLProblems","title":"In-place Specification","text":"","category":"section"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"Each AbstractSciMLProblem type can be called with an \"is inplace\" (iip) choice. For example:","category":"page"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"ODEProblem(f,u0,tspan,p)\nODEProblem{iip}(f,u0,tspan,p)","category":"page"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"which is a boolean for whether the function is in the inplace form (mutating to change the first value). This is automatically determined using the methods table but note that for full type-inferability of the AbstractSciMLProblem this iip-ness should be specified.","category":"page"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"Additionally, the functions are fully specialized to reduce the runtimes. If one would instead like to not specialize on the functions to reduce compile time, then one can set recompile to false.","category":"page"},{"location":"interfaces/Problems/#Specialization-Levels","page":"SciMLProblems","title":"Specialization Levels","text":"","category":"section"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"Specialization levels in problem definitions are used to control the amount of compilation specialization is performed on the model functions in order to trade off between runtime performance, simplicity, and compile-time performance. The default choice of specialization is AutoSpecialize, which seeks to allow for using fully precompiled solvers in common scenarios but falls back to a runtime-optimal approach when further customization is used.","category":"page"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"Specialization levels are given as the second type parameter in AbstractSciMLProblem constructors. 
For example, this is done via:","category":"page"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"ODEProblem{iip,specialization}(f,u0,tspan,p)","category":"page"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"Note that iip choice is required for specialization choices to be made.","category":"page"},{"location":"interfaces/Problems/#Specialization-Choices","page":"SciMLProblems","title":"Specialization Choices","text":"","category":"section"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"SciMLBase.AbstractSpecialization\nSciMLBase.AutoSpecialize\nSciMLBase.NoSpecialize\nSciMLBase.FunctionWrapperSpecialize\nSciMLBase.FullSpecialize","category":"page"},{"location":"interfaces/Problems/#SciMLBase.AbstractSpecialization","page":"SciMLProblems","title":"SciMLBase.AbstractSpecialization","text":"abstract type AbstractSpecialization\n\nSupertype for the specialization types. Controls the compilation and function specialization behavior of SciMLFunctions, ultimately controlling the runtime vs compile-time trade-off.\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AutoSpecialize","page":"SciMLProblems","title":"SciMLBase.AutoSpecialize","text":"struct AutoSpecialize <: SciMLBase.AbstractSpecialization\n\nThe default specialization level for problem functions. AutoSpecialize works by applying a function wrap just-in-time before the solve process to disable just-in-time re-specialization of the solver to the specific choice of model f and thus allow for using a cached solver compilation from a different f. This wrapping process can lead to a small decreased runtime performance with a benefit of a greatly decreased compile-time.\n\nNote About Benchmarking and Runtime Optimality\n\nIt is recommended that AutoSpecialize is not used in any benchmarking due to the potential effect of function wrapping on runtimes. AutoSpecialize's use case is targeted at decreased latency for REPL performance and not for cases where where top runtime performance is required (such as in optimization loops). Generally, for non-stiff equations the cost will be minimal and potentially not even measurable. For stiff equations, function wrapping has the limitation that only chunk sized 1 Dual numbers are allowed, which can decrease Jacobian construction performance.\n\nLimitations of AutoSpecialize\n\nThe following limitations are not fundamental to the implementation of AutoSpecialize, but are instead chosen as a compromise between default precompilation times and ease of maintenance. Please open an issue to discuss lifting any potential limitations.\n\nAutoSpecialize is only setup to wrap the functions from in-place ODEs. Other cases are excluded for the time being due to time limitations.\nAutoSpecialize will only lead to compilation reuse if the ODEFunction's other functions (such as jac and tgrad) are the default nothing. These could be JIT wrapped as well in a future version.\nAutoSpecialize'd functions are only compatible with Jacobian calculations performed with chunk size 1, and only with tag DiffEqBase.OrdinaryDiffEqTag(). Thus ODE solvers written on the common interface must be careful to detect the AutoSpecialize case and perform differentiation under these constraints, use finite differencing, or manually unwrap before solving. 
This will lead to decreased runtime performance for sufficiently large Jacobians.\nAutoSpecialize only wraps on Julia v1.8 and higher.\nAutoSpecialize does not handle cases with units. If unitful values are detected, wrapping is automatically disabled.\nAutoSpecialize only wraps cases for which promote_rule is defined between u0 and dual numbers, u0 and t, and for which ArrayInterface.promote_eltype is defined on u0 to dual numbers.\nAutoSpecialize only wraps cases for which f.mass_matrix isa UniformScaling, the default.\nAutoSpecialize does not wrap cases where f isa AbstractSciMLOperator\nBy default, only the u0 isa Vector{Float64}, eltype(tspan) isa Float64, and typeof(p) isa Union{Vector{Float64},SciMLBase.NullParameters} are specialized by the solver libraries. Other forms can be specialized with AutoSpecialize, but must be done in the precompilation of downstream libraries.\nAutoSpecialized functions are manually unwrapped in adjoint methods in SciMLSensitivity.jl in order to allow compiler support for automatic differentiation. Improved versions of adjoints which decrease the recompilation surface will come in non-breaking updates.\n\nCases where automatic wrapping is disabled are equivalent to FullSpecialize.\n\nExample\n\nf(du,u,p,t) = (du .= u)\n\n# Note this is the same as ODEProblem(f, [1.0], (0.0,1.0))\n# If no preferences are set\nODEProblem{true, SciMLBase.AutoSpecialize}(f, [1.0], (0.0,1.0))\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.NoSpecialize","page":"SciMLProblems","title":"SciMLBase.NoSpecialize","text":"struct NoSpecialize <: SciMLBase.AbstractSpecialization\n\nNoSpecialize forces SciMLFunctions to not specialize on the types of functions wrapped within it. This ultimately contributes to a form such that every prob.f type is the same, meaning compilation caches are fully reused, with the downside of losing runtime performance. NoSpecialize is the form that most fully trades off runtime for compile time. Unlike AutoSpecialize, NoSpecialize can be used with any SciMLFunction.\n\nExample\n\nf(du,u,p,t) = (du .= u)\nODEProblem{true, SciMLBase.NoSpecialize}(f, [1.0], (0.0,1.0))\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.FunctionWrapperSpecialize","page":"SciMLProblems","title":"SciMLBase.FunctionWrapperSpecialize","text":"struct FunctionWrapperSpecialize <: SciMLBase.AbstractSpecialization\n\nFunctionWrapperSpecialize is an eager wrapping choice which performs a function wrapping during the ODEProblem construction. This performs the function wrapping at the earliest possible point, giving the best compile-time vs runtime performance, but with the difficulty that any usage of prob.f needs to account for the function wrapper's presence. While optimal in a performance sense, this method has many usability issues with nonstandard solvers and analyses as it requires unwrapping before re-wrapping for any type changes. Thus this method is not used by default. 
Given that the compile-time different is almost undetectable from AutoSpecialize, this method is mostly used as a benchmarking reference for speed of light for AutoSpecialize.\n\nLimitations of FunctionWrapperSpecialize\n\nFunctionWrapperSpecialize has all of the limitations of AutoSpecialize, but also includes the limitations:\n\nprob.f is directly specialized to the types of (u,p,t), and any usage of prob.f on other types first requires using SciMLBase.unwrapped_f(prob.f) to remove the function wrapper.\nFunctionWrapperSpecialize can only be used by the ODEProblem constructor. If an ODEFunction is being constructed, the user must manually use DiffEqBase.wrap_iip on f before calling ODEFunction{true,FunctionWrapperSpecialize}(f). This is a fundamental limitation of the approach as the types of (u,p,t) are required in the construction process and not accessible in the AbstractSciMLFunction constructors.\n\nExample\n\nf(du,u,p,t) = (du .= u)\nODEProblem{true, SciMLBase.FunctionWrapperSpecialize}(f, [1.0], (0.0,1.0))\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.FullSpecialize","page":"SciMLProblems","title":"SciMLBase.FullSpecialize","text":"struct FullSpecialize <: SciMLBase.AbstractSpecialization\n\nFullSpecialize is an eager specialization choice which directly types the AbstractSciMLFunction struct to match the type of the model f. This forces recompilation of the solver on each new function type f, leading to the most compile times with the benefit of having the best runtime performance.\n\nFullSpecialize should be used in all cases where top runtime performance is required, such as in long-running simulations and benchmarking.\n\nExample\n\nf(du,u,p,t) = (du .= u)\nODEProblem{true, SciMLBase.FullSpecialize}(f, [1.0], (0.0,1.0))\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"note: Note\nThe specialization level must be precompile snooped in the appropriate solver package in order to enable the full precompilation and system image generation for zero-latency usage. By default, this is only done with AutoSpecialize and on types u isa Vector{Float64}, eltype(tspan) isa Float64, and p isa Union{Vector{Float64}, SciMLBase.NullParameters}. Precompilation snooping in the solvers can be done using the Preferences.jl setup on the appropriate solver. See the solver library's documentation for more details.","category":"page"},{"location":"interfaces/Problems/#Default-Parameters","page":"SciMLProblems","title":"Default Parameters","text":"","category":"section"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"By default, AbstractSciMLProblem types use the SciMLBase.NullParameters() singleton to define the absence of parameters by default. The reason is because this throws an informative error if the parameter is used or accessed within the user's function, for example, p[1] will throw an informative error about forgetting to pass parameters.","category":"page"},{"location":"interfaces/Problems/#Keyword-Argument-Splatting","page":"SciMLProblems","title":"Keyword Argument Splatting","text":"","category":"section"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"All AbstractSciMLProblem types allow for passing keyword arguments that would get forwarded to the solver. 
The reason for this is that in many cases, like in EnsembleProblem usage, a AbstractSciMLProblem might be associated with some solver configuration, such as a callback or tolerance. Thus, for flexibility the extra keyword arguments to the AbstractSciMLProblem are carried to the solver.","category":"page"},{"location":"interfaces/Problems/#problem_type","page":"SciMLProblems","title":"problem_type","text":"","category":"section"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"AbstractSciMLProblem types include a non-public API definition of problem_type which holds a trait type corresponding to the way the AbstractSciMLProblem was constructed. For example, if a SecondOrderODEProblem constructor is used, the returned problem is simply a ODEProblem for interoperability with any ODEProblem algorithm. However, in this case the problem_type will be populated with the SecondOrderODEProblem type, indicating the original definition and extra structure.","category":"page"},{"location":"interfaces/Problems/#Remake","page":"SciMLProblems","title":"Remake","text":"","category":"section"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"remake","category":"page"},{"location":"interfaces/Problems/#SciMLBase.remake","page":"SciMLProblems","title":"SciMLBase.remake","text":"remake(thing; )\n\nRe-construct thing with new field values specified by the keyword arguments.\n\n\n\n\n\nremake(prob::ODEProblem; f = missing, u0 = missing, tspan = missing,\n p = missing, kwargs = missing, _kwargs...)\n\nRemake the given ODEProblem. If u0 or p are given as symbolic maps ModelingToolkit.jl has to be loaded.\n\n\n\n\n\nremake(prob::BVProblem; f = missing, u0 = missing, tspan = missing,\n p = missing, kwargs = missing, problem_type = missing, _kwargs...)\n\nRemake the given BVProblem.\n\n\n\n\n\nremake(prob::SDEProblem; f = missing, u0 = missing, tspan = missing,\n p = missing, noise = missing, noise_rate_prototype = missing,\n seed = missing, kwargs = missing, _kwargs...)\n\nRemake the given SDEProblem.\n\n\n\n\n\nremake(prob::OptimizationProblem; f = missing, u0 = missing, p = missing,\n lb = missing, ub = missing, int = missing, lcons = missing, ucons = missing,\n sense = missing, kwargs = missing, _kwargs...)\n\nRemake the given OptimizationProblem. If u0 or p are given as symbolic maps ModelingToolkit.jl has to be loaded.\n\n\n\n\n\nremake(prob::NonlinearProblem; f = missing, u0 = missing, p = missing,\n problem_type = missing, kwargs = missing, _kwargs...)\n\nRemake the given NonlinearProblem. 
If u0 or p are given as symbolic maps ModelingToolkit.jl has to be loaded.\n\n\n\n\n\nremake(prob::NonlinearLeastSquaresProblem; f = missing, u0 = missing, p = missing,\n kwargs = missing, _kwargs...)\n\nRemake the given NonlinearLeastSquaresProblem.\n\n\n\n\n\n","category":"function"},{"location":"interfaces/Problems/#Problem-Traits","page":"SciMLProblems","title":"Problem Traits","text":"","category":"section"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"SciMLBase.isinplace(prob::SciMLBase.AbstractDEProblem)\nSciMLBase.is_diagonal_noise","category":"page"},{"location":"interfaces/Problems/#SciMLBase.isinplace-Tuple{SciMLBase.AbstractDEProblem}","page":"SciMLProblems","title":"SciMLBase.isinplace","text":"isinplace(prob::AbstractSciMLProblem)\n\nDetermine whether the function of the given problem operates in place or not.\n\n\n\n\n\n","category":"method"},{"location":"interfaces/Problems/#SciMLBase.is_diagonal_noise","page":"SciMLProblems","title":"SciMLBase.is_diagonal_noise","text":"is_diagonal_noise(prob::AbstractSciMLProblem)\n\n\n\n\n\n","category":"function"},{"location":"interfaces/Problems/#AbstractSciMLProblem-API","page":"SciMLProblems","title":"AbstractSciMLProblem API","text":"","category":"section"},{"location":"interfaces/Problems/#Defaults-and-Preferences","page":"SciMLProblems","title":"Defaults and Preferences","text":"","category":"section"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"SpecializationLevel at SciMLBase can be used to set the default specialization level. The following shows how to set the specialization default to FullSpecialize:","category":"page"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"using Preferences, UUIDs\nset_preferences!(UUID(\"0bca4576-84f4-4d90-8ffe-ffa030f20462\"), \"SpecializationLevel\" => \"FullSpecialize\")","category":"page"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"The default is AutoSpecialize.","category":"page"},{"location":"interfaces/Problems/#Abstract-SciMLProblems","page":"SciMLProblems","title":"Abstract SciMLProblems","text":"","category":"section"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"SciMLBase.AbstractSciMLProblem\nSciMLBase.AbstractDEProblem\nSciMLBase.AbstractLinearProblem\nSciMLBase.AbstractNonlinearProblem\nSciMLBase.AbstractIntegralProblem\nSciMLBase.AbstractOptimizationProblem\nSciMLBase.AbstractNoiseProblem\nSciMLBase.AbstractODEProblem\nSciMLBase.AbstractDiscreteProblem\nSciMLBase.AbstractAnalyticalProblem\nSciMLBase.AbstractRODEProblem\nSciMLBase.AbstractSDEProblem\nSciMLBase.AbstractDAEProblem\nSciMLBase.AbstractDDEProblem\nSciMLBase.AbstractConstantLagDDEProblem\nSciMLBase.AbstractSecondOrderODEProblem\nSciMLBase.AbstractBVProblem\nSciMLBase.AbstractJumpProblem\nSciMLBase.AbstractSDDEProblem\nSciMLBase.AbstractConstantLagSDDEProblem\nSciMLBase.AbstractPDEProblem","category":"page"},{"location":"interfaces/Problems/#SciMLBase.AbstractSciMLProblem","page":"SciMLProblems","title":"SciMLBase.AbstractSciMLProblem","text":"abstract type AbstractSciMLProblem\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractDEProblem","page":"SciMLProblems","title":"SciMLBase.AbstractDEProblem","text":"abstract type AbstractDEProblem <: SciMLBase.AbstractSciMLProblem\n\nBase type for all DifferentialEquations.jl problems. 
Concrete subtypes of AbstractDEProblem contain the necessary information to fully define a differential equation of the corresponding type.\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractLinearProblem","page":"SciMLProblems","title":"SciMLBase.AbstractLinearProblem","text":"abstract type AbstractLinearProblem{bType, isinplace} <: SciMLBase.AbstractSciMLProblem\n\nBase for types which define linear systems.\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractNonlinearProblem","page":"SciMLProblems","title":"SciMLBase.AbstractNonlinearProblem","text":"abstract type AbstractNonlinearProblem{uType, isinplace} <: SciMLBase.AbstractDEProblem\n\nBase for types which define nonlinear solve problems (f(u)=0).\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractIntegralProblem","page":"SciMLProblems","title":"SciMLBase.AbstractIntegralProblem","text":"abstract type AbstractIntegralProblem{isinplace} <: SciMLBase.AbstractSciMLProblem\n\nBase for types which define integrals suitable for quadrature.\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractOptimizationProblem","page":"SciMLProblems","title":"SciMLBase.AbstractOptimizationProblem","text":"abstract type AbstractOptimizationProblem{isinplace} <: SciMLBase.AbstractSciMLProblem\n\nBase for types which define equations for optimization.\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractNoiseProblem","page":"SciMLProblems","title":"SciMLBase.AbstractNoiseProblem","text":"abstract type AbstractNoiseProblem <: SciMLBase.AbstractDEProblem\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractODEProblem","page":"SciMLProblems","title":"SciMLBase.AbstractODEProblem","text":"abstract type AbstractODEProblem{uType, tType, isinplace} <: SciMLBase.AbstractDEProblem\n\nBase for types which define ODE problems.\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractDiscreteProblem","page":"SciMLProblems","title":"SciMLBase.AbstractDiscreteProblem","text":"abstract type AbstractDiscreteProblem{uType, tType, isinplace} <: SciMLBase.AbstractODEProblem{uType, tType, isinplace}\n\nBase for types which define discrete problems.\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractAnalyticalProblem","page":"SciMLProblems","title":"SciMLBase.AbstractAnalyticalProblem","text":"abstract type AbstractAnalyticalProblem{uType, tType, isinplace} <: SciMLBase.AbstractODEProblem{uType, tType, isinplace}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractRODEProblem","page":"SciMLProblems","title":"SciMLBase.AbstractRODEProblem","text":"abstract type AbstractRODEProblem{uType, tType, isinplace, ND} <: SciMLBase.AbstractDEProblem\n\nBase for types which define RODE problems.\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractSDEProblem","page":"SciMLProblems","title":"SciMLBase.AbstractSDEProblem","text":"abstract type AbstractSDEProblem{uType, tType, isinplace, ND} <: SciMLBase.AbstractRODEProblem{uType, tType, isinplace, ND}\n\nBase for types which define SDE problems.\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractDAEProblem","page":"SciMLProblems","title":"SciMLBase.AbstractDAEProblem","text":"abstract type AbstractDAEProblem{uType, duType, tType, isinplace} <: SciMLBase.AbstractDEProblem\n\nBase for types which define 
DAE problems.\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractDDEProblem","page":"SciMLProblems","title":"SciMLBase.AbstractDDEProblem","text":"abstract type AbstractDDEProblem{uType, tType, lType, isinplace} <: SciMLBase.AbstractDEProblem\n\nBase for types which define DDE problems.\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractConstantLagDDEProblem","page":"SciMLProblems","title":"SciMLBase.AbstractConstantLagDDEProblem","text":"abstract type AbstractConstantLagDDEProblem{uType, tType, lType, isinplace} <: SciMLBase.AbstractDDEProblem{uType, tType, lType, isinplace}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractSecondOrderODEProblem","page":"SciMLProblems","title":"SciMLBase.AbstractSecondOrderODEProblem","text":"abstract type AbstractSecondOrderODEProblem{uType, tType, isinplace} <: SciMLBase.AbstractODEProblem{uType, tType, isinplace}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractBVProblem","page":"SciMLProblems","title":"SciMLBase.AbstractBVProblem","text":"abstract type AbstractBVProblem{uType, tType, isinplace} <: SciMLBase.AbstractODEProblem{uType, tType, isinplace}\n\nBase for types which define BVP problems.\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractJumpProblem","page":"SciMLProblems","title":"SciMLBase.AbstractJumpProblem","text":"abstract type AbstractJumpProblem{P, J} <: SciMLBase.AbstractDEProblem\n\nBase for types which define jump problems.\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractSDDEProblem","page":"SciMLProblems","title":"SciMLBase.AbstractSDDEProblem","text":"abstract type AbstractSDDEProblem{uType, tType, lType, isinplace, ND} <: SciMLBase.AbstractDEProblem\n\nBase for types which define SDDE problems.\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractConstantLagSDDEProblem","page":"SciMLProblems","title":"SciMLBase.AbstractConstantLagSDDEProblem","text":"abstract type AbstractConstantLagSDDEProblem{uType, tType, lType, isinplace, ND} <: SciMLBase.AbstractSDDEProblem{uType, tType, lType, isinplace, ND}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractPDEProblem","page":"SciMLProblems","title":"SciMLBase.AbstractPDEProblem","text":"abstract type AbstractPDEProblem <: SciMLBase.AbstractDEProblem\n\nBase for types which define PDE problems.\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Solutions/#SciMLSolutions","page":"SciMLSolutions","title":"SciMLSolutions","text":"","category":"section"},{"location":"interfaces/Solutions/#Definition-of-the-AbstractSciMLSolution-Interface","page":"SciMLSolutions","title":"Definition of the AbstractSciMLSolution Interface","text":"","category":"section"},{"location":"interfaces/Solutions/","page":"SciMLSolutions","title":"SciMLSolutions","text":"All AbstractSciMLSolution types are a subset of some AbstractArray. Types with time series (like ODESolution) are subtypes of RecursiveArrayTools.AbstractVectorOfArray and RecursiveArrayTools.AbstractDiffEqArray where appropriate. 
Types without a time series (like OptimizationSolution) are directly subsets of AbstractArray.","category":"page"},{"location":"interfaces/Solutions/#Array-Interface","page":"SciMLSolutions","title":"Array Interface","text":"","category":"section"},{"location":"interfaces/Solutions/","page":"SciMLSolutions","title":"SciMLSolutions","text":"Instead of working on the Vector{uType} directly, we can use the provided array interface.","category":"page"},{"location":"interfaces/Solutions/","page":"SciMLSolutions","title":"SciMLSolutions","text":"sol[j]","category":"page"},{"location":"interfaces/Solutions/","page":"SciMLSolutions","title":"SciMLSolutions","text":"to access the value at timestep j (if the timeseries was saved), and","category":"page"},{"location":"interfaces/Solutions/","page":"SciMLSolutions","title":"SciMLSolutions","text":"sol.t[j]","category":"page"},{"location":"interfaces/Solutions/","page":"SciMLSolutions","title":"SciMLSolutions","text":"to access the value of t at timestep j. For multi-dimensional systems, this will address first by component and lastly by time, and thus","category":"page"},{"location":"interfaces/Solutions/","page":"SciMLSolutions","title":"SciMLSolutions","text":"sol[i,j]","category":"page"},{"location":"interfaces/Solutions/","page":"SciMLSolutions","title":"SciMLSolutions","text":"will be the ith component at timestep j. Hence, sol[j][i] == sol[i, j]. This is done because Julia is column-major, so the leading dimension should be contiguous in memory. If the independent variables had shape (for example, was a matrix), then i is the linear index. We can also access solutions with shape:","category":"page"},{"location":"interfaces/Solutions/","page":"SciMLSolutions","title":"SciMLSolutions","text":"sol[i,k,j]","category":"page"},{"location":"interfaces/Solutions/","page":"SciMLSolutions","title":"SciMLSolutions","text":"gives the [i,k] component of the system at timestep j. 
The colon operator is supported, meaning that","category":"page"},{"location":"interfaces/Solutions/","page":"SciMLSolutions","title":"SciMLSolutions","text":"sol[i,:]","category":"page"},{"location":"interfaces/Solutions/","page":"SciMLSolutions","title":"SciMLSolutions","text":"gives the timeseries for the ith component.","category":"page"},{"location":"interfaces/Solutions/#Common-Field-Names","page":"SciMLSolutions","title":"Common Field Names","text":"","category":"section"},{"location":"interfaces/Solutions/","page":"SciMLSolutions","title":"SciMLSolutions","text":"u: the solution values\nt: the independent variable values, matching the length of the solution, if applicable\nresid: the residual of the solution, if applicable\noriginal: the solution object from the original solver, if it's a wrapper algorithm\nretcode: see the documentation section on return codes\nprob: the problem that was solved\nalg: the algorithm used to solve the problem","category":"page"},{"location":"interfaces/Solutions/#retcodes","page":"SciMLSolutions","title":"Return Codes (RetCodes)","text":"","category":"section"},{"location":"interfaces/Solutions/","page":"SciMLSolutions","title":"SciMLSolutions","text":"The solution types have a retcode field which returns a SciMLBase.ReturnCode.T (from EnumX.jl, see that package for the semantics of handling EnumX types) signifying the error or satisfaction state of the solution.","category":"page"},{"location":"interfaces/Solutions/","page":"SciMLSolutions","title":"SciMLSolutions","text":"SciMLBase.ReturnCode","category":"page"},{"location":"interfaces/Solutions/#SciMLBase.ReturnCode","page":"SciMLSolutions","title":"SciMLBase.ReturnCode","text":"SciML.ReturnCode\n\nSciML.ReturnCode is the standard return code enum interface for the SciML interface. Return codes are notes given by the solvers to indicate the state of the solution, for example whether it successfully solved the equations, whether it failed to solve the equations, and importantly, why it exited.\n\nUsing SciML.ReturnCode\n\nSciML.ReturnCode use the interface of EnumX.jl and thus inherits all of the behaviors of being an EnumX. This includes the Enum type itself being referred to as SciML.ReturnCode.T, and each of the constituent enum states being referred to via getproperty, i.e. SciML.ReturnCode.Success.\n\nNote About Success Checking\n\nPrevious iterations of the interface suggested using sol.retcode == :Success, however, that is now not advised instead should be replaced with SciMLBase.successful_retcode(sol). The reason is that there are many different codes that can be interpreted as successful, such as ReturnCode.Terminated which means successfully used terminate!(integrator) to end an integration at a user-specified condition. As such, successful_retcode is the most general way to query for if the solver did not error.\n\nProperties\n\nsuccessful_retcode(retcode::ReturnCode.T): Determines whether the output enum is considered a success state of the solver, i.e. the solver successfully solved the equations. 
ReturnCode.Success is the most basic form, simply declaring that it was successful, but many more informative success return codes exist as well.\n\n\n\n\n\n","category":"module"},{"location":"interfaces/Solutions/#Return-Code-Traits","page":"SciMLSolutions","title":"Return Code Traits","text":"","category":"section"},{"location":"interfaces/Solutions/","page":"SciMLSolutions","title":"SciMLSolutions","text":"SciMLBase.successful_retcode","category":"page"},{"location":"interfaces/Solutions/#SciMLBase.successful_retcode","page":"SciMLSolutions","title":"SciMLBase.successful_retcode","text":"successful_retcode(retcode::ReturnCode.T)::Bool successful_retcode(sol::AbstractSciMLSolution)::Bool\n\nReturns a boolean for whether a return code should be interpreted as a form of success.\n\n\n\n\n\n","category":"function"},{"location":"interfaces/Solutions/#Specific-Return-Codes","page":"SciMLSolutions","title":"Specific Return Codes","text":"","category":"section"},{"location":"interfaces/Solutions/","page":"SciMLSolutions","title":"SciMLSolutions","text":"SciMLBase.ReturnCode.Default\nSciMLBase.ReturnCode.Success\nSciMLBase.ReturnCode.Terminated\nSciMLBase.ReturnCode.DtNaN\nSciMLBase.ReturnCode.MaxIters\nSciMLBase.ReturnCode.DtLessThanMin\nSciMLBase.ReturnCode.Unstable\nSciMLBase.ReturnCode.InitialFailure\nSciMLBase.ReturnCode.ConvergenceFailure\nSciMLBase.ReturnCode.Failure\nSciMLBase.ReturnCode.ExactSolutionLeft\nSciMLBase.ReturnCode.ExactSolutionRight\nSciMLBase.ReturnCode.FloatingPointLimit","category":"page"},{"location":"interfaces/Solutions/#SciMLBase.ReturnCode.Default","page":"SciMLSolutions","title":"SciMLBase.ReturnCode.Default","text":"ReturnCode.Default\n\nThe default state of the solver. If this return code is given, then the solving process is either still in process or the solver library has not been setup with the return code interface and thus the return code is undetermined.\n\nCommon Reasons for Seeing this Return Code\n\nA common reason for Default return codes is that a solver is a non-SciML solver which does not fully conform to the interface. Please open an issue if this is seen and it will be improved.\nAnother common reason for a Default return code is if the solver is probed internally before the solving process is done, such as through the callback interface. Return codes are set to Default to start and are changed to Success and other return codes upon finishing the solving process or hitting a numerical difficulty.\n\nProperties\n\nsuccessful_retcode = false\n\n\n\n\n\n","category":"constant"},{"location":"interfaces/Solutions/#SciMLBase.ReturnCode.Success","page":"SciMLSolutions","title":"SciMLBase.ReturnCode.Success","text":"ReturnCode.Success\n\nThe success state of the solver. If this return code is given, then the solving process was successful, but no extra information about that success is given.\n\nCommon Reasons for Seeing this Return Code\n\nThis is the most common return code and most solvers will give this return code if the solving process went as expected without any errors or detected numerical issues.\n\nProperties\n\nsuccessful_retcode = true\n\n\n\n\n\n","category":"constant"},{"location":"interfaces/Solutions/#SciMLBase.ReturnCode.Terminated","page":"SciMLSolutions","title":"SciMLBase.ReturnCode.Terminated","text":"ReturnCode.Terminated\n\nThe successful termination state of the solver. If this return code is given, then the solving process was successful at terminating the solve, usually through a callback affect! 
via terminate!(integrator).\n\nCommon Reasons for Seeing this Return Code\n\nThe most common reason for seeing this return code is if a user calls a callback which uses terminate!(integrator) to halt the integration at a user-chosen stopping point.\nAnother common reason for this return code is due to implicit terminate! statements in some library callbacks. For example, SteadyStateCallback uses terminate! internally, so solutions which reach steady state will have a ReturnCode.Terminated state instead of a ReturnCode.Success state. Similarly, problems solved via SteadyStateDiffEq.jl will have this ReturnCode.Terminated state if a timestepping method is used to solve to steady state.\n\nProperties\n\nsuccessful_retcode = true\n\n\n\n\n\n","category":"constant"},{"location":"interfaces/Solutions/#SciMLBase.ReturnCode.DtNaN","page":"SciMLSolutions","title":"SciMLBase.ReturnCode.DtNaN","text":"ReturnCode.DtNaN\n\nA failure exit state of the solver. If this return code is given, then the solving process was unsuccessful and exited early because the dt of the integration was determined to be NaN and thus the solver could not continue.\n\nCommon Reasons for Seeing this Return Code\n\nThe most common reason for seeing this return code is because the automatic dt selection algorithm is used but the starting derivative has a NaN or Inf derivative term. Double check that the f(u0,p,t0) term is well-defined without NaN or Inf values.\nAnother common reason for this return code is because of a user set dt which is calculated to be a NaN. If solve(prob,alg,dt=x), double check that x is not NaN.\n\nProperties\n\nsuccessful_retcode = false\n\n\n\n\n\n","category":"constant"},{"location":"interfaces/Solutions/#SciMLBase.ReturnCode.MaxIters","page":"SciMLSolutions","title":"SciMLBase.ReturnCode.MaxIters","text":"ReturnCode.MaxIters\n\nA failure exit state of the solver. If this return code is given, then the solving process was unsuccessful and exited early because the solver's iterations hit the maxiters either set by default or by the user in the solve/init command.\n\nNote about Nonlinear Optimization\n\nIn nonlinear optimization, many solvers (such as OptimizationOptimisers.Adam) do not have an exit criteria other than iters == maxiters. In this case, the solvers will iterate until maxiters and exit with a Success return code, as that is a successful run of the solver and not considered to be an error state. Solves with early termination criteria, such as Optim.BFGS exiting when the gradient is sufficiently close to zero, will give ReturnCode.MaxIters on exits which require the maximum iteration.\n\nCommon Reasons for Seeing this Return Code\n\nThis commonly occurs in ODE solving if a non-stiff method (e.g. Tsit5) is used in an algorithm choice for a stiff ODE. It is recommended that in such cases, one tries a stiff ODE solver.\nThis commonly occurs in optimization and nonlinear solvers if the tolerance on solve to too low and cannot be achieved due to floating point error or the condition number of the solver matrix. Double check that the chosen tolerance is numerically possible.\n\nProperties\n\nsuccessful_retcode = false\n\n\n\n\n\n","category":"constant"},{"location":"interfaces/Solutions/#SciMLBase.ReturnCode.DtLessThanMin","page":"SciMLSolutions","title":"SciMLBase.ReturnCode.DtLessThanMin","text":"ReturnCode.DtLessThanMin\n\nA failure exit state of the solver. 
If this return code is given, then the solving process was unsuccessful and exited early because the dt of the integration was made to be less than dtmin, i.e. dt < dtmin.\n\nCommon Reasons for Seeing this Return Code\n\nThe most common reason for seeing this return code is because the integration is going unstable. As f(u,p,t) -> infinity, the time steps required by the solver to accurately handle the dynamics decreases. When it gets sufficiently small, dtmin, an exit is thrown as the solution is likely unstable. dtmin is also chosen to be around the value where floating point issues cause t + dt == t, and thus a dt of that size is impossible at floating point precision.\nAnother common reason for this return code is if domain constraints are set, such as by using isoutofdomain, but the domain constraint is incorrect. For example, if one is solving the ODE f(u,p,t) = -u - 1, one may think \"but I want a solution with u > 0 and thus I will set isoutofdomain(u,p,t) = u < 0. However, the true solution of this ODE is not positive, and thus what will occur is that the solver will try to decrease dt until it can give an accurate solution that is positive. As this is impossible, it will continue to shrink the dt until dt < dtmin and then exit with this return code.\n\nProperties\n\nsuccessful_retcode = false\n\n\n\n\n\n","category":"constant"},{"location":"interfaces/Solutions/#SciMLBase.ReturnCode.Unstable","page":"SciMLSolutions","title":"SciMLBase.ReturnCode.Unstable","text":"ReturnCode.Unstable\n\nA failure exit state of the solver. If this return code is given, then the solving process was unsuccessful and exited early because the unstable_check function, as given by the unstable_check common keyword argument (or its default), give a true at the current state.\n\nCommon Reasons for Seeing this Return Code\n\nThe most common reason for seeing this return code is because u contains a NaN or Inf value. The default unstable_check only checks for these values.\n\nProperties\n\nsuccessful_retcode = false\n\n\n\n\n\n","category":"constant"},{"location":"interfaces/Solutions/#SciMLBase.ReturnCode.InitialFailure","page":"SciMLSolutions","title":"SciMLBase.ReturnCode.InitialFailure","text":"ReturnCode.InitialFailure\n\nA failure exit state of the solver. If this return code is given, then the solving process was unsuccessful because the initialization process failed.\n\nCommon Reasons for Seeing this Return Code\n\nThe most common reason for seeing this return code is because the initialization process of a DAE solver failed to find consistent initial conditions, which can occur if the differentiation index of the DAE solver is too high. Most DAE solvers only allow for index-1 DAEs, and so an index-2 DAE will fail during this initialization. To solve this kind of problem, use ModelingToolkit.jl and its structural_simplify method to reduce the index of the DAE.\nAnother common reason for this return code is if the initial condition was not suitable for the numerical solve. For example, the initial point had a NaN or Inf. Or in optimization, this can occur if the initial point is outside of the bound constraints given by the user.\n\nProperties\n\nsuccessful_retcode = false\n\n\n\n\n\n","category":"constant"},{"location":"interfaces/Solutions/#SciMLBase.ReturnCode.ConvergenceFailure","page":"SciMLSolutions","title":"SciMLBase.ReturnCode.ConvergenceFailure","text":"ReturnCode.ConvergenceFailure\n\nA failure exit state of the solver. 
If this return code is given, then the solving process was unsuccessful because internal nonlinear solver iterations failed to converge.\n\nCommon Reasons for Seeing this Return Code\n\nThe most common reason for seeing this return code is that an inappropriate nonlinear solver was chosen. For example, if fixed point iteration is used on a stiff problem, it is cheaper per step by avoiding the Jacobian, but it makes the stiff ODE solver no longer stable on stiff problems!\n\nProperties\n\nsuccessful_retcode = false\n\n\n\n\n\n","category":"constant"},{"location":"interfaces/Solutions/#SciMLBase.ReturnCode.Failure","page":"SciMLSolutions","title":"SciMLBase.ReturnCode.Failure","text":"ReturnCode.Failure\n\nA failure exit state of the solver. If this return code is given, then the solving process was unsuccessful but no extra information is given.\n\nCommon Reasons for Seeing this Return Code\n\nThe most common reason for seeing this return code is because the solver is a wrapped solver (i.e. a Fortran code) which does not provide any extra information about its exit state. If this is from a Julia-based solver, please open an issue.\n\nProperties\n\nsuccessful_retcode = false\n\n\n\n\n\n","category":"constant"},{"location":"interfaces/Solutions/#SciMLBase.ReturnCode.ExactSolutionLeft","page":"SciMLSolutions","title":"SciMLBase.ReturnCode.ExactSolutionLeft","text":"ReturnCode.ExactSolutionLeft\n\nThe success state of the solver. If this return code is given, then the solving process was successful, and the left solution was given.\n\nCommon Reasons for Seeing this Return Code\n\nThe most common reason for this return code is that a bracketing nonlinear solver, such as bisection, iterating to convergence, is unable to give the exact f(x)=0 solution due to floating point precision issues, and thus it gives the first floating point value to the left for x.\n\nProperties\n\nsuccessful_retcode = true\n\n\n\n\n\n","category":"constant"},{"location":"interfaces/Solutions/#SciMLBase.ReturnCode.ExactSolutionRight","page":"SciMLSolutions","title":"SciMLBase.ReturnCode.ExactSolutionRight","text":"ReturnCode.ExactSolutionRight\n\nThe success state of the solver. If this return code is given, then the solving process was successful, and the right solution was given.\n\nCommon Reasons for Seeing this Return Code\n\nThe most common reason for this return code is that a bracketing nonlinear solver, such as bisection, iterating to convergence, is unable to give the exact f(x)=0 solution due to floating point precision issues, and thus it gives the first floating point value to the right for x.\n\nProperties\n\nsuccessful_retcode = true\n\n\n\n\n\n","category":"constant"},{"location":"interfaces/Solutions/#SciMLBase.ReturnCode.FloatingPointLimit","page":"SciMLSolutions","title":"SciMLBase.ReturnCode.FloatingPointLimit","text":"ReturnCode.FloatingPointLimit\n\nThe success state of the solver. 
If this return code is given, then the solving process was successful, and the closest floating point value to the solution was given.\n\nCommon Reasons for Seeing this Return Code\n\nThe most common reason for this return code is that a nonlinear solver, such as Falsi, iterating to convergence, is unable to give the exact f(x)=0 solution due to floating point precision issues, and thus it gives the closest floating point value to the true solution for x.\n\nProperties\n\nsuccessful_retcode = true\n\n\n\n\n\n","category":"constant"},
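{"location":"interfaces/Solutions/","page":"SciMLSolutions","title":"SciMLSolutions","text":"As a hedged usage sketch (an illustration, not part of the docstrings above): rather than comparing against ReturnCode.Success directly, the recommended check is successful_retcode(sol), since terminated and bracketing solves are also considered successful. The snippet assumes OrdinaryDiffEq.jl is installed and uses an illustrative problem:","category":"page"},{"location":"interfaces/Solutions/","page":"SciMLSolutions","title":"SciMLSolutions","text":"using OrdinaryDiffEq, SciMLBase\n\nprob = ODEProblem((u, p, t) -> -u, 1.0, (0.0, 1.0))\nsol = solve(prob, Tsit5())\n\nsol.retcode == SciMLBase.ReturnCode.Success # true for this plain successful solve\nSciMLBase.successful_retcode(sol) # preferred check: also true for Terminated, ExactSolutionLeft, etc.","category":"page"},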
{"location":"interfaces/Solutions/#Solution-Traits","page":"SciMLSolutions","title":"Solution Traits","text":"","category":"section"},{"location":"interfaces/Solutions/#AbstractSciMLSolution-API","page":"SciMLSolutions","title":"AbstractSciMLSolution API","text":"","category":"section"},{"location":"interfaces/Solutions/#Abstract-SciML-Solutions","page":"SciMLSolutions","title":"Abstract SciML Solutions","text":"","category":"section"},{"location":"interfaces/Solutions/","page":"SciMLSolutions","title":"SciMLSolutions","text":"SciMLBase.AbstractSciMLSolution\nSciMLBase.AbstractNoTimeSolution\nSciMLBase.AbstractTimeseriesSolution\nSciMLBase.AbstractNoiseProcess\nSciMLBase.AbstractEnsembleSolution\nSciMLBase.AbstractLinearSolution\nSciMLBase.AbstractNonlinearSolution\nSciMLBase.AbstractIntegralSolution\nSciMLBase.AbstractSteadyStateSolution\nSciMLBase.AbstractAnalyticalSolution\nSciMLBase.AbstractODESolution\nSciMLBase.AbstractDDESolution\nSciMLBase.AbstractRODESolution\nSciMLBase.AbstractDAESolution","category":"page"},{"location":"interfaces/Solutions/#SciMLBase.AbstractSciMLSolution","page":"SciMLSolutions","title":"SciMLBase.AbstractSciMLSolution","text":"Union of all base solution types.\n\nUses a Union so that solution types can be <: AbstractArray\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Solutions/#SciMLBase.AbstractNoTimeSolution","page":"SciMLSolutions","title":"SciMLBase.AbstractNoTimeSolution","text":"abstract type AbstractNoTimeSolution{T, N} <: AbstractArray{T, N}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Solutions/#SciMLBase.AbstractTimeseriesSolution","page":"SciMLSolutions","title":"SciMLBase.AbstractTimeseriesSolution","text":"abstract type AbstractTimeseriesSolution{T, N, A} <: RecursiveArrayTools.AbstractDiffEqArray{T, N, A}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Solutions/#SciMLBase.AbstractNoiseProcess","page":"SciMLSolutions","title":"SciMLBase.AbstractNoiseProcess","text":"abstract type AbstractNoiseProcess{T, N, A, isinplace} <: RecursiveArrayTools.AbstractDiffEqArray{T, N, A}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Solutions/#SciMLBase.AbstractEnsembleSolution","page":"SciMLSolutions","title":"SciMLBase.AbstractEnsembleSolution","text":"abstract type AbstractEnsembleSolution{T, N, A} <: RecursiveArrayTools.AbstractVectorOfArray{T, N, A}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Solutions/#SciMLBase.AbstractLinearSolution","page":"SciMLSolutions","title":"SciMLBase.AbstractLinearSolution","text":"abstract type AbstractLinearSolution{T, N} <: SciMLBase.AbstractNoTimeSolution{T, N}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Solutions/#SciMLBase.AbstractNonlinearSolution","page":"SciMLSolutions","title":"SciMLBase.AbstractNonlinearSolution","text":"abstract type AbstractNonlinearSolution{T, N} <: SciMLBase.AbstractNoTimeSolution{T, N}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Solutions/#SciMLBase.AbstractIntegralSolution","page":"SciMLSolutions","title":"SciMLBase.AbstractIntegralSolution","text":"abstract type AbstractIntegralSolution{T, N} <: SciMLBase.AbstractNoTimeSolution{T, N}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Solutions/#SciMLBase.AbstractSteadyStateSolution","page":"SciMLSolutions","title":"SciMLBase.AbstractSteadyStateSolution","text":"abstract type AbstractNonlinearSolution{T, N} <: SciMLBase.AbstractNoTimeSolution{T, N}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Solutions/#SciMLBase.AbstractAnalyticalSolution","page":"SciMLSolutions","title":"SciMLBase.AbstractAnalyticalSolution","text":"abstract type AbstractAnalyticalSolution{T, N, S} <: SciMLBase.AbstractTimeseriesSolution{T, N, S}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Solutions/#SciMLBase.AbstractODESolution","page":"SciMLSolutions","title":"SciMLBase.AbstractODESolution","text":"abstract type AbstractODESolution{T, N, S} <: SciMLBase.AbstractTimeseriesSolution{T, N, S}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Solutions/#SciMLBase.AbstractDDESolution","page":"SciMLSolutions","title":"SciMLBase.AbstractDDESolution","text":"abstract type AbstractDDESolution{T, N, S} <: SciMLBase.AbstractODESolution{T, N, S}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Solutions/#SciMLBase.AbstractRODESolution","page":"SciMLSolutions","title":"SciMLBase.AbstractRODESolution","text":"abstract type AbstractRODESolution{T, N, S} <: SciMLBase.AbstractODESolution{T, N, S}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Solutions/#SciMLBase.AbstractDAESolution","page":"SciMLSolutions","title":"SciMLBase.AbstractDAESolution","text":"abstract type AbstractDAESolution{T, N, S} <: SciMLBase.AbstractODESolution{T, N, S}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Common_Keywords/#Common-Keyword-Arguments","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"","category":"section"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"The following defines the keyword arguments which are meant to be preserved throughout all of the AbstractSciMLProblem cases (where applicable).","category":"page"},{"location":"interfaces/Common_Keywords/#Default-Algorithm-Hinting","page":"Common Keyword Arguments","title":"Default Algorithm Hinting","text":"","category":"section"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"To help choose the default algorithm, the keyword argument alg_hints is provided to solve. alg_hints is a Vector{Symbol} which describes the problem at a high level to the solver. The options are:","category":"page"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"This functionality is derived via the benchmarks in SciMLBenchmarks.jl","category":"page"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"Currently this is only implemented for the differential equation solvers.","category":"page"},
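{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"As a hedged sketch of how a hint is passed (the exact set of accepted hints is documented with each solver library; the problem below is only illustrative and assumes DifferentialEquations.jl is installed), an ODE solve can be told that the problem is stiff so that the default algorithm choice favors a stiff-aware method:","category":"page"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"using DifferentialEquations\n\nfunction rober!(du, u, p, t)\n    du[1] = -0.04u[1] + 1.0e4 * u[2] * u[3]\n    du[2] = 0.04u[1] - 1.0e4 * u[2] * u[3] - 3.0e7 * u[2]^2\n    du[3] = 3.0e7 * u[2]^2\nend\nprob = ODEProblem(rober!, [1.0, 0.0, 0.0], (0.0, 1.0e5))\n\n# No algorithm is passed; the hint steers the default algorithm toward a stiff solver.\nsol = solve(prob, alg_hints = [:stiff])","category":"page"},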
{"location":"interfaces/Common_Keywords/#Output-Control","page":"Common Keyword Arguments","title":"Output Control","text":"","category":"section"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"These arguments control the output behavior of the solvers. It defaults to maximum output to give the best interactive user experience, but can be reduced all the way to only saving the solution at the final timepoint.","category":"page"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"The following options are all related to output control. See the \"Examples\" section at the end of this page for some example usage.","category":"page"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"dense: Denotes whether to save the extra pieces required for dense (continuous) output. Default is save_everystep && isempty(saveat) for algorithms which have the ability to produce dense output, i.e. by default it's true unless the user has turned off saving on steps or has chosen a saveat value. If dense=false, the solution still acts like a function, and sol(t) is a linear interpolation between the saved time points.\nsaveat: Denotes specific times to save the solution at, during the solving phase. The solver will save at each of the timepoints in this array in the most efficient manner available to the solver. If only saveat is given, then the arguments save_everystep and dense are false by default. If saveat is given a number, then it will automatically expand to tspan[1]:saveat:tspan[2]. For methods where interpolation is not possible, saveat may be equivalent to tstops. The default value is [].\nsave_idxs: Denotes the indices for the components of the equation to save. Defaults to saving all indices. For example, if you are solving a 3-dimensional ODE, and given save_idxs = [1, 3], only the first and third components of the solution will be saved. Notice that in this case the saved solution will be two-dimensional.\ntstops: Denotes extra times that the timestepping algorithm must step to. This should be used to help the solver deal with discontinuities and singularities, since stepping exactly at the time of the discontinuity will improve accuracy. If a method cannot change timesteps (fixed timestep multistep methods), then tstops will use an interpolation, matching the behavior of saveat. If a method cannot change timesteps and also cannot interpolate, then tstops must be a multiple of dt or else an error will be thrown. Default is [].\nd_discontinuities: Denotes locations of discontinuities in low order derivatives. This will force FSAL algorithms which assume derivative continuity to re-evaluate the derivatives at the point of discontinuity. The default is [].\nsave_everystep: Saves the result at every step. Default is true if isempty(saveat).\nsave_on: Denotes whether intermediate solutions are saved. 
This overrides the settings of dense, saveat and save_everystep and is used by some applications to manually turn off saving temporarily. Everyday use of the solvers should leave this unchanged. Defaults to true.\nsave_start: Denotes whether the initial condition should be included in the solution type as the first timepoint. Defaults to true.\nsave_end: Denotes whether the final timepoint is forced to be saved, regardless of the other saving settings. Defaults to true.\ninitialize_save: Denotes whether to save after the callback initialization phase (when u_modified=true). Defaults to true.","category":"page"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"Note that dense requires save_everystep=true and saveat=false.","category":"page"},{"location":"interfaces/Common_Keywords/#Stepsize-Control","page":"Common Keyword Arguments","title":"Stepsize Control","text":"","category":"section"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"These arguments control the timestepping routines.","category":"page"},{"location":"interfaces/Common_Keywords/#Basic-Stepsize-Control","page":"Common Keyword Arguments","title":"Basic Stepsize Control","text":"","category":"section"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"adaptive: Turns on adaptive timestepping for appropriate methods. Default is true.\nabstol: Absolute tolerance in adaptive timestepping. This is the tolerance on local error estimates, not necessarily the global error (though these quantities are related).\nreltol: Relative tolerance in adaptive timestepping. This is the tolerance on local error estimates, not necessarily the global error (though these quantities are related).\ndt: Sets the initial stepsize. This is also the stepsize for fixed timestep methods. Defaults to an automatic choice if the method is adaptive.\ndtmax: Maximum dt for adaptive timestepping. Defaults are package-dependent.\ndtmin: Minimum dt for adaptive timestepping. Defaults are package-dependent.","category":"page"},{"location":"interfaces/Common_Keywords/#Fixed-Stepsize-Usage","page":"Common Keyword Arguments","title":"Fixed Stepsize Usage","text":"","category":"section"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"Note that if a method does not have adaptivity, the following rules apply:","category":"page"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"If dt is set, then the algorithm will step with size dt each iteration.\nIf tstops and dt are both set, then the algorithm will step with either a size dt, or use a smaller step to hit the tstops point.\nIf tstops is set without dt, then the algorithm will step directly to each value in tstops\nIf neither dt nor tstops are set, the solver will throw an error.","category":"page"},{"location":"interfaces/Common_Keywords/#Memory-Optimizations","page":"Common Keyword Arguments","title":"Memory Optimizations","text":"","category":"section"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"alias_u0: allows the solver to alias the initial condition array that is contained in the problem struct. Defaults to false.\ncache: pass a solver cache to decrease the construction time. 
This is not implemented for any of the problem interfaces at this moment.","category":"page"},{"location":"interfaces/Common_Keywords/#Miscellaneous","page":"Common Keyword Arguments","title":"Miscellaneous","text":"","category":"section"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"maxiters: Maximum number of iterations before stopping.\ncallback: Specifies a callback function that is called between iterations.\nverbose: Toggles whether warnings are thrown when the solver exits early. Defaults to true.","category":"page"},{"location":"interfaces/Common_Keywords/#Progress-Monitoring","page":"Common Keyword Arguments","title":"Progress Monitoring","text":"","category":"section"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"These arguments control the usage of the progressbar in the logger.","category":"page"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"progress: Turns on/off the Juno progressbar. Default is false.\nprogress_steps: Numbers of steps between updates of the progress bar. Default is 1000.\nprogress_name: Controls the name of the progressbar. Default is the name of the problem type.\nprogress_message: Controls the message with the progressbar. Defaults to showing dt, t, the maximum of u.","category":"page"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"The progress bars all use the Julia Logging interface in order to be generic to the IDE or programming tool that is used. For more information on how this is all put together, see this discussion.","category":"page"},{"location":"interfaces/Common_Keywords/#Error-Calculations","page":"Common Keyword Arguments","title":"Error Calculations","text":"","category":"section"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"If you are using the test problems (i.e. SciMLFunctions where f.analytic is defined), then options control the errors which are calculated. By default, any cheap error estimates are always calculated. 
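\n\nFor example (a hedged sketch, assuming OrdinaryDiffEq.jl is installed; the equation and its analytic solution are illustrative): if the SciMLFunction is given an analytic solution, the returned solution object carries an errors dictionary with these cheap estimates.\n\nusing OrdinaryDiffEq\nf = ODEFunction((u, p, t) -> 1.01u, analytic = (u0, p, t) -> u0 * exp(1.01t))\nprob = ODEProblem(f, 1.0, (0.0, 1.0))\nsol = solve(prob, Tsit5())\nsol.errors[:final] # error at the final time against f.analytic\n\n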
Extra keyword arguments include:","category":"page"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"timeseries_errors\ndense_errors","category":"page"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"for specifying more expensive errors.","category":"page"},{"location":"interfaces/Common_Keywords/#Automatic-Differentiation-Control","page":"Common Keyword Arguments","title":"Automatic Differentiation Control","text":"","category":"section"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"See the Automatic Differentiation page for a full description of sensealg","category":"page"},{"location":"interfaces/Algorithms/#SciMLAlgorithms","page":"SciMLAlgorithms","title":"SciMLAlgorithms","text":"","category":"section"},{"location":"interfaces/Algorithms/#Definition-of-the-AbstractSciMLAlgorithm-Interface","page":"SciMLAlgorithms","title":"Definition of the AbstractSciMLAlgorithm Interface","text":"","category":"section"},{"location":"interfaces/Algorithms/","page":"SciMLAlgorithms","title":"SciMLAlgorithms","text":"SciMLAlgorithms are defined as types which have dispatches to the function signature:","category":"page"},{"location":"interfaces/Algorithms/","page":"SciMLAlgorithms","title":"SciMLAlgorithms","text":"CommonSolve.solve(prob::AbstractSciMLProblem,alg::AbstractSciMLAlgorithm;kwargs...)","category":"page"},{"location":"interfaces/Algorithms/#Algorithm-Specific-Arguments","page":"SciMLAlgorithms","title":"Algorithm-Specific Arguments","text":"","category":"section"},{"location":"interfaces/Algorithms/","page":"SciMLAlgorithms","title":"SciMLAlgorithms","text":"Note that because the keyword arguments of solve are designed to be common across the whole problem type, algorithms should have the algorithm-specific keyword arguments defined as part of the algorithm constructor. For example, Rodas5 has a choice of autodiff::Bool which is not common across all ODE solvers, and thus autodiff is a algorithm-specific keyword argument handled via Rodas5(autodiff=true).","category":"page"},{"location":"interfaces/Algorithms/#Remake","page":"SciMLAlgorithms","title":"Remake","text":"","category":"section"},{"location":"interfaces/Algorithms/","page":"SciMLAlgorithms","title":"SciMLAlgorithms","text":"Note that remake is applicable to AbstractSciMLAlgorithm types, but this is not used in the public API. 
It's used for solvers to swap out components like ForwardDiff chunk sizes.","category":"page"},{"location":"interfaces/Algorithms/#Common-Algorithm-Keyword-Arguments","page":"SciMLAlgorithms","title":"Common Algorithm Keyword Arguments","text":"","category":"section"},{"location":"interfaces/Algorithms/","page":"SciMLAlgorithms","title":"SciMLAlgorithms","text":"Commonly used algorithm keyword arguments are:","category":"page"},{"location":"interfaces/Algorithms/#Traits","page":"SciMLAlgorithms","title":"Traits","text":"","category":"section"},{"location":"interfaces/Algorithms/","page":"SciMLAlgorithms","title":"SciMLAlgorithms","text":"SciMLBase.isautodifferentiable\nSciMLBase.allows_arbitrary_number_types\nSciMLBase.allowscomplex\nSciMLBase.isadaptive\nSciMLBase.isdiscrete\nSciMLBase.forwarddiffs_model\nSciMLBase.forwarddiffs_model_time","category":"page"},{"location":"interfaces/Algorithms/#SciMLBase.isautodifferentiable","page":"SciMLAlgorithms","title":"SciMLBase.isautodifferentiable","text":"isautodifferentiable(alg::AbstractDEAlgorithm)\n\nTrait declaration for whether an algorithm is compatible with direct automatic differentiation, i.e. can have algorithms like ForwardDiff or ReverseDiff attempt to differentiate directly through the solver.\n\nDefaults to false as only pure-Julia algorithms can have this be true.\n\n\n\n\n\n","category":"function"},{"location":"interfaces/Algorithms/#SciMLBase.allows_arbitrary_number_types","page":"SciMLAlgorithms","title":"SciMLBase.allows_arbitrary_number_types","text":"allows_arbitrary_number_types(alg::AbstractDEAlgorithm)\n\nTrait declaration for whether an algorithm is compatible with arbitrary number types in the state and time, i.e. number types beyond the standard machine floats (for example BigFloat).\n\nDefaults to false as only pure-Julia algorithms can have this be true.\n\n\n\n\n\n","category":"function"},{"location":"interfaces/Algorithms/#SciMLBase.allowscomplex","page":"SciMLAlgorithms","title":"SciMLBase.allowscomplex","text":"allowscomplex(alg::AbstractDEAlgorithm)\n\nTrait declaration for whether an algorithm is compatible with having complex numbers as the state variables.\n\nDefaults to false.\n\n\n\n\n\n","category":"function"},{"location":"interfaces/Algorithms/#SciMLBase.isadaptive","page":"SciMLAlgorithms","title":"SciMLBase.isadaptive","text":"isadaptive(alg::AbstractDEAlgorithm)\n\nTrait declaration for whether an algorithm uses adaptivity, i.e. has a non-quasi-static compute graph.\n\nDefaults to true.\n\n\n\n\n\nis_integrator_adaptive(i::DEIntegrator)\n\nChecks if the integrator is adaptive\n\n\n\n\n\n","category":"function"},{"location":"interfaces/Algorithms/#SciMLBase.isdiscrete","page":"SciMLAlgorithms","title":"SciMLBase.isdiscrete","text":"isdiscrete(alg::AbstractDEAlgorithm)\n\nTrait declaration for whether an algorithm allows for discrete state values, such as integers.\n\nDefaults to false.\n\n\n\n\n\n","category":"function"},{"location":"interfaces/Algorithms/#SciMLBase.forwarddiffs_model","page":"SciMLAlgorithms","title":"SciMLBase.forwarddiffs_model","text":"forwarddiffs_model(alg::AbstractDEAlgorithm)\n\nTrait declaration for whether an algorithm uses ForwardDiff.jl on the model function, i.e. whether the model function is called with ForwardDiff.jl dual numbers.\n\nDefaults to false as only pure-Julia algorithms can have this be true.\n\n\n\n\n\n","category":"function"},{"location":"interfaces/Algorithms/#SciMLBase.forwarddiffs_model_time","page":"SciMLAlgorithms","title":"SciMLBase.forwarddiffs_model_time","text":"forwarddiffs_model_time(alg::AbstractDEAlgorithm)\n\nTrait declaration for whether an algorithm uses ForwardDiff.jl on the t argument of the model f(u,p,t) function, i.e. whether f is called with a ForwardDiff.jl dual number for t.\n\nDefaults to false as only a few pure-Julia algorithms (Rosenbrock methods) have this as true.\n\n\n\n\n\n","category":"function"},{"location":"interfaces/Algorithms/#Abstract-SciML-Algorithms","page":"SciMLAlgorithms","title":"Abstract SciML Algorithms","text":"","category":"section"},{"location":"interfaces/Algorithms/","page":"SciMLAlgorithms","title":"SciMLAlgorithms","text":"SciMLBase.AbstractSciMLAlgorithm\nSciMLBase.AbstractDEAlgorithm\nSciMLBase.AbstractLinearAlgorithm\nSciMLBase.AbstractNonlinearAlgorithm\nSciMLBase.AbstractIntervalNonlinearAlgorithm\nSciMLBase.AbstractQuadratureAlgorithm\nSciMLBase.AbstractOptimizationAlgorithm\nSciMLBase.AbstractSteadyStateAlgorithm\nSciMLBase.AbstractODEAlgorithm\nSciMLBase.AbstractSecondOrderODEAlgorithm\nSciMLBase.AbstractRODEAlgorithm\nSciMLBase.AbstractSDEAlgorithm\nSciMLBase.AbstractDAEAlgorithm\nSciMLBase.AbstractDDEAlgorithm\nSciMLBase.AbstractSDDEAlgorithm","category":"page"},{"location":"interfaces/Algorithms/#SciMLBase.AbstractSciMLAlgorithm","page":"SciMLAlgorithms","title":"SciMLBase.AbstractSciMLAlgorithm","text":"abstract type AbstractSciMLAlgorithm\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Algorithms/#SciMLBase.AbstractDEAlgorithm","page":"SciMLAlgorithms","title":"SciMLBase.AbstractDEAlgorithm","text":"abstract type AbstractDEAlgorithm <: SciMLBase.AbstractSciMLAlgorithm\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Algorithms/#SciMLBase.AbstractLinearAlgorithm","page":"SciMLAlgorithms","title":"SciMLBase.AbstractLinearAlgorithm","text":"abstract type AbstractLinearAlgorithm <: SciMLBase.AbstractSciMLAlgorithm\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Algorithms/#SciMLBase.AbstractNonlinearAlgorithm","page":"SciMLAlgorithms","title":"SciMLBase.AbstractNonlinearAlgorithm","text":"abstract type AbstractNonlinearAlgorithm <: SciMLBase.AbstractSciMLAlgorithm\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Algorithms/#SciMLBase.AbstractIntervalNonlinearAlgorithm","page":"SciMLAlgorithms","title":"SciMLBase.AbstractIntervalNonlinearAlgorithm","text":"abstract type AbstractIntervalNonlinearAlgorithm <: 
SciMLBase.AbstractSciMLAlgorithm\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Algorithms/#SciMLBase.AbstractQuadratureAlgorithm","page":"SciMLAlgorithms","title":"SciMLBase.AbstractQuadratureAlgorithm","text":"abstract type AbstractIntegralAlgorithm <: SciMLBase.AbstractSciMLAlgorithm\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Algorithms/#SciMLBase.AbstractOptimizationAlgorithm","page":"SciMLAlgorithms","title":"SciMLBase.AbstractOptimizationAlgorithm","text":"abstract type AbstractOptimizationAlgorithm <: SciMLBase.AbstractDEAlgorithm\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Algorithms/#SciMLBase.AbstractSteadyStateAlgorithm","page":"SciMLAlgorithms","title":"SciMLBase.AbstractSteadyStateAlgorithm","text":"abstract type AbstractSteadyStateAlgorithm <: SciMLBase.AbstractDEAlgorithm\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Algorithms/#SciMLBase.AbstractODEAlgorithm","page":"SciMLAlgorithms","title":"SciMLBase.AbstractODEAlgorithm","text":"abstract type AbstractODEAlgorithm <: SciMLBase.AbstractDEAlgorithm\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Algorithms/#SciMLBase.AbstractSecondOrderODEAlgorithm","page":"SciMLAlgorithms","title":"SciMLBase.AbstractSecondOrderODEAlgorithm","text":"abstract type AbstractSecondOrderODEAlgorithm <: SciMLBase.AbstractDEAlgorithm\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Algorithms/#SciMLBase.AbstractRODEAlgorithm","page":"SciMLAlgorithms","title":"SciMLBase.AbstractRODEAlgorithm","text":"abstract type AbstractRODEAlgorithm <: SciMLBase.AbstractDEAlgorithm\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Algorithms/#SciMLBase.AbstractSDEAlgorithm","page":"SciMLAlgorithms","title":"SciMLBase.AbstractSDEAlgorithm","text":"abstract type AbstractSDEAlgorithm <: SciMLBase.AbstractDEAlgorithm\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Algorithms/#SciMLBase.AbstractDAEAlgorithm","page":"SciMLAlgorithms","title":"SciMLBase.AbstractDAEAlgorithm","text":"abstract type AbstractDAEAlgorithm <: SciMLBase.AbstractDEAlgorithm\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Algorithms/#SciMLBase.AbstractDDEAlgorithm","page":"SciMLAlgorithms","title":"SciMLBase.AbstractDDEAlgorithm","text":"abstract type AbstractDDEAlgorithm <: SciMLBase.AbstractDEAlgorithm\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Algorithms/#SciMLBase.AbstractSDDEAlgorithm","page":"SciMLAlgorithms","title":"SciMLBase.AbstractSDDEAlgorithm","text":"abstract type AbstractSDDEAlgorithm <: SciMLBase.AbstractDEAlgorithm\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Init_Solve/#The-SciML-init-and-solve-Functions","page":"The SciML init and solve Functions","title":"The SciML init and solve Functions","text":"","category":"section"},{"location":"interfaces/Init_Solve/","page":"The SciML init and solve Functions","title":"The SciML init and solve Functions","text":"solve function has the default definition","category":"page"},{"location":"interfaces/Init_Solve/","page":"The SciML init and solve Functions","title":"The SciML init and solve Functions","text":"solve(args...; kwargs...) 
= solve!(init(args...; kwargs...))","category":"page"},{"location":"interfaces/Init_Solve/","page":"The SciML init and solve Functions","title":"The SciML init and solve Functions","text":"The interface for the three functions is as follows:","category":"page"},{"location":"interfaces/Init_Solve/","page":"The SciML init and solve Functions","title":"The SciML init and solve Functions","text":"init(::ProblemType, args...; kwargs...) :: IteratorType\nsolve!(::IteratorType) :: SolutionType","category":"page"},{"location":"interfaces/Init_Solve/","page":"The SciML init and solve Functions","title":"The SciML init and solve Functions","text":"where ProblemType, IteratorType, and SolutionType are the types defined in your package.","category":"page"},{"location":"interfaces/Init_Solve/","page":"The SciML init and solve Functions","title":"The SciML init and solve Functions","text":"To avoid method ambiguity, the first argument of solve, solve!, and init must be dispatched on the type defined in your package. For example, do not define a method such as","category":"page"},{"location":"interfaces/Init_Solve/","page":"The SciML init and solve Functions","title":"The SciML init and solve Functions","text":"init(::AbstractVector, ::AlgorithmType)","category":"page"},{"location":"interfaces/Init_Solve/#init-and-the-Iterator-Interface","page":"The SciML init and solve Functions","title":"init and the Iterator Interface","text":"","category":"section"},{"location":"interfaces/Init_Solve/","page":"The SciML init and solve Functions","title":"The SciML init and solve Functions","text":"init's return gives an IteratorType which is designed to allow the user to have more direct handling over the internal solving process. Because of this internal nature, the IteratorType has a less unified interface across problem types than other portions like ProblemType and SolutionType. For example, for differential equations this is the Integrator Interface designed for mutating solutions in a manner for callback implementation, which is distinctly different from the LinearSolve init interface which is designed for caching efficiency with reusing factorizations.","category":"page"},{"location":"interfaces/Init_Solve/#__solve-and-High-Level-Handling","page":"The SciML init and solve Functions","title":"__solve and High-Level Handling","text":"","category":"section"},{"location":"interfaces/Init_Solve/","page":"The SciML init and solve Functions","title":"The SciML init and solve Functions","text":"While init and solve are the common entry point for users, solver packages will mostly define dispatches on SciMLBase.__init and SciMLBase.__solve. The reason is because this allows for SciMLBase.init and SciMLBase.solve to have common implementations across all solvers for doing things such as checking for common errors and throwing high level messages. 
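\n\nFrom the user side this layering is invisible: solve(prob, alg) behaves as solve!(init(prob, alg)), so the two-step form can be used whenever direct handling of the returned iterator/cache is wanted. A minimal sketch of that relationship, assuming LinearSolve.jl is installed (the matrix and right-hand side are illustrative):\n\nusing LinearSolve\n\nA = [2.0 1.0; 1.0 3.0]\nb = [1.0, 2.0]\nprob = LinearProblem(A, b)\n\ncache = init(prob) # the IteratorType: a reusable cache\nsol = solve!(cache) # equivalent to solve(prob)\nsol.u # the solution vector\n\n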
Solvers can opt-out of the high level error handling by directly defining SciMLBase.init and SciMLBase.solve instead, though this is not recommended in order to allow for uniformity of the error messages.","category":"page"},{"location":"interfaces/Differentiation/#sensealg","page":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","title":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","text":"","category":"section"},{"location":"interfaces/Differentiation/","page":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","title":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","text":"Automatic differentiation control is done through the sensealg keyword argument. Hooks exist in the high level interfaces for solve which shuttle the definitions of automatic differentiation overloads to dispatches defined in DiffEqSensitivity.jl (should be renamed SciMLSensitivity.jl as it expands). This is done by first entering a top-level solve definition, for example:","category":"page"},{"location":"interfaces/Differentiation/","page":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","title":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","text":"function solve(prob::AbstractDEProblem, args...; sensealg=nothing,\n u0=nothing, p=nothing, kwargs...)\n u0 = u0 !== nothing ? u0 : prob.u0\n p = p !== nothing ? p : prob.p\n if sensealg === nothing && haskey(prob.kwargs, :sensealg)\n sensealg = prob.kwargs[:sensealg]\n end\n solve_up(prob, sensealg, u0, p, args...; kwargs...)\nend","category":"page"},{"location":"interfaces/Differentiation/","page":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","title":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","text":"solve_up then drops down the differentiable arguments as positional arguments, which is required for the ChainRules.jl interface. Then the ChainRules overloads are written on the solve_up calls, like:","category":"page"},{"location":"interfaces/Differentiation/","page":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","title":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","text":"function ChainRulesCore.frule(::typeof(solve_up), prob,\n sensealg::Union{Nothing,AbstractSensitivityAlgorithm},\n u0, p, args...;\n kwargs...)\n _solve_forward(prob, sensealg, u0, p, args...; kwargs...)\nend\n\nfunction ChainRulesCore.rrule(::typeof(solve_up), prob::SciMLBase.AbstractDEProblem,\n sensealg::Union{Nothing,AbstractSensitivityAlgorithm},\n u0, p, args...;\n kwargs...)\n _solve_adjoint(prob, sensealg, u0, p, args...; kwargs...)\nend","category":"page"},{"location":"interfaces/Differentiation/","page":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","title":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","text":"Default definitions then exist to throw an informative error if the sensitivity mechanism is not added:","category":"page"},{"location":"interfaces/Differentiation/","page":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","title":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","text":"function _concrete_solve_adjoint(args...; kwargs...)\n error(\"No adjoint rules exist. Check that you added `using DiffEqSensitivity`\")\nend\n\nfunction _concrete_solve_forward(args...; kwargs...)\n error(\"No sensitivity rules exist. 
Check that you added `using DiffEqSensitivity`\")\nend","category":"page"},{"location":"interfaces/Differentiation/","page":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","title":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","text":"The sensitivity mechanism is kept in a separate package because of the high dependency and load time cost introduced by the automatic differentiation libraries. Different choices of automatic differentiation are then selected by the sensealg keyword argument in solve, which is made into a positional argument in the _solve_adjoint and other functions in order to allow dispatch.","category":"page"},{"location":"interfaces/Differentiation/#SensitivityADPassThrough","page":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","title":"SensitivityADPassThrough","text":"","category":"section"},{"location":"interfaces/Differentiation/","page":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","title":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","text":"The special sensitivity algorithm SensitivityADPassThrough is used to ignore the internal sensitivity dispatches and instead do automatic differentiation directly through the solver. Generally this sensealg is only used internally.","category":"page"},{"location":"interfaces/Differentiation/#Note-about-ForwardDiff","page":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","title":"Note about ForwardDiff","text":"","category":"section"},{"location":"interfaces/Differentiation/","page":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","title":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","text":"ForwardDiff does not use ChainRules.jl and thus it completely ignores the special handling.","category":"page"},{"location":"#The-SciML-Common-Interface-for-Julia-Equation-Solvers","page":"Home","title":"The SciML Common Interface for Julia Equation Solvers","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"The SciML common interface ties together the numerical solvers of the Julia package ecosystem into a single unified interface. 
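\n\nFor a first impression of what a unified interface means in practice, here is a hedged sketch (assuming the solver packages named below are installed; the equations are illustrative) in which two very different problem types are defined and solved with the same problem/solve vocabulary:\n\nusing OrdinaryDiffEq, NonlinearSolve\n\n# An ODE u' = -p*u and a nonlinear system u.*u = p, both handled by solve(prob, alg)\nodeprob = ODEProblem((u, p, t) -> -p * u, 1.0, (0.0, 1.0), 2.0)\nodesol = solve(odeprob, Tsit5())\n\nnlprob = NonlinearProblem((u, p) -> u .* u .- p, [1.0, 1.2], 2.0)\nnlsol = solve(nlprob, NewtonRaphson())\n\n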
It is designed for maximal efficiency and parallelism, while incorporating essential features for large-scale scientific machine learning such as differentiability, composability, and sparsity.","category":"page"},{"location":"","page":"Home","title":"Home","text":"This documentation is made to pool together the docs of the various SciML libraries to paint the overarching picture, establish development norms, and document the shared/common functionality.","category":"page"},{"location":"#Domains-of-SciML","page":"Home","title":"Domains of SciML","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"The SciML common interface covers the following domains:","category":"page"},{"location":"","page":"Home","title":"Home","text":"Linear systems (LinearProblem)\nDirect methods for dense and sparse\nIterative solvers with preconditioning\nNonlinear Systems (NonlinearProblem)\nRootfinding for systems of nonlinear equations\nInterval Nonlinear Systems\nBracketing rootfinders for nonlinear equations with interval bounds\nIntegrals (quadrature) (IntegralProblem)\nDifferential Equations\nDiscrete equations (function maps, discrete stochastic (Gillespie/Markov) simulations) (DiscreteProblem)\nOrdinary differential equations (ODEs) (ODEProblem)\nSplit and Partitioned ODEs (Symplectic integrators, IMEX Methods) (SplitODEProblem)\nStochastic ordinary differential equations (SODEs or SDEs) (SDEProblem)\nStochastic differential-algebraic equations (SDAEs) (SDEProblem with mass matrices)\nRandom differential equations (RODEs or RDEs) (RODEProblem)\nDifferential algebraic equations (DAEs) (DAEProblem and ODEProblem with mass matrices)\nDelay differential equations (DDEs) (DDEProblem)\nNeutral, retarded, and algebraic delay differential equations (NDDEs, RDDEs, and DDAEs)\nStochastic delay differential equations (SDDEs) (SDDEProblem)\nExperimental support for stochastic neutral, retarded, and algebraic delay differential equations (SNDDEs, SRDDEs, and SDDAEs)\nMixed discrete and continuous equations (Hybrid Equations, Jump Diffusions) (AbstractDEProblems with callbacks)\nOptimization (OptimizationProblem)\nNonlinear (constrained) optimization\n(Stochastic/Delay/Differential-Algebraic) Partial Differential Equations (PDESystem)\nFinite difference and finite volume methods\nInterfaces to finite element methods\nPhysics-Informed Neural Networks (PINNs)\nIntegro-Differential Equations\nFractional Differential Equations","category":"page"},{"location":"","page":"Home","title":"Home","text":"The SciML common interface also includes ModelingToolkit.jl for defining such systems symbolically, allowing for optimizations like automated generation of parallel code, symbolic simplification, and generation of sparsity patterns.","category":"page"},{"location":"#Extended-SciML-Domain","page":"Home","title":"Extended SciML Domain","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"In addition to the purely numerical representations of mathematical objects, there are also sets of problem types associated with common mathematical algorithms. 
These are:","category":"page"},{"location":"","page":"Home","title":"Home","text":"Data-driven modeling\nDiscrete-time data-driven dynamical systems (DiscreteDataDrivenProblem)\nContinuous-time data-driven dynamical systems (ContinuousDataDrivenProblem)\nSymbolic regression (DirectDataDrivenProblem)\nUncertainty quantification and expected values (ExpectationProblem)","category":"page"},{"location":"#Inverse-Problems,-Parameter-Estimation,-and-Structural-Identification","page":"Home","title":"Inverse Problems, Parameter Estimation, and Structural Identification","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"We note that parameter estimation and inverse problems are solved directly on their constituent problem types using tools like DiffEqFlux.jl. Thus for example, there is no ODEInverseProblem, and instead ODEProblem is used to find the parameters p that solve the inverse problem.","category":"page"},{"location":"#Common-Interface-High-Level","page":"Home","title":"Common Interface High Level","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"The SciML interface is common as the usage of arguments is standardized across all of the problem domains. Underlying high level ideas include:","category":"page"},{"location":"","page":"Home","title":"Home","text":"All domains use the same interface of defining a AbstractSciMLProblem which is then solved via solve(prob,alg;kwargs), where alg is a AbstractSciMLAlgorithm. The keyword argument namings are standardized across the organization.\nAbstractSciMLProblems are generally defined by a SciMLFunction which can define extra details about a model function, such as its analytical Jacobian, its sparsity patterns and so on.\nThere is an organization-wide method for defining linear and nonlinear solvers used within other solvers, giving maximum control of performance to the user.\nTypes used within the packages are defined by the input types. For example, packages attempt to internally use the type of the initial condition as the type for the state within differential equation solvers.\nsolve calls should be thread-safe and parallel-safe.\ninit(prob,alg;kwargs) returns an iterator which allows for directly iterating over the solution process\nHigh performance is key. 
Any performance that is not at the top level is considered a bug and should be reported as such.\nAll functions have an in-place and out-of-place form, where the in-place form is made to utilize mutation for high performance on large-scale problems and the out-of-place form is for compatibility with tooling like static arrays and some reverse-mode automatic differentiation systems.","category":"page"},{"location":"#User-Facing-Solver-Libraries","page":"Home","title":"User-Facing Solver Libraries","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"DifferentialEquations.jl\nMulti-package interface of high performance numerical solvers of differential equations\nModelingToolkit.jl\nThe symbolic modeling package which implements the SciML symbolic common interface.\nLinearSolve.jl\nMulti-package interface for specifying linear solvers (direct, sparse, and iterative), along with tools for caching and preconditioners for use in large-scale modeling.\nNonlinearSolve.jl\nHigh performance numerical solving of nonlinear systems.\nIntegrals.jl\nMulti-package interface for high performance, batched, and parallelized numerical quadrature.\nOptimization.jl\nMulti-package interface for numerical solving of optimization problems.\nNeuralPDE.jl\nPhysics-Informed Neural Network (PINN) package for transforming partial differential equations into optimization problems.\nDiffEqOperators.jl\nAutomated finite difference method (FDM) package for transforming partial differential equations into nonlinear problems and ordinary differential equations.\nDiffEqFlux.jl\nHigh level package for scientific machine learning applications, such as neural and universal differential equations, solving of inverse problems, parameter estimation, nonlinear optimal control, and more.\nDataDrivenDiffEq.jl\nMulti-package interface for data-driven modeling, Koopman dynamic mode decomposition, symbolic regression/sparsification, and automated model discovery.\nSciMLExpectations.jl\nExtension to the dynamical modeling tools for calculating expectations.","category":"page"},{"location":"#Interface-Implementation-Libraries","page":"Home","title":"Interface Implementation Libraries","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"SciMLBase.jl\nThe core package defining the interface which is consumed by the modeling and solver packages.\nDiffEqBase.jl\nThe core package defining the extended interface which is consumed by the differential equation solver packages.\nSciMLSensitivity.jl\nA package which pools together the definition of derivative overloads to define the common sensealg automatic differentiation interface.\nDiffEqNoiseProcess.jl\nA package which defines the stochastic AbstractNoiseProcess interface for the SciML ecosystem.\nRecursiveArrayTools.jl\nA package which defines the underlying AbstractVectorOfArray structure used as the output for all time series results.\nArrayInterface.jl\nThe package which defines the extended AbstractArray interface employed throughout the SciML ecosystem.","category":"page"},{"location":"#Using-Facing-Modeling-Libraries","page":"Home","title":"Using-Facing Modeling Libraries","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"There are too many to name here and this will be populated when there is time!","category":"page"},{"location":"#Flowchart-Example-for-PDE-Constrained-Optimal-Control","page":"Home","title":"Flowchart Example for PDE-Constrained Optimal 
Control","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"The following example showcases how the pieces of the common interface connect to solve a problem that mixes inference, symbolics, and numerics.","category":"page"},{"location":"","page":"Home","title":"Home","text":"(Image: )","category":"page"},{"location":"#External-Binding-Libraries","page":"Home","title":"External Binding Libraries","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"diffeqr\nSolving differential equations in R using DifferentialEquations.jl with ModelingToolkit for JIT compilation and GPU-acceleration\ndiffeqpy\nSolving differential equations in Python using DifferentialEquations.jl","category":"page"},{"location":"#Solver-Libraries","page":"Home","title":"Solver Libraries","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"There are too many to name here. Check out the SciML Organization Github Page for details.","category":"page"},{"location":"#Contributing","page":"Home","title":"Contributing","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"Please refer to the SciML ColPrac: Contributor's Guide on Collaborative Practices for Community Packages for guidance on PRs, issues, and other matters relating to contributing to SciML.\nSee the SciML Style Guide for common coding practices and other style decisions.\nThere are a few community forums:\nThe #diffeq-bridged and #sciml-bridged channels in the Julia Slack\nThe #diffeq-bridged and #sciml-bridged channels in the Julia Zulip\nOn the Julia Discourse forums\nSee also SciML Community page","category":"page"},{"location":"#Reproducibility","page":"Home","title":"Reproducibility","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"
The documentation of this SciML package was built using these direct dependencies,","category":"page"},{"location":"","page":"Home","title":"Home","text":"using Pkg # hide\nPkg.status() # hide","category":"page"},{"location":"","page":"Home","title":"Home","text":"
","category":"page"},{"location":"","page":"Home","title":"Home","text":"
and using this machine and Julia version.","category":"page"},{"location":"","page":"Home","title":"Home","text":"using InteractiveUtils # hide\nversioninfo() # hide","category":"page"},{"location":"","page":"Home","title":"Home","text":"
","category":"page"},{"location":"","page":"Home","title":"Home","text":"
A more complete overview of all dependencies and their versions is also provided.","category":"page"},{"location":"","page":"Home","title":"Home","text":"using Pkg # hide\nPkg.status(;mode = PKGMODE_MANIFEST) # hide","category":"page"},{"location":"","page":"Home","title":"Home","text":"
","category":"page"},{"location":"","page":"Home","title":"Home","text":"using TOML\nusing Markdown\nversion = TOML.parse(read(\"../../Project.toml\", String))[\"version\"]\nname = TOML.parse(read(\"../../Project.toml\", String))[\"name\"]\nlink_manifest = \"https://github.com/SciML/\" * name * \".jl/tree/gh-pages/v\" * version *\n \"/assets/Manifest.toml\"\nlink_project = \"https://github.com/SciML/\" * name * \".jl/tree/gh-pages/v\" * version *\n \"/assets/Project.toml\"\nMarkdown.parse(\"\"\"You can also download the\n[manifest]($link_manifest)\nfile and the\n[project]($link_project)\nfile.\n\"\"\")","category":"page"},{"location":"interfaces/Array_and_Number/#arrayandnumber","page":"SciML Container (Array) and Number Interfaces","title":"SciML Container (Array) and Number Interfaces","text":"","category":"section"},{"location":"interfaces/Array_and_Number/","page":"SciML Container (Array) and Number Interfaces","title":"SciML Container (Array) and Number Interfaces","text":"We live in a society, and therefore there are rules. In this tutorial we outline the rules which are required on container and number types which are allowable in SciML tools.","category":"page"},{"location":"interfaces/Array_and_Number/","page":"SciML Container (Array) and Number Interfaces","title":"SciML Container (Array) and Number Interfaces","text":"warn: Warn\nIn general as of 2023, strict adherence to this interface is an early work-in-progress. If anything does not conform to the documented interface, please open an issue.","category":"page"},{"location":"interfaces/Array_and_Number/","page":"SciML Container (Array) and Number Interfaces","title":"SciML Container (Array) and Number Interfaces","text":"note: Note\nThere are many types which can work with a specific solver that do satisfy this interface. Many times as part of prototyping you may want to side-step the high level interface checks in order to simply test whether a new type is working. To do this, set interface_checks = false as a keyword argument to init/solve to bypass any of the internal interface checks. This means you will no longer get a nice high-level error message and instead it will attempt to use the type without restrictions. Note that not every problem/solver has implemented this new keyword argument as of 2023.","category":"page"},{"location":"interfaces/Array_and_Number/#Note-About-Wrapped-Solvers","page":"SciML Container (Array) and Number Interfaces","title":"Note About Wrapped Solvers","text":"","category":"section"},{"location":"interfaces/Array_and_Number/","page":"SciML Container (Array) and Number Interfaces","title":"SciML Container (Array) and Number Interfaces","text":"Due to limitations of wrapped solvers, any solver that is a wrapped solver from an existing C/Fortran code is inherently limited to Float64 and Vector{Float64} for its operations. This includes packages like Sundials.jl, LSODA.jl, DASKR.jl, MINPACK.jl, and many more. This is fundamental to these solvers and it is not expected that they will allow the full set of SciML types in the future. 
If more abstract number/container definitions are required, then these are not the appropriate solvers to use.","category":"page"},{"location":"interfaces/Array_and_Number/#SciML-Number-Types","page":"SciML Container (Array) and Number Interfaces","title":"SciML Number Types","text":"","category":"section"},{"location":"interfaces/Array_and_Number/","page":"SciML Container (Array) and Number Interfaces","title":"SciML Container (Array) and Number Interfaces","text":"The number types are the types used to define the dependent variables (i.e. u0) and the independent variables (t or tspan). These two types can be different, and can have different restrictions depending on the type of solver which is employed. The following rules for a Number type are held in general:","category":"page"},{"location":"interfaces/Array_and_Number/","page":"SciML Container (Array) and Number Interfaces","title":"SciML Container (Array) and Number Interfaces","text":"Number types can be used in SciML directly or in containers. If a problem defines a value like u0 using a Number type, the out-of-place form must be used for the problem definition.\nx::T + y::T = z::T\nx::T * y::T = z::T\noneunit(x::T)::T\none(x::T) * oneunit(x::T) = z::T\nt::T2 * x::T + y::T = z::T for T2 a time type and T the dependent variable type (this includes the muladd equivalent form).","category":"page"},{"location":"interfaces/Array_and_Number/","page":"SciML Container (Array) and Number Interfaces","title":"SciML Container (Array) and Number Interfaces","text":"Additionally, the following rules apply to subsets of uses:","category":"page"},{"location":"interfaces/Array_and_Number/#Adaptive-Number-Types","page":"SciML Container (Array) and Number Interfaces","title":"Adaptive Number Types","text":"","category":"section"},{"location":"interfaces/Array_and_Number/","page":"SciML Container (Array) and Number Interfaces","title":"SciML Container (Array) and Number Interfaces","text":"x::T / y::T = z::T\nDefault choices of norms can assume sqrt(x::T)::T exists. If internalnorm is overridden then this may not be required (for example, changing the norm to inf-norm).\nx::T ^ y::T = z::T","category":"page"},{"location":"interfaces/Array_and_Number/#Time-Types-(Independent-Variables)","page":"SciML Container (Array) and Number Interfaces","title":"Time Types (Independent Variables)","text":"","category":"section"},{"location":"interfaces/Array_and_Number/","page":"SciML Container (Array) and Number Interfaces","title":"SciML Container (Array) and Number Interfaces","text":"If a solver is time adaptive, the time type must be a floating point number. Rational is only allowed for non-adaptive solves.","category":"page"},{"location":"interfaces/Array_and_Number/#SciML-Container-(Array)-Types","page":"SciML Container (Array) and Number Interfaces","title":"SciML Container (Array) Types","text":"","category":"section"},{"location":"interfaces/Array_and_Number/","page":"SciML Container (Array) and Number Interfaces","title":"SciML Container (Array) and Number Interfaces","text":"Container types are types which hold number types. They can be used to define objects like the state vector (u0) of a problem. 
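\n\n(As an informal illustration of the Number rules above, with T = T2 = Float64; this is a sketch of the assumed operations, not an official verification tool.)\n\nT = Float64\nx, y, t = rand(T), rand(T), rand(T)\n@assert x + y isa T && x * y isa T\n@assert oneunit(x) isa T && one(x) * oneunit(x) isa T\n@assert t * x + y isa T # time type times dependent variable type\n# additionally assumed by adaptive methods:\n@assert x / y isa T && sqrt(x) isa T && x^y isa T\n\n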
The following operations are required in a container type to be used with SciML solvers:","category":"page"},{"location":"interfaces/Array_and_Number/","page":"SciML Container (Array) and Number Interfaces","title":"SciML Container (Array) and Number Interfaces","text":"Broadcast is defined according to the Julia broadcast interface.\nThe container type correctly defines interface overloads to satisfy the ArrayInterface.jl specification.\nArrayInterface.zeromatrix(x::T)::T2 defines a compatible matrix type (see below).\neltype(x::T)::T2 is a compatible Number type.\nx::T .+ y::T = z::T (i.e. broadcast similar is defined to be type-preserving)\nIndexing is only required if ArrayInterface.fast_scalar_indexing(x::T)==true. If true, scalar indexing x[i] is assumed to be defined and run through all variables.","category":"page"},{"location":"interfaces/Array_and_Number/","page":"SciML Container (Array) and Number Interfaces","title":"SciML Container (Array) and Number Interfaces","text":"note: Note\n\"eltype(x::T)::T2 is a compatible Number type\" excludes nested types such as Array{Array{T}}. However, recursive vectors can be conformed to the interface with zero overhead using tools from RecursiveArrayTools.jl such as VectorOfArray(x). Since this greatly simplifies the interfaces and the ability to check for correctness, doing this wrapping is highly recommended and there are no plans to relax this requirement.","category":"page"},{"location":"interfaces/Array_and_Number/","page":"SciML Container (Array) and Number Interfaces","title":"SciML Container (Array) and Number Interfaces","text":"Additionally, the following rules apply to subsets of uses:","category":"page"},{"location":"interfaces/Array_and_Number/#SciML-Mutable-Array-Types","page":"SciML Container (Array) and Number Interfaces","title":"SciML Mutable Array Types","text":"","category":"section"},{"location":"interfaces/Array_and_Number/","page":"SciML Container (Array) and Number Interfaces","title":"SciML Container (Array) and Number Interfaces","text":"similar(x::T)::T\nzero(x::T)::T\nz::T .= x::T .+ y::T is defined\nz::T .= x::T .* y::T is defined\nz::T .= t::T2 .* x::T where T2 is the time type (a Number) and T is the container type.\n(Optional) Base.resize!(x,i) is required for resize!(integrator,i) to be supported.","category":"page"},{"location":"interfaces/Array_and_Number/#SciML-Matrix-(Operator)-Type","page":"SciML Container (Array) and Number Interfaces","title":"SciML Matrix (Operator) Type","text":"","category":"section"},{"location":"interfaces/Array_and_Number/","page":"SciML Container (Array) and Number Interfaces","title":"SciML Container (Array) and Number Interfaces","text":"Note that the matrix type may not match the type of the initial container u0. An example is ComponentMatrix as the matrix structure corresponding to a ComponentArray. However, the following actions are assumed to hold on the resulting matrix type:","category":"page"},{"location":"interfaces/Array_and_Number/","page":"SciML Container (Array) and Number Interfaces","title":"SciML Container (Array) and Number Interfaces","text":"solve(LinearProblem(A::T,b::T2),linsolve) must be defined for a solver to work on a given SciML matrix type T.\nIf the matrix is an operator, i.e. 
a lazy construct, it should conform to the SciMLOperators interface.\nIf not a SciMLOperator, diagind(W::T) should be defined and @view(A[idxs])=@view(A[idxs]) + λ::T","category":"page"},{"location":"interfaces/PDE/#The-PDE-Definition-Interface","page":"The PDE Definition Interface","title":"The PDE Definition Interface","text":"","category":"section"},{"location":"interfaces/PDE/","page":"The PDE Definition Interface","title":"The PDE Definition Interface","text":"While ODEs u' = f(u,p,t) can be defined by a user function f, for PDEs the function form can be different for every PDE. How many functions, and how many inputs? This can always change. The SciML ecosystem solves this problem by using ModelingToolkit.jl to define PDESystem, a high-level symbolic description of the PDE to be consumed by other packages.","category":"page"},{"location":"interfaces/PDE/","page":"The PDE Definition Interface","title":"The PDE Definition Interface","text":"The vision for the common PDE interface is that a user should only have to specify their PDE once, mathematically, and have instant access to everything, from something as simple as a finite difference method with constant grid spacing to something as complex as a distributed multi-GPU discontinuous Galerkin method.","category":"page"},{"location":"interfaces/PDE/","page":"The PDE Definition Interface","title":"The PDE Definition Interface","text":"The key to the common PDE interface is a separation of the symbolic handling from the numerical world. The discretizers should not \"solve\" the PDE, but instead convert the mathematical specification into a numerical problem. Preferably, the transformation should be to another ModelingToolkit.jl AbstractSystem via a symbolic_discretize dispatch, but in some cases this cannot be done or will not be performant. Thus in some cases, only a discretize definition to an AbstractSciMLProblem is given, with symbolic_discretize simply providing diagnostic or lower-level information about the construction process.","category":"page"},{"location":"interfaces/PDE/","page":"The PDE Definition Interface","title":"The PDE Definition Interface","text":"These elementary problems, such as solving linear systems Ax=b, solving nonlinear systems f(x)=0, ODEs, etc., are all defined by SciMLBase.jl, so that numerical solvers can all target these common forms. 
Thus someone who works on linear solvers doesn't necessarily need to be working on a Discontinuous Galerkin or finite element library, but instead \"linear solvers that are good for matrices A with properties ...\" which are then accessible by every other discretization method in the common PDE interface.","category":"page"},{"location":"interfaces/PDE/","page":"The PDE Definition Interface","title":"The PDE Definition Interface","text":"Similar to the rest of the AbstractSystem types, transformation and analyses functions will allow for simplifying the PDE before solving it, and constructing block symbolic functions like Jacobians.","category":"page"},{"location":"interfaces/PDE/#Constructors","page":"The PDE Definition Interface","title":"Constructors","text":"","category":"section"},{"location":"interfaces/PDE/","page":"The PDE Definition Interface","title":"The PDE Definition Interface","text":"ModelingToolkit.PDESystem","category":"page"},{"location":"interfaces/PDE/#ModelingToolkit.PDESystem","page":"The PDE Definition Interface","title":"ModelingToolkit.PDESystem","text":"struct PDESystem <: AbstractMultivariateSystem\n\nA system of partial differential equations.\n\nFields\n\neqs: The equations which define the PDE.\nbcs: The boundary conditions.\ndomain: The domain for the independent variables.\nivs: The independent variables.\ndvs: The dependent variables.\nps: The parameters.\ndefaults: The default values to use when initial conditions and/or parameters are not supplied in ODEProblem.\n\nconnector_type: Type of the system.\n\nsystems: The internal systems. These are required to have unique names.\n\nanalytic: A vector of explicit symbolic expressions for the analytic solutions of each dependent variable. e.g. analytic = [u(t, x) ~ a*sin(c*t) * cos(k*x)].\n\nanalytic_func: A vector of functions for the analytic solutions of each dependent variable. Will be generated from analytic if not provided. Should have the same argument signature as the variable, and a ps argument as the last argument, which takes an indexable of parameter values in the order you specified them in ps. e.g. analytic_func = [u(t, x) => (ps, t, x) -> ps[1]*sin(ps[2]*t) * cos(ps[3]*x)].\n\nname: The name of the system.\n\nmetadata: Metadata for the system, to be used by downstream packages.\n\ngui_metadata: Metadata for MTK GUI.\n\nExample\n\nusing ModelingToolkit\n\n@parameters x\n@variables t u(..)\nDxx = Differential(x)^2\nDtt = Differential(t)^2\nDt = Differential(t)\n\n#2D PDE\nC=1\neq = Dtt(u(t,x)) ~ C^2*Dxx(u(t,x))\n\n# Initial and boundary conditions\nbcs = [u(t,0) ~ 0.,# for all t > 0\n u(t,1) ~ 0.,# for all t > 0\n u(0,x) ~ x*(1. - x), #for all 0 < x < 1\n Dt(u(0,x)) ~ 0. ] #for all 0 < x < 1]\n\n# Space and time domains\ndomains = [t ∈ (0.0,1.0),\n x ∈ (0.0,1.0)]\n\n@named pde_system = PDESystem(eq,bcs,domains,[t,x],[u])\n\n\n\n\n\n","category":"type"},{"location":"interfaces/PDE/#Domains-(WIP)","page":"The PDE Definition Interface","title":"Domains (WIP)","text":"","category":"section"},{"location":"interfaces/PDE/","page":"The PDE Definition Interface","title":"The PDE Definition Interface","text":"Domains are specifying by saying indepvar in domain, where indepvar is a single or a collection of independent variables, and domain is the chosen domain type. A 2-tuple can be used to indicate an Interval. 
Thus forms for the indepvar can be like:","category":"page"},{"location":"interfaces/PDE/","page":"The PDE Definition Interface","title":"The PDE Definition Interface","text":"t ∈ (0.0,1.0)\n(t,x) ∈ UnitDisk()\n[v,w,x,y,z] ∈ VectorUnitBall(5)","category":"page"},{"location":"interfaces/PDE/#Domain-Types-(WIP)","page":"The PDE Definition Interface","title":"Domain Types (WIP)","text":"","category":"section"},{"location":"interfaces/PDE/","page":"The PDE Definition Interface","title":"The PDE Definition Interface","text":"Interval(a,b): Defines the domain of an interval from a to b (requires explicit","category":"page"},{"location":"interfaces/PDE/","page":"The PDE Definition Interface","title":"The PDE Definition Interface","text":"import from DomainSets.jl, but a 2-tuple can be used instead)","category":"page"},{"location":"interfaces/PDE/#discretize-and-symbolic_discretize","page":"The PDE Definition Interface","title":"discretize and symbolic_discretize","text":"","category":"section"},{"location":"interfaces/PDE/","page":"The PDE Definition Interface","title":"The PDE Definition Interface","text":"The only functions which act on a PDESystem are the following:","category":"page"},{"location":"interfaces/PDE/","page":"The PDE Definition Interface","title":"The PDE Definition Interface","text":"discretize(sys,discretizer): produces the outputted AbstractSystem or AbstractSciMLProblem.\nsymbolic_discretize(sys,discretizer): produces a debugging symbolic description of the discretized problem.","category":"page"},{"location":"interfaces/PDE/#Boundary-Conditions-(WIP)","page":"The PDE Definition Interface","title":"Boundary Conditions (WIP)","text":"","category":"section"},{"location":"interfaces/PDE/#Transformations","page":"The PDE Definition Interface","title":"Transformations","text":"","category":"section"},{"location":"interfaces/PDE/#Analyses","page":"The PDE Definition Interface","title":"Analyses","text":"","category":"section"},{"location":"interfaces/PDE/#Discretizer-Ecosystem","page":"The PDE Definition Interface","title":"Discretizer Ecosystem","text":"","category":"section"},{"location":"interfaces/PDE/#NeuralPDE.jl:-PhysicsInformedNN","page":"The PDE Definition Interface","title":"NeuralPDE.jl: PhysicsInformedNN","text":"","category":"section"},{"location":"interfaces/PDE/","page":"The PDE Definition Interface","title":"The PDE Definition Interface","text":"NeuralPDE.jl defines the PhysicsInformedNN discretizer which uses a DiffEqFlux.jl neural network to solve the differential equation.","category":"page"},{"location":"interfaces/PDE/#MethodOfLines.jl:-MOLFiniteDifference-(WIP)","page":"The PDE Definition Interface","title":"MethodOfLines.jl: MOLFiniteDifference (WIP)","text":"","category":"section"},{"location":"interfaces/PDE/","page":"The PDE Definition Interface","title":"The PDE Definition Interface","text":"MethodOfLines.jl defines the MOLFiniteDifference discretizer which performs a finite difference discretization using the DiffEqOperators.jl stencils. 
These stencils make use of NNLib.jl for fast operations on semi-linear domains.","category":"page"}] +[{"location":"interfaces/SciMLFunctions/#scimlfunctions","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"","category":"section"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"The SciML ecosystem provides an extensive interface for declaring extra functions associated with the differential equation's data. In traditional libraries there is usually only one option: the Jacobian. However, we allow for a large array of pre-computed functions to speed up the calculations. This is offered via the SciMLFunction types which can be passed to the problems.","category":"page"},{"location":"interfaces/SciMLFunctions/#Definition-of-the-AbstractSciMLFunction-Interface","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"Definition of the AbstractSciMLFunction Interface","text":"","category":"section"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"The following standard principles should be adhered to across all AbstractSciMLFunction instantiations.","category":"page"},{"location":"interfaces/SciMLFunctions/#Common-Function-Choice-Definitions","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"Common Function Choice Definitions","text":"","category":"section"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"The full interface available to the solvers is as follows:","category":"page"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"jac: The Jacobian of the differential equation with respect to the state variable u at a time t with parameters p.\nparamjac: The Jacobian of the differential equation with respect to p at state u at time t.\nanalytic: Defines an analytical solution using u0 at time t with p which will cause the solvers to return errors. Used for testing.\nsyms: Allows you to name your variables for automatic names in plots and other output.\njac_prototype: Defines the type to be used for any internal Jacobians within the solvers.\nsparsity: Defines the sparsity pattern to be used for the sparse differentiation schemes. By default this is equal to jac_prototype. See the sparsity handling portion of this page for more information.\ncolorvec: The coloring pattern used by the sparse differentiator. 
See the sparsity handling portion of this page for more information.\nobserved: A function which allows for generating other observables from a solution.","category":"page"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"Each function type additionally has some specific arguments, refer to their documentation for details.","category":"page"},{"location":"interfaces/SciMLFunctions/#In-place-Specification-and-No-Recompile-Mode","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"In-place Specification and No-Recompile Mode","text":"","category":"section"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"Each SciMLFunction type can be called with an \"is inplace\" (iip) choice.","category":"page"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"ODEFunction(f)\nODEFunction{iip}(f)","category":"page"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"which is a boolean for whether the function is in the inplace form (mutating to change the first value). This is automatically determined using the methods table but note that for full type-inferability of the AbstractSciMLProblem this iip-ness should be specified.","category":"page"},{"location":"interfaces/SciMLFunctions/#Specialization-Choices","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"Specialization Choices","text":"","category":"section"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"Each SciMLFunction type allows for specialization choices","category":"page"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"ODEFunction{iip,specialization}(f)","category":"page"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"which designates how the compiler should specialize on the model function f. For more details on specialization choices, see the SciMLProblems page.","category":"page"},{"location":"interfaces/SciMLFunctions/#Specifying-Jacobian-Types","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"Specifying Jacobian Types","text":"","category":"section"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"The jac field of an inplace style SciMLFunction has the signature jac(J,u,p,t), which updates the Jacobian J in-place. The intended type for J can sometimes be inferred (e.g. when it is just a dense Matrix), but not in general. 
To supply the type information, you can provide a jac_prototype in the function's constructor.","category":"page"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"The following example creates an inplace ODEFunction whose Jacobian is a Diagonal:","category":"page"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"using LinearAlgebra\nf = (du,u,p,t) -> du .= t .* u\njac = (J,u,p,t) -> (J[1,1] = t; J[2,2] = t; J)\njp = Diagonal(zeros(2))\nfun = ODEFunction(f; jac=jac, jac_prototype=jp)","category":"page"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"Note that the integrators will always make a deep copy of fun.jac_prototype, so there's no worry of aliasing.","category":"page"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"In general the jacobian prototype can be anything that has mul! defined, in particular sparse matrices or custom lazy types that support mul!. A special case is when the jac_prototype is a AbstractSciMLOperator, in which case you do not need to supply jac as it is automatically set to update_coefficients!. Refer to the SciMLOperators section for more information on setting up time/parameter dependent operators.","category":"page"},{"location":"interfaces/SciMLFunctions/#Sparsity-Handling","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"Sparsity Handling","text":"","category":"section"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"The solver libraries internally use packages such as FiniteDiff.jl and SparseDiffTools.jl for high performance calculation of sparse Jacobians and Hessians, along with matrix-free calculations of Jacobian-Vector products (Jv), vector-Jacobian products (v'J), and Hessian-vector products (H*v). The SciML interface gives users the ability to control these connections in order to allow for top notch performance.","category":"page"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"The key arguments in the SciMLFunction is the prototype, which is an object that will be used as the underlying Jacobian/Hessian. Thus if one wants to use a sparse Jacobian, one should specify jac_prototype to be a sparse matrix. The sparsity pattern used in the differentiation scheme is defined by sparsity. By default, sparsity=jac_prototype, meaning that the sparse automatic differentiation scheme should specialize on the sparsity pattern given by the actual sparsity pattern. This can be overridden to say perform partial matrix coloring approximations. Additionally, the color vector for the sparse differentiation directions can be specified directly via colorvec. 
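As an illustrative sketch only (the tridiagonal test system, its sparsity pattern, and the coloring below are hypothetical, assuming SciMLBase and SparseArrays are loaded; they are not part of the interface definition), one might pass a sparse jac_prototype together with an explicit colorvec:\n\nusing SparseArrays\n# a small stand-in system whose Jacobian is tridiagonal\nf = (du, u, p, t) -> (du[1] = -u[1] + u[2]; du[2] = u[1] - 2u[2] + u[3]; du[3] = u[2] - u[3]; nothing)\n# the same sparse matrix serves as the Jacobian storage and as the sparsity pattern\njp = spdiagm(-1 => ones(2), 0 => ones(3), 1 => ones(2))\n# three colors are enough to column-color a tridiagonal pattern\nfun = ODEFunction(f; jac_prototype = jp, sparsity = jp, colorvec = [1, 2, 3])\n\n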
For more information on how these arguments control the differentiation process, see the aforementioned differentiation library documentations.","category":"page"},{"location":"interfaces/SciMLFunctions/#Traits","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"Traits","text":"","category":"section"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"SciMLBase.isinplace(f::SciMLBase.AbstractSciMLFunction)","category":"page"},{"location":"interfaces/SciMLFunctions/#AbstractSciMLFunction-API","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"AbstractSciMLFunction API","text":"","category":"section"},{"location":"interfaces/SciMLFunctions/#Abstract-SciML-Functions","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"Abstract SciML Functions","text":"","category":"section"},{"location":"interfaces/SciMLFunctions/","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLFunctions (Jacobians, Sparsity, Etc.)","text":"SciMLBase.AbstractDiffEqFunction\nSciMLBase.AbstractODEFunction\nSciMLBase.AbstractSDEFunction\nSciMLBase.AbstractDDEFunction\nSciMLBase.AbstractDAEFunction\nSciMLBase.AbstractRODEFunction\nSciMLBase.AbstractDiscreteFunction\nSciMLBase.AbstractSDDEFunction\nSciMLBase.AbstractNonlinearFunction","category":"page"},{"location":"interfaces/SciMLFunctions/#SciMLBase.AbstractDiffEqFunction","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLBase.AbstractDiffEqFunction","text":"abstract type AbstractDiffEqFunction{iip} <: SciMLBase.AbstractSciMLFunction{iip}\n\nBase for types defining differential equation functions.\n\n\n\n\n\n","category":"type"},{"location":"interfaces/SciMLFunctions/#SciMLBase.AbstractODEFunction","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLBase.AbstractODEFunction","text":"abstract type AbstractODEFunction{iip} <: SciMLBase.AbstractDiffEqFunction{iip}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/SciMLFunctions/#SciMLBase.AbstractSDEFunction","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLBase.AbstractSDEFunction","text":"abstract type AbstractSDEFunction{iip} <: SciMLBase.AbstractDiffEqFunction{iip}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/SciMLFunctions/#SciMLBase.AbstractDDEFunction","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLBase.AbstractDDEFunction","text":"abstract type AbstractDDEFunction{iip} <: SciMLBase.AbstractDiffEqFunction{iip}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/SciMLFunctions/#SciMLBase.AbstractDAEFunction","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLBase.AbstractDAEFunction","text":"abstract type AbstractDAEFunction{iip} <: SciMLBase.AbstractDiffEqFunction{iip}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/SciMLFunctions/#SciMLBase.AbstractRODEFunction","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLBase.AbstractRODEFunction","text":"abstract type AbstractRODEFunction{iip} <: SciMLBase.AbstractDiffEqFunction{iip}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/SciMLFunctions/#SciMLBase.AbstractDiscreteFunction","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLBase.AbstractDiscreteFunction","text":"abstract type AbstractDiscreteFunction{iip} <: SciMLBase.AbstractDiffEqFunction{iip}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/SciMLFunctions/#SciMLBase.AbstractSDDEFunction","page":"SciMLFunctions 
(Jacobians, Sparsity, Etc.)","title":"SciMLBase.AbstractSDDEFunction","text":"abstract type AbstractSDDEFunction{iip} <: SciMLBase.AbstractDiffEqFunction{iip}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/SciMLFunctions/#SciMLBase.AbstractNonlinearFunction","page":"SciMLFunctions (Jacobians, Sparsity, Etc.)","title":"SciMLBase.AbstractNonlinearFunction","text":"abstract type AbstractNonlinearFunction{iip} <: SciMLBase.AbstractSciMLFunction{iip}\n\n\n\n\n\n","category":"type"},{"location":"fundamentals/FAQ/#Frequently-Asked-Questions","page":"Frequently Asked Questions","title":"Frequently Asked Questions","text":"","category":"section"},{"location":"fundamentals/FAQ/#What-are-the-code-styling-rules-for-SciML?","page":"Frequently Asked Questions","title":"What are the code styling rules for SciML?","text":"","category":"section"},{"location":"fundamentals/FAQ/","page":"Frequently Asked Questions","title":"Frequently Asked Questions","text":"All SciML libraries are supposed to follow SciMLStyle. Any deviation from that style is something to be fixed.","category":"page"},{"location":"fundamentals/FAQ/#Where-do-I-find-more-information-on-the-internals-of-some-packages?","page":"Frequently Asked Questions","title":"Where do I find more information on the internals of some packages?","text":"","category":"section"},{"location":"fundamentals/FAQ/","page":"Frequently Asked Questions","title":"Frequently Asked Questions","text":"The SciML Developer Documentation describes the internals of some of the larger solver libraries at length.","category":"page"},{"location":"fundamentals/FAQ/#What-are-the-community-practices-that-SciML-developers-should-use?","page":"Frequently Asked Questions","title":"What are the community practices that SciML developers should use?","text":"","category":"section"},{"location":"fundamentals/FAQ/","page":"Frequently Asked Questions","title":"Frequently Asked Questions","text":"See ColPrac: Contributor's Guide on Collaborative Practices for Community Packages","category":"page"},{"location":"fundamentals/FAQ/#Are-there-developer-programs-to-help-fund-parties-interested-in-helping-develop-SciML?","page":"Frequently Asked Questions","title":"Are there developer programs to help fund parties interested in helping develop SciML?","text":"","category":"section"},{"location":"fundamentals/FAQ/","page":"Frequently Asked Questions","title":"Frequently Asked Questions","text":"Yes! See the SciML Developer Programs webpage.","category":"page"},{"location":"interfaces/Problems/#scimlproblems","page":"SciMLProblems","title":"SciMLProblems","text":"","category":"section"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"The cornerstone of the SciML common interface is the problem type definition. These definitions are the encoding of mathematical problems into a numerically computable form.","category":"page"},{"location":"interfaces/Problems/#Note-About-Symbolics-and-ModelingToolkit","page":"SciMLProblems","title":"Note About Symbolics and ModelingToolkit","text":"","category":"section"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"The symbolic analog to the problem interface is the ModelingToolkit AbstractSystem. For example, ODESystem is the symbolic analog to ODEProblem. 
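For instance, a minimal sketch (assuming ModelingToolkit.jl is loaded; the exponential-decay model, the parameter k, and the chosen values are purely illustrative) of going from the symbolic system to the numerical problem:\n\nusing ModelingToolkit\n@parameters k\n@variables t x(t)\nD = Differential(t)\n# symbolic system: dx/dt = -k*x\n@named sys = ODESystem([D(x) ~ -k * x], t)\n# construct the numerical ODEProblem from the symbolic ODESystem\nprob = ODEProblem(structural_simplify(sys), [x => 1.0], (0.0, 1.0), [k => 0.5])\n\n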
Each of these system types has a method for constructing the associated problem and function types.","category":"page"},{"location":"interfaces/Problems/#Definition-of-the-AbstractSciMLProblem-Interface","page":"SciMLProblems","title":"Definition of the AbstractSciMLProblem Interface","text":"","category":"section"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"The following standard principles should be adhered to across all AbstractSciMLProblem instantiations.","category":"page"},{"location":"interfaces/Problems/#In-place-Specification","page":"SciMLProblems","title":"In-place Specification","text":"","category":"section"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"Each AbstractSciMLProblem type can be called with an \"is inplace\" (iip) choice. For example:","category":"page"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"ODEProblem(f,u0,tspan,p)\nODEProblem{iip}(f,u0,tspan,p)","category":"page"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"which is a boolean for whether the function is in the inplace form (mutating to change the first value). This is automatically determined using the methods table, but note that for full type-inferability of the AbstractSciMLProblem this iip-ness should be specified.","category":"page"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"Additionally, the functions are fully specialized to reduce the runtimes. If one would instead prefer not to specialize on the functions in order to reduce compile time, then one can set recompile to false.","category":"page"},{"location":"interfaces/Problems/#Specialization-Levels","page":"SciMLProblems","title":"Specialization Levels","text":"","category":"section"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"Specialization levels in problem definitions are used to control the amount of compilation specialization performed on the model functions in order to trade off between runtime performance, simplicity, and compile-time performance. The default choice of specialization is AutoSpecialize, which seeks to allow for using fully precompiled solvers in common scenarios but falls back to a runtime-optimal approach when further customization is used.","category":"page"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"Specialization levels are given as the second type parameter in AbstractSciMLProblem constructors. 
For example, this is done via:","category":"page"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"ODEProblem{iip,specialization}(f,u0,tspan,p)","category":"page"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"Note that iip choice is required for specialization choices to be made.","category":"page"},{"location":"interfaces/Problems/#Specialization-Choices","page":"SciMLProblems","title":"Specialization Choices","text":"","category":"section"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"SciMLBase.AbstractSpecialization\nSciMLBase.AutoSpecialize\nSciMLBase.NoSpecialize\nSciMLBase.FunctionWrapperSpecialize\nSciMLBase.FullSpecialize","category":"page"},{"location":"interfaces/Problems/#SciMLBase.AbstractSpecialization","page":"SciMLProblems","title":"SciMLBase.AbstractSpecialization","text":"abstract type AbstractSpecialization\n\nSupertype for the specialization types. Controls the compilation and function specialization behavior of SciMLFunctions, ultimately controlling the runtime vs compile-time trade-off.\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AutoSpecialize","page":"SciMLProblems","title":"SciMLBase.AutoSpecialize","text":"struct AutoSpecialize <: SciMLBase.AbstractSpecialization\n\nThe default specialization level for problem functions. AutoSpecialize works by applying a function wrap just-in-time before the solve process to disable just-in-time re-specialization of the solver to the specific choice of model f and thus allow for using a cached solver compilation from a different f. This wrapping process can lead to a small decreased runtime performance with a benefit of a greatly decreased compile-time.\n\nNote About Benchmarking and Runtime Optimality\n\nIt is recommended that AutoSpecialize is not used in any benchmarking due to the potential effect of function wrapping on runtimes. AutoSpecialize's use case is targeted at decreased latency for REPL performance and not for cases where where top runtime performance is required (such as in optimization loops). Generally, for non-stiff equations the cost will be minimal and potentially not even measurable. For stiff equations, function wrapping has the limitation that only chunk sized 1 Dual numbers are allowed, which can decrease Jacobian construction performance.\n\nLimitations of AutoSpecialize\n\nThe following limitations are not fundamental to the implementation of AutoSpecialize, but are instead chosen as a compromise between default precompilation times and ease of maintenance. Please open an issue to discuss lifting any potential limitations.\n\nAutoSpecialize is only setup to wrap the functions from in-place ODEs. Other cases are excluded for the time being due to time limitations.\nAutoSpecialize will only lead to compilation reuse if the ODEFunction's other functions (such as jac and tgrad) are the default nothing. These could be JIT wrapped as well in a future version.\nAutoSpecialize'd functions are only compatible with Jacobian calculations performed with chunk size 1, and only with tag DiffEqBase.OrdinaryDiffEqTag(). Thus ODE solvers written on the common interface must be careful to detect the AutoSpecialize case and perform differentiation under these constraints, use finite differencing, or manually unwrap before solving. 
This will lead to decreased runtime performance for sufficiently large Jacobians.\nAutoSpecialize only wraps on Julia v1.8 and higher.\nAutoSpecialize does not handle cases with units. If unitful values are detected, wrapping is automatically disabled.\nAutoSpecialize only wraps cases for which promote_rule is defined between u0 and dual numbers, u0 and t, and for which ArrayInterface.promote_eltype is defined on u0 to dual numbers.\nAutoSpecialize only wraps cases for which f.mass_matrix isa UniformScaling, the default.\nAutoSpecialize does not wrap cases where f isa AbstractSciMLOperator\nBy default, only the u0 isa Vector{Float64}, eltype(tspan) isa Float64, and typeof(p) isa Union{Vector{Float64},SciMLBase.NullParameters} are specialized by the solver libraries. Other forms can be specialized with AutoSpecialize, but must be done in the precompilation of downstream libraries.\nAutoSpecialized functions are manually unwrapped in adjoint methods in SciMLSensitivity.jl in order to allow compiler support for automatic differentiation. Improved versions of adjoints which decrease the recompilation surface will come in non-breaking updates.\n\nCases where automatic wrapping is disabled are equivalent to FullSpecialize.\n\nExample\n\nf(du,u,p,t) = (du .= u)\n\n# Note this is the same as ODEProblem(f, [1.0], (0.0,1.0))\n# If no preferences are set\nODEProblem{true, SciMLBase.AutoSpecialize}(f, [1.0], (0.0,1.0))\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.NoSpecialize","page":"SciMLProblems","title":"SciMLBase.NoSpecialize","text":"struct NoSpecialize <: SciMLBase.AbstractSpecialization\n\nNoSpecialize forces SciMLFunctions to not specialize on the types of functions wrapped within it. This ultimately contributes to a form such that every prob.f type is the same, meaning compilation caches are fully reused, with the downside of losing runtime performance. NoSpecialize is the form that most fully trades off runtime for compile time. Unlike AutoSpecialize, NoSpecialize can be used with any SciMLFunction.\n\nExample\n\nf(du,u,p,t) = (du .= u)\nODEProblem{true, SciMLBase.NoSpecialize}(f, [1.0], (0.0,1.0))\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.FunctionWrapperSpecialize","page":"SciMLProblems","title":"SciMLBase.FunctionWrapperSpecialize","text":"struct FunctionWrapperSpecialize <: SciMLBase.AbstractSpecialization\n\nFunctionWrapperSpecialize is an eager wrapping choice which performs a function wrapping during the ODEProblem construction. This performs the function wrapping at the earliest possible point, giving the best compile-time vs runtime performance, but with the difficulty that any usage of prob.f needs to account for the function wrapper's presence. While optimal in a performance sense, this method has many usability issues with nonstandard solvers and analyses as it requires unwrapping before re-wrapping for any type changes. Thus this method is not used by default. 
Given that the compile-time different is almost undetectable from AutoSpecialize, this method is mostly used as a benchmarking reference for speed of light for AutoSpecialize.\n\nLimitations of FunctionWrapperSpecialize\n\nFunctionWrapperSpecialize has all of the limitations of AutoSpecialize, but also includes the limitations:\n\nprob.f is directly specialized to the types of (u,p,t), and any usage of prob.f on other types first requires using SciMLBase.unwrapped_f(prob.f) to remove the function wrapper.\nFunctionWrapperSpecialize can only be used by the ODEProblem constructor. If an ODEFunction is being constructed, the user must manually use DiffEqBase.wrap_iip on f before calling ODEFunction{true,FunctionWrapperSpecialize}(f). This is a fundamental limitation of the approach as the types of (u,p,t) are required in the construction process and not accessible in the AbstractSciMLFunction constructors.\n\nExample\n\nf(du,u,p,t) = (du .= u)\nODEProblem{true, SciMLBase.FunctionWrapperSpecialize}(f, [1.0], (0.0,1.0))\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.FullSpecialize","page":"SciMLProblems","title":"SciMLBase.FullSpecialize","text":"struct FullSpecialize <: SciMLBase.AbstractSpecialization\n\nFullSpecialize is an eager specialization choice which directly types the AbstractSciMLFunction struct to match the type of the model f. This forces recompilation of the solver on each new function type f, leading to the most compile times with the benefit of having the best runtime performance.\n\nFullSpecialize should be used in all cases where top runtime performance is required, such as in long-running simulations and benchmarking.\n\nExample\n\nf(du,u,p,t) = (du .= u)\nODEProblem{true, SciMLBase.FullSpecialize}(f, [1.0], (0.0,1.0))\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"note: Note\nThe specialization level must be precompile snooped in the appropriate solver package in order to enable the full precompilation and system image generation for zero-latency usage. By default, this is only done with AutoSpecialize and on types u isa Vector{Float64}, eltype(tspan) isa Float64, and p isa Union{Vector{Float64}, SciMLBase.NullParameters}. Precompilation snooping in the solvers can be done using the Preferences.jl setup on the appropriate solver. See the solver library's documentation for more details.","category":"page"},{"location":"interfaces/Problems/#Default-Parameters","page":"SciMLProblems","title":"Default Parameters","text":"","category":"section"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"By default, AbstractSciMLProblem types use the SciMLBase.NullParameters() singleton to define the absence of parameters by default. The reason is because this throws an informative error if the parameter is used or accessed within the user's function, for example, p[1] will throw an informative error about forgetting to pass parameters.","category":"page"},{"location":"interfaces/Problems/#Keyword-Argument-Splatting","page":"SciMLProblems","title":"Keyword Argument Splatting","text":"","category":"section"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"All AbstractSciMLProblem types allow for passing keyword arguments that would get forwarded to the solver. 
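For example, a minimal sketch (the tolerances are arbitrary, and Tsit5 just stands in for any OrdinaryDiffEq.jl solver):\n\nf(u, p, t) = -u\n# abstol/reltol are not fields of ODEProblem; they are stored as extra keyword arguments in prob.kwargs\nprob = ODEProblem(f, 1.0, (0.0, 1.0); abstol = 1e-10, reltol = 1e-10)\n# and are forwarded to the solver when calling, e.g., solve(prob, Tsit5())\n\n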
The reason for this is that in many cases, like in EnsembleProblem usage, an AbstractSciMLProblem might be associated with some solver configuration, such as a callback or tolerance. Thus, for flexibility, the extra keyword arguments to the AbstractSciMLProblem are carried to the solver.","category":"page"},{"location":"interfaces/Problems/#problem_type","page":"SciMLProblems","title":"problem_type","text":"","category":"section"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"AbstractSciMLProblem types include a non-public API definition of problem_type which holds a trait type corresponding to the way the AbstractSciMLProblem was constructed. For example, if a SecondOrderODEProblem constructor is used, the returned problem is simply an ODEProblem for interoperability with any ODEProblem algorithm. However, in this case the problem_type will be populated with the SecondOrderODEProblem type, indicating the original definition and extra structure.","category":"page"},{"location":"interfaces/Problems/#Remake","page":"SciMLProblems","title":"Remake","text":"","category":"section"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"remake","category":"page"},{"location":"interfaces/Problems/#SciMLBase.remake","page":"SciMLProblems","title":"SciMLBase.remake","text":"remake(thing; )\n\nRe-construct thing with new field values specified by the keyword arguments.\n\n\n\n\n\nremake(prob::ODEProblem; f = missing, u0 = missing, tspan = missing,\n p = missing, kwargs = missing, _kwargs...)\n\nRemake the given ODEProblem. If u0 or p are given as symbolic maps ModelingToolkit.jl has to be loaded.\n\n\n\n\n\nremake(prob::BVProblem; f = missing, u0 = missing, tspan = missing,\n p = missing, kwargs = missing, problem_type = missing, _kwargs...)\n\nRemake the given BVProblem.\n\n\n\n\n\nremake(prob::SDEProblem; f = missing, u0 = missing, tspan = missing,\n p = missing, noise = missing, noise_rate_prototype = missing,\n seed = missing, kwargs = missing, _kwargs...)\n\nRemake the given SDEProblem.\n\n\n\n\n\nremake(prob::OptimizationProblem; f = missing, u0 = missing, p = missing,\n lb = missing, ub = missing, int = missing, lcons = missing, ucons = missing,\n sense = missing, kwargs = missing, _kwargs...)\n\nRemake the given OptimizationProblem. If u0 or p are given as symbolic maps ModelingToolkit.jl has to be loaded.\n\n\n\n\n\nremake(prob::NonlinearProblem; f = missing, u0 = missing, p = missing,\n problem_type = missing, kwargs = missing, _kwargs...)\n\nRemake the given NonlinearProblem. 
If u0 or p are given as symbolic maps ModelingToolkit.jl has to be loaded.\n\n\n\n\n\nremake(prob::NonlinearLeastSquaresProblem; f = missing, u0 = missing, p = missing,\n kwargs = missing, _kwargs...)\n\nRemake the given NonlinearLeastSquaresProblem.\n\n\n\n\n\n","category":"function"},{"location":"interfaces/Problems/#Problem-Traits","page":"SciMLProblems","title":"Problem Traits","text":"","category":"section"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"SciMLBase.isinplace(prob::SciMLBase.AbstractDEProblem)\nSciMLBase.is_diagonal_noise","category":"page"},{"location":"interfaces/Problems/#SciMLBase.isinplace-Tuple{SciMLBase.AbstractDEProblem}","page":"SciMLProblems","title":"SciMLBase.isinplace","text":"isinplace(prob::AbstractSciMLProblem)\n\nDetermine whether the function of the given problem operates in place or not.\n\n\n\n\n\n","category":"method"},{"location":"interfaces/Problems/#SciMLBase.is_diagonal_noise","page":"SciMLProblems","title":"SciMLBase.is_diagonal_noise","text":"is_diagonal_noise(prob::AbstractSciMLProblem)\n\n\n\n\n\n","category":"function"},{"location":"interfaces/Problems/#AbstractSciMLProblem-API","page":"SciMLProblems","title":"AbstractSciMLProblem API","text":"","category":"section"},{"location":"interfaces/Problems/#Defaults-and-Preferences","page":"SciMLProblems","title":"Defaults and Preferences","text":"","category":"section"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"SpecializationLevel at SciMLBase can be used to set the default specialization level. The following shows how to set the specialization default to FullSpecialize:","category":"page"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"using Preferences, UUIDs\nset_preferences!(UUID(\"0bca4576-84f4-4d90-8ffe-ffa030f20462\"), \"SpecializationLevel\" => \"FullSpecialize\")","category":"page"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"The default is AutoSpecialize.","category":"page"},{"location":"interfaces/Problems/#Abstract-SciMLProblems","page":"SciMLProblems","title":"Abstract SciMLProblems","text":"","category":"section"},{"location":"interfaces/Problems/","page":"SciMLProblems","title":"SciMLProblems","text":"SciMLBase.AbstractSciMLProblem\nSciMLBase.AbstractDEProblem\nSciMLBase.AbstractLinearProblem\nSciMLBase.AbstractNonlinearProblem\nSciMLBase.AbstractIntegralProblem\nSciMLBase.AbstractOptimizationProblem\nSciMLBase.AbstractNoiseProblem\nSciMLBase.AbstractODEProblem\nSciMLBase.AbstractDiscreteProblem\nSciMLBase.AbstractAnalyticalProblem\nSciMLBase.AbstractRODEProblem\nSciMLBase.AbstractSDEProblem\nSciMLBase.AbstractDAEProblem\nSciMLBase.AbstractDDEProblem\nSciMLBase.AbstractConstantLagDDEProblem\nSciMLBase.AbstractSecondOrderODEProblem\nSciMLBase.AbstractBVProblem\nSciMLBase.AbstractJumpProblem\nSciMLBase.AbstractSDDEProblem\nSciMLBase.AbstractConstantLagSDDEProblem\nSciMLBase.AbstractPDEProblem","category":"page"},{"location":"interfaces/Problems/#SciMLBase.AbstractSciMLProblem","page":"SciMLProblems","title":"SciMLBase.AbstractSciMLProblem","text":"abstract type AbstractSciMLProblem\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractDEProblem","page":"SciMLProblems","title":"SciMLBase.AbstractDEProblem","text":"abstract type AbstractDEProblem <: SciMLBase.AbstractSciMLProblem\n\nBase type for all DifferentialEquations.jl problems. 
Concrete subtypes of AbstractDEProblem contain the necessary information to fully define a differential equation of the corresponding type.\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractLinearProblem","page":"SciMLProblems","title":"SciMLBase.AbstractLinearProblem","text":"abstract type AbstractLinearProblem{bType, isinplace} <: SciMLBase.AbstractSciMLProblem\n\nBase for types which define linear systems.\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractNonlinearProblem","page":"SciMLProblems","title":"SciMLBase.AbstractNonlinearProblem","text":"abstract type AbstractNonlinearProblem{uType, isinplace} <: SciMLBase.AbstractDEProblem\n\nBase for types which define nonlinear solve problems (f(u)=0).\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractIntegralProblem","page":"SciMLProblems","title":"SciMLBase.AbstractIntegralProblem","text":"abstract type AbstractIntegralProblem{isinplace} <: SciMLBase.AbstractSciMLProblem\n\nBase for types which define integrals suitable for quadrature.\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractOptimizationProblem","page":"SciMLProblems","title":"SciMLBase.AbstractOptimizationProblem","text":"abstract type AbstractOptimizationProblem{isinplace} <: SciMLBase.AbstractSciMLProblem\n\nBase for types which define equations for optimization.\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractNoiseProblem","page":"SciMLProblems","title":"SciMLBase.AbstractNoiseProblem","text":"abstract type AbstractNoiseProblem <: SciMLBase.AbstractDEProblem\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractODEProblem","page":"SciMLProblems","title":"SciMLBase.AbstractODEProblem","text":"abstract type AbstractODEProblem{uType, tType, isinplace} <: SciMLBase.AbstractDEProblem\n\nBase for types which define ODE problems.\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractDiscreteProblem","page":"SciMLProblems","title":"SciMLBase.AbstractDiscreteProblem","text":"abstract type AbstractDiscreteProblem{uType, tType, isinplace} <: SciMLBase.AbstractODEProblem{uType, tType, isinplace}\n\nBase for types which define discrete problems.\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractAnalyticalProblem","page":"SciMLProblems","title":"SciMLBase.AbstractAnalyticalProblem","text":"abstract type AbstractAnalyticalProblem{uType, tType, isinplace} <: SciMLBase.AbstractODEProblem{uType, tType, isinplace}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractRODEProblem","page":"SciMLProblems","title":"SciMLBase.AbstractRODEProblem","text":"abstract type AbstractRODEProblem{uType, tType, isinplace, ND} <: SciMLBase.AbstractDEProblem\n\nBase for types which define RODE problems.\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractSDEProblem","page":"SciMLProblems","title":"SciMLBase.AbstractSDEProblem","text":"abstract type AbstractSDEProblem{uType, tType, isinplace, ND} <: SciMLBase.AbstractRODEProblem{uType, tType, isinplace, ND}\n\nBase for types which define SDE problems.\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractDAEProblem","page":"SciMLProblems","title":"SciMLBase.AbstractDAEProblem","text":"abstract type AbstractDAEProblem{uType, duType, tType, isinplace} <: SciMLBase.AbstractDEProblem\n\nBase for types which define 
DAE problems.\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractDDEProblem","page":"SciMLProblems","title":"SciMLBase.AbstractDDEProblem","text":"abstract type AbstractDDEProblem{uType, tType, lType, isinplace} <: SciMLBase.AbstractDEProblem\n\nBase for types which define DDE problems.\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractConstantLagDDEProblem","page":"SciMLProblems","title":"SciMLBase.AbstractConstantLagDDEProblem","text":"abstract type AbstractConstantLagDDEProblem{uType, tType, lType, isinplace} <: SciMLBase.AbstractDDEProblem{uType, tType, lType, isinplace}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractSecondOrderODEProblem","page":"SciMLProblems","title":"SciMLBase.AbstractSecondOrderODEProblem","text":"abstract type AbstractSecondOrderODEProblem{uType, tType, isinplace} <: SciMLBase.AbstractODEProblem{uType, tType, isinplace}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractBVProblem","page":"SciMLProblems","title":"SciMLBase.AbstractBVProblem","text":"abstract type AbstractBVProblem{uType, tType, isinplace} <: SciMLBase.AbstractODEProblem{uType, tType, isinplace}\n\nBase for types which define BVP problems.\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractJumpProblem","page":"SciMLProblems","title":"SciMLBase.AbstractJumpProblem","text":"abstract type AbstractJumpProblem{P, J} <: SciMLBase.AbstractDEProblem\n\nBase for types which define jump problems.\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractSDDEProblem","page":"SciMLProblems","title":"SciMLBase.AbstractSDDEProblem","text":"abstract type AbstractSDDEProblem{uType, tType, lType, isinplace, ND} <: SciMLBase.AbstractDEProblem\n\nBase for types which define SDDE problems.\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractConstantLagSDDEProblem","page":"SciMLProblems","title":"SciMLBase.AbstractConstantLagSDDEProblem","text":"abstract type AbstractConstantLagSDDEProblem{uType, tType, lType, isinplace, ND} <: SciMLBase.AbstractSDDEProblem{uType, tType, lType, isinplace, ND}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Problems/#SciMLBase.AbstractPDEProblem","page":"SciMLProblems","title":"SciMLBase.AbstractPDEProblem","text":"abstract type AbstractPDEProblem <: SciMLBase.AbstractDEProblem\n\nBase for types which define PDE problems.\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Solutions/#SciMLSolutions","page":"SciMLSolutions","title":"SciMLSolutions","text":"","category":"section"},{"location":"interfaces/Solutions/#Definition-of-the-AbstractSciMLSolution-Interface","page":"SciMLSolutions","title":"Definition of the AbstractSciMLSolution Interface","text":"","category":"section"},{"location":"interfaces/Solutions/","page":"SciMLSolutions","title":"SciMLSolutions","text":"All AbstractSciMLSolution types are a subset of some AbstractArray. Types with time series (like ODESolution) are subtypes of RecursiveArrayTools.AbstractVectorOfArray and RecursiveArrayTools.AbstractDiffEqArray where appropriate. 
Types without a time series (like OptimizationSolution) are directly subsets of AbstractArray.","category":"page"},{"location":"interfaces/Solutions/#Array-Interface","page":"SciMLSolutions","title":"Array Interface","text":"","category":"section"},{"location":"interfaces/Solutions/","page":"SciMLSolutions","title":"SciMLSolutions","text":"Instead of working on the Vector{uType} directly, we can use the provided array interface.","category":"page"},{"location":"interfaces/Solutions/","page":"SciMLSolutions","title":"SciMLSolutions","text":"sol[j]","category":"page"},{"location":"interfaces/Solutions/","page":"SciMLSolutions","title":"SciMLSolutions","text":"to access the value at timestep j (if the timeseries was saved), and","category":"page"},{"location":"interfaces/Solutions/","page":"SciMLSolutions","title":"SciMLSolutions","text":"sol.t[j]","category":"page"},{"location":"interfaces/Solutions/","page":"SciMLSolutions","title":"SciMLSolutions","text":"to access the value of t at timestep j. For multi-dimensional systems, this will address first by component and lastly by time, and thus","category":"page"},{"location":"interfaces/Solutions/","page":"SciMLSolutions","title":"SciMLSolutions","text":"sol[i,j]","category":"page"},{"location":"interfaces/Solutions/","page":"SciMLSolutions","title":"SciMLSolutions","text":"will be the ith component at timestep j. Hence, sol[j][i] == sol[i, j]. This is done because Julia is column-major, so the leading dimension should be contiguous in memory. If the independent variables had shape (for example, was a matrix), then i is the linear index. We can also access solutions with shape:","category":"page"},{"location":"interfaces/Solutions/","page":"SciMLSolutions","title":"SciMLSolutions","text":"sol[i,k,j]","category":"page"},{"location":"interfaces/Solutions/","page":"SciMLSolutions","title":"SciMLSolutions","text":"gives the [i,k] component of the system at timestep j. 
The colon operator is supported, meaning that","category":"page"},{"location":"interfaces/Solutions/","page":"SciMLSolutions","title":"SciMLSolutions","text":"sol[i,:]","category":"page"},{"location":"interfaces/Solutions/","page":"SciMLSolutions","title":"SciMLSolutions","text":"gives the timeseries for the ith component.","category":"page"},{"location":"interfaces/Solutions/#Common-Field-Names","page":"SciMLSolutions","title":"Common Field Names","text":"","category":"section"},{"location":"interfaces/Solutions/","page":"SciMLSolutions","title":"SciMLSolutions","text":"u: the solution values\nt: the independent variable values, matching the length of the solution, if applicable\nresid: the residual of the solution, if applicable\noriginal: the solution object from the original solver, if it's a wrapper algorithm\nretcode: see the documentation section on return codes\nprob: the problem that was solved\nalg: the algorithm used to solve the problem","category":"page"},{"location":"interfaces/Solutions/#retcodes","page":"SciMLSolutions","title":"Return Codes (RetCodes)","text":"","category":"section"},{"location":"interfaces/Solutions/","page":"SciMLSolutions","title":"SciMLSolutions","text":"The solution types have a retcode field which returns a SciMLBase.ReturnCode.T (from EnumX.jl, see that package for the semantics of handling EnumX types) signifying the error or satisfaction state of the solution.","category":"page"},{"location":"interfaces/Solutions/","page":"SciMLSolutions","title":"SciMLSolutions","text":"SciMLBase.ReturnCode","category":"page"},{"location":"interfaces/Solutions/#SciMLBase.ReturnCode","page":"SciMLSolutions","title":"SciMLBase.ReturnCode","text":"SciML.ReturnCode\n\nSciML.ReturnCode is the standard return code enum interface for the SciML interface. Return codes are notes given by the solvers to indicate the state of the solution, for example whether it successfully solved the equations, whether it failed to solve the equations, and importantly, why it exited.\n\nUsing SciML.ReturnCode\n\nSciML.ReturnCode use the interface of EnumX.jl and thus inherits all of the behaviors of being an EnumX. This includes the Enum type itself being referred to as SciML.ReturnCode.T, and each of the constituent enum states being referred to via getproperty, i.e. SciML.ReturnCode.Success.\n\nNote About Success Checking\n\nPrevious iterations of the interface suggested using sol.retcode == :Success, however, that is now not advised instead should be replaced with SciMLBase.successful_retcode(sol). The reason is that there are many different codes that can be interpreted as successful, such as ReturnCode.Terminated which means successfully used terminate!(integrator) to end an integration at a user-specified condition. As such, successful_retcode is the most general way to query for if the solver did not error.\n\nProperties\n\nsuccessful_retcode(retcode::ReturnCode.T): Determines whether the output enum is considered a success state of the solver, i.e. the solver successfully solved the equations. 
ReturnCode.Success is the most basic form, simply declaring that it was successful, but many more informative success return codes exist as well.\n\n\n\n\n\n","category":"module"},{"location":"interfaces/Solutions/#Return-Code-Traits","page":"SciMLSolutions","title":"Return Code Traits","text":"","category":"section"},{"location":"interfaces/Solutions/","page":"SciMLSolutions","title":"SciMLSolutions","text":"SciMLBase.successful_retcode","category":"page"},{"location":"interfaces/Solutions/#SciMLBase.successful_retcode","page":"SciMLSolutions","title":"SciMLBase.successful_retcode","text":"successful_retcode(retcode::ReturnCode.T)::Bool successful_retcode(sol::AbstractSciMLSolution)::Bool\n\nReturns a boolean for whether a return code should be interpreted as a form of success.\n\n\n\n\n\n","category":"function"},{"location":"interfaces/Solutions/#Specific-Return-Codes","page":"SciMLSolutions","title":"Specific Return Codes","text":"","category":"section"},{"location":"interfaces/Solutions/","page":"SciMLSolutions","title":"SciMLSolutions","text":"SciMLBase.ReturnCode.Default\nSciMLBase.ReturnCode.Success\nSciMLBase.ReturnCode.Terminated\nSciMLBase.ReturnCode.DtNaN\nSciMLBase.ReturnCode.MaxIters\nSciMLBase.ReturnCode.DtLessThanMin\nSciMLBase.ReturnCode.Unstable\nSciMLBase.ReturnCode.InitialFailure\nSciMLBase.ReturnCode.ConvergenceFailure\nSciMLBase.ReturnCode.Failure\nSciMLBase.ReturnCode.ExactSolutionLeft\nSciMLBase.ReturnCode.ExactSolutionRight\nSciMLBase.ReturnCode.FloatingPointLimit","category":"page"},{"location":"interfaces/Solutions/#SciMLBase.ReturnCode.Default","page":"SciMLSolutions","title":"SciMLBase.ReturnCode.Default","text":"ReturnCode.Default\n\nThe default state of the solver. If this return code is given, then the solving process is either still in process or the solver library has not been setup with the return code interface and thus the return code is undetermined.\n\nCommon Reasons for Seeing this Return Code\n\nA common reason for Default return codes is that a solver is a non-SciML solver which does not fully conform to the interface. Please open an issue if this is seen and it will be improved.\nAnother common reason for a Default return code is if the solver is probed internally before the solving process is done, such as through the callback interface. Return codes are set to Default to start and are changed to Success and other return codes upon finishing the solving process or hitting a numerical difficulty.\n\nProperties\n\nsuccessful_retcode = false\n\n\n\n\n\n","category":"constant"},{"location":"interfaces/Solutions/#SciMLBase.ReturnCode.Success","page":"SciMLSolutions","title":"SciMLBase.ReturnCode.Success","text":"ReturnCode.Success\n\nThe success state of the solver. If this return code is given, then the solving process was successful, but no extra information about that success is given.\n\nCommon Reasons for Seeing this Return Code\n\nThis is the most common return code and most solvers will give this return code if the solving process went as expected without any errors or detected numerical issues.\n\nProperties\n\nsuccessful_retcode = true\n\n\n\n\n\n","category":"constant"},{"location":"interfaces/Solutions/#SciMLBase.ReturnCode.Terminated","page":"SciMLSolutions","title":"SciMLBase.ReturnCode.Terminated","text":"ReturnCode.Terminated\n\nThe successful termination state of the solver. If this return code is given, then the solving process was successful at terminating the solve, usually through a callback affect! 
via terminate!(integrator).\n\nCommon Reasons for Seeing this Return Code\n\nThe most common reason for seeing this return code is if a user calls a callback which uses terminate!(integrator) to halt the integration at a user-chosen stopping point.\nAnother common reason for this return code is due to implicit terminate! statements in some library callbacks. For example, SteadyStateCallback uses terminate! internally, so solutions which reach steady state will have a ReturnCode.Terminated state instead of a ReturnCode.Success state. Similarly, problems solved via SteadyStateDiffEq.jl will have this ReturnCode.Terminated state if a timestepping method is used to solve to steady state.\n\nProperties\n\nsuccessful_retcode = true\n\n\n\n\n\n","category":"constant"},{"location":"interfaces/Solutions/#SciMLBase.ReturnCode.DtNaN","page":"SciMLSolutions","title":"SciMLBase.ReturnCode.DtNaN","text":"ReturnCode.DtNaN\n\nA failure exit state of the solver. If this return code is given, then the solving process was unsuccessful and exited early because the dt of the integration was determined to be NaN and thus the solver could not continue.\n\nCommon Reasons for Seeing this Return Code\n\nThe most common reason for seeing this return code is because the automatic dt selection algorithm is used but the starting derivative has a NaN or Inf derivative term. Double check that the f(u0,p,t0) term is well-defined without NaN or Inf values.\nAnother common reason for this return code is because of a user set dt which is calculated to be a NaN. If solve(prob,alg,dt=x) is used, double check that x is not NaN.\n\nProperties\n\nsuccessful_retcode = false\n\n\n\n\n\n","category":"constant"},{"location":"interfaces/Solutions/#SciMLBase.ReturnCode.MaxIters","page":"SciMLSolutions","title":"SciMLBase.ReturnCode.MaxIters","text":"ReturnCode.MaxIters\n\nA failure exit state of the solver. If this return code is given, then the solving process was unsuccessful and exited early because the solver's iterations hit the maxiters either set by default or by the user in the solve/init command.\n\nNote about Nonlinear Optimization\n\nIn nonlinear optimization, many solvers (such as OptimizationOptimisers.Adam) do not have an exit criterion other than iters == maxiters. In this case, the solvers will iterate until maxiters and exit with a Success return code, as that is a successful run of the solver and not considered to be an error state. Solves with early termination criteria, such as Optim.BFGS exiting when the gradient is sufficiently close to zero, will give ReturnCode.MaxIters on exits which require the maximum iteration.\n\nCommon Reasons for Seeing this Return Code\n\nThis commonly occurs in ODE solving if a non-stiff method (e.g. Tsit5) is chosen for a stiff ODE. It is recommended that in such cases, one tries a stiff ODE solver.\nThis commonly occurs in optimization and nonlinear solvers if the tolerance given to solve is too low and cannot be achieved due to floating point error or the condition number of the solver matrix. Double check that the chosen tolerance is numerically possible.\n\nProperties\n\nsuccessful_retcode = false\n\n\n\n\n\n","category":"constant"},{"location":"interfaces/Solutions/#SciMLBase.ReturnCode.DtLessThanMin","page":"SciMLSolutions","title":"SciMLBase.ReturnCode.DtLessThanMin","text":"ReturnCode.DtLessThanMin\n\nA failure exit state of the solver.
If this return code is given, then the solving process was unsuccessful and exited early because the dt of the integration was made to be less than dtmin, i.e. dt < dtmin.\n\nCommon Reasons for Seeing this Return Code\n\nThe most common reason for seeing this return code is because the integration is going unstable. As f(u,p,t) -> infinity, the time steps required by the solver to accurately handle the dynamics decreases. When it gets sufficiently small, dtmin, an exit is thrown as the solution is likely unstable. dtmin is also chosen to be around the value where floating point issues cause t + dt == t, and thus a dt of that size is impossible at floating point precision.\nAnother common reason for this return code is if domain constraints are set, such as by using isoutofdomain, but the domain constraint is incorrect. For example, if one is solving the ODE f(u,p,t) = -u - 1, one may think \"but I want a solution with u > 0 and thus I will set isoutofdomain(u,p,t) = u < 0. However, the true solution of this ODE is not positive, and thus what will occur is that the solver will try to decrease dt until it can give an accurate solution that is positive. As this is impossible, it will continue to shrink the dt until dt < dtmin and then exit with this return code.\n\nProperties\n\nsuccessful_retcode = false\n\n\n\n\n\n","category":"constant"},{"location":"interfaces/Solutions/#SciMLBase.ReturnCode.Unstable","page":"SciMLSolutions","title":"SciMLBase.ReturnCode.Unstable","text":"ReturnCode.Unstable\n\nA failure exit state of the solver. If this return code is given, then the solving process was unsuccessful and exited early because the unstable_check function, as given by the unstable_check common keyword argument (or its default), give a true at the current state.\n\nCommon Reasons for Seeing this Return Code\n\nThe most common reason for seeing this return code is because u contains a NaN or Inf value. The default unstable_check only checks for these values.\n\nProperties\n\nsuccessful_retcode = false\n\n\n\n\n\n","category":"constant"},{"location":"interfaces/Solutions/#SciMLBase.ReturnCode.InitialFailure","page":"SciMLSolutions","title":"SciMLBase.ReturnCode.InitialFailure","text":"ReturnCode.InitialFailure\n\nA failure exit state of the solver. If this return code is given, then the solving process was unsuccessful because the initialization process failed.\n\nCommon Reasons for Seeing this Return Code\n\nThe most common reason for seeing this return code is because the initialization process of a DAE solver failed to find consistent initial conditions, which can occur if the differentiation index of the DAE solver is too high. Most DAE solvers only allow for index-1 DAEs, and so an index-2 DAE will fail during this initialization. To solve this kind of problem, use ModelingToolkit.jl and its structural_simplify method to reduce the index of the DAE.\nAnother common reason for this return code is if the initial condition was not suitable for the numerical solve. For example, the initial point had a NaN or Inf. Or in optimization, this can occur if the initial point is outside of the bound constraints given by the user.\n\nProperties\n\nsuccessful_retcode = false\n\n\n\n\n\n","category":"constant"},{"location":"interfaces/Solutions/#SciMLBase.ReturnCode.ConvergenceFailure","page":"SciMLSolutions","title":"SciMLBase.ReturnCode.ConvergenceFailure","text":"ReturnCode.ConvergenceFailure\n\nA failure exit state of the solver. 
If this return code is given, then the solving process was unsuccessful because internal nonlinear solver iterations failed to converge.\n\nCommon Reasons for Seeing this Return Code\n\nThe most common reason for seeing this return code is because an inappropriate nonlinear solver was chosen. For example, fixed-point iteration avoids the Jacobian and is cheaper per iteration, but using it inside a stiff ODE solver on a stiff problem will cause the nonlinear iterations to fail to converge.\nFor nonlinear solvers, this can occur if a certain threshold was exceeded. For example, in approximate Jacobian solvers like Broyden and Klement, if the number of Jacobian resets exceeds the threshold, then this return code is given.\n\nProperties\n\nsuccessful_retcode = false\n\n\n\n\n\n","category":"constant"},{"location":"interfaces/Solutions/#SciMLBase.ReturnCode.Failure","page":"SciMLSolutions","title":"SciMLBase.ReturnCode.Failure","text":"ReturnCode.Failure\n\nA failure exit state of the solver. If this return code is given, then the solving process was unsuccessful but no extra information is given.\n\nCommon Reasons for Seeing this Return Code\n\nThe most common reason for seeing this return code is because the solver is a wrapped solver (i.e. a Fortran code) which does not provide any extra information about its exit state. If this is from a Julia-based solver, please open an issue.\n\nProperties\n\nsuccessful_retcode = false\n\n\n\n\n\n","category":"constant"},{"location":"interfaces/Solutions/#SciMLBase.ReturnCode.ExactSolutionLeft","page":"SciMLSolutions","title":"SciMLBase.ReturnCode.ExactSolutionLeft","text":"ReturnCode.ExactSolutionLeft\n\nThe success state of the solver. If this return code is given, then the solving process was successful, and the left solution was given.\n\nCommon Reasons for Seeing this Return Code\n\nThe most common reason for this return code is that a bracketing nonlinear solver, such as bisection, iterated to convergence but was unable to give the exact f(x)=0 solution due to floating point precision issues, and thus it gives the first floating point value to the left for x.\n\nProperties\n\nsuccessful_retcode = true\n\n\n\n\n\n","category":"constant"},{"location":"interfaces/Solutions/#SciMLBase.ReturnCode.ExactSolutionRight","page":"SciMLSolutions","title":"SciMLBase.ReturnCode.ExactSolutionRight","text":"ReturnCode.ExactSolutionRight\n\nThe success state of the solver. If this return code is given, then the solving process was successful, and the right solution was given.\n\nCommon Reasons for Seeing this Return Code\n\nThe most common reason for this return code is that a bracketing nonlinear solver, such as bisection, iterated to convergence but was unable to give the exact f(x)=0 solution due to floating point precision issues, and thus it gives the first floating point value to the right for x.\n\nProperties\n\nsuccessful_retcode = true\n\n\n\n\n\n","category":"constant"},{"location":"interfaces/Solutions/#SciMLBase.ReturnCode.FloatingPointLimit","page":"SciMLSolutions","title":"SciMLBase.ReturnCode.FloatingPointLimit","text":"ReturnCode.FloatingPointLimit\n\nThe success state of the solver.
If this return code is given, then the solving process was successful, and the closest floating point value to the solution was given.\n\nCommon Reasons for Seeing this Return Code\n\nThe most common reason for this return code is that a bracketing nonlinear solver, such as Falsi, iterated to convergence but was unable to give the exact f(x)=0 solution due to floating point precision issues, and thus it gives the closest floating point value to the true solution for x.\n\nProperties\n\nsuccessful_retcode = true\n\n\n\n\n\n","category":"constant"},{"location":"interfaces/Solutions/#Solution-Traits","page":"SciMLSolutions","title":"Solution Traits","text":"","category":"section"},{"location":"interfaces/Solutions/#AbstractSciMLSolution-API","page":"SciMLSolutions","title":"AbstractSciMLSolution API","text":"","category":"section"},{"location":"interfaces/Solutions/#Abstract-SciML-Solutions","page":"SciMLSolutions","title":"Abstract SciML Solutions","text":"","category":"section"},{"location":"interfaces/Solutions/","page":"SciMLSolutions","title":"SciMLSolutions","text":"SciMLBase.AbstractSciMLSolution\nSciMLBase.AbstractNoTimeSolution\nSciMLBase.AbstractTimeseriesSolution\nSciMLBase.AbstractNoiseProcess\nSciMLBase.AbstractEnsembleSolution\nSciMLBase.AbstractLinearSolution\nSciMLBase.AbstractNonlinearSolution\nSciMLBase.AbstractIntegralSolution\nSciMLBase.AbstractSteadyStateSolution\nSciMLBase.AbstractAnalyticalSolution\nSciMLBase.AbstractODESolution\nSciMLBase.AbstractDDESolution\nSciMLBase.AbstractRODESolution\nSciMLBase.AbstractDAESolution","category":"page"},{"location":"interfaces/Solutions/#SciMLBase.AbstractSciMLSolution","page":"SciMLSolutions","title":"SciMLBase.AbstractSciMLSolution","text":"Union of all base solution types.\n\nUses a Union so that solution types can be <: AbstractArray\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Solutions/#SciMLBase.AbstractNoTimeSolution","page":"SciMLSolutions","title":"SciMLBase.AbstractNoTimeSolution","text":"abstract type AbstractNoTimeSolution{T, N} <: AbstractArray{T, N}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Solutions/#SciMLBase.AbstractTimeseriesSolution","page":"SciMLSolutions","title":"SciMLBase.AbstractTimeseriesSolution","text":"abstract type AbstractTimeseriesSolution{T, N, A} <: RecursiveArrayTools.AbstractDiffEqArray{T, N, A}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Solutions/#SciMLBase.AbstractNoiseProcess","page":"SciMLSolutions","title":"SciMLBase.AbstractNoiseProcess","text":"abstract type AbstractNoiseProcess{T, N, A, isinplace} <: RecursiveArrayTools.AbstractDiffEqArray{T, N, A}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Solutions/#SciMLBase.AbstractEnsembleSolution","page":"SciMLSolutions","title":"SciMLBase.AbstractEnsembleSolution","text":"abstract type AbstractEnsembleSolution{T, N, A} <: RecursiveArrayTools.AbstractVectorOfArray{T, N, A}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Solutions/#SciMLBase.AbstractLinearSolution","page":"SciMLSolutions","title":"SciMLBase.AbstractLinearSolution","text":"abstract type AbstractLinearSolution{T, N} <: SciMLBase.AbstractNoTimeSolution{T, N}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Solutions/#SciMLBase.AbstractNonlinearSolution","page":"SciMLSolutions","title":"SciMLBase.AbstractNonlinearSolution","text":"abstract type AbstractNonlinearSolution{T, N} <: SciMLBase.AbstractNoTimeSolution{T,
N}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Solutions/#SciMLBase.AbstractIntegralSolution","page":"SciMLSolutions","title":"SciMLBase.AbstractIntegralSolution","text":"abstract type AbstractIntegralSolution{T, N} <: SciMLBase.AbstractNoTimeSolution{T, N}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Solutions/#SciMLBase.AbstractSteadyStateSolution","page":"SciMLSolutions","title":"SciMLBase.AbstractSteadyStateSolution","text":"abstract type AbstractNonlinearSolution{T, N} <: SciMLBase.AbstractNoTimeSolution{T, N}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Solutions/#SciMLBase.AbstractAnalyticalSolution","page":"SciMLSolutions","title":"SciMLBase.AbstractAnalyticalSolution","text":"abstract type AbstractAnalyticalSolution{T, N, S} <: SciMLBase.AbstractTimeseriesSolution{T, N, S}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Solutions/#SciMLBase.AbstractODESolution","page":"SciMLSolutions","title":"SciMLBase.AbstractODESolution","text":"abstract type AbstractODESolution{T, N, S} <: SciMLBase.AbstractTimeseriesSolution{T, N, S}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Solutions/#SciMLBase.AbstractDDESolution","page":"SciMLSolutions","title":"SciMLBase.AbstractDDESolution","text":"abstract type AbstractDDESolution{T, N, S} <: SciMLBase.AbstractODESolution{T, N, S}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Solutions/#SciMLBase.AbstractRODESolution","page":"SciMLSolutions","title":"SciMLBase.AbstractRODESolution","text":"abstract type AbstractRODESolution{T, N, S} <: SciMLBase.AbstractODESolution{T, N, S}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Solutions/#SciMLBase.AbstractDAESolution","page":"SciMLSolutions","title":"SciMLBase.AbstractDAESolution","text":"abstract type AbstractDAESolution{T, N, S} <: SciMLBase.AbstractODESolution{T, N, S}\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Common_Keywords/#Common-Keyword-Arguments","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"","category":"section"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"The following defines the keyword arguments which are meant to be preserved throughout all of the AbstractSciMLProblem cases (where applicable).","category":"page"},{"location":"interfaces/Common_Keywords/#Default-Algorithm-Hinting","page":"Common Keyword Arguments","title":"Default Algorithm Hinting","text":"","category":"section"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"To help choose the default algorithm, the keyword argument alg_hints is provided to solve. alg_hints is a Vector{Symbol} which describe the problem at a high level to the solver. 
The options describe problem properties such as stiffness; for example, alg_hints = [:stiff] hints that a stiff solver should be chosen.","category":"page"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"This functionality is derived via the benchmarks in SciMLBenchmarks.jl.","category":"page"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"Currently this is only implemented for the differential equation solvers.","category":"page"},{"location":"interfaces/Common_Keywords/#Output-Control","page":"Common Keyword Arguments","title":"Output Control","text":"","category":"section"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"These arguments control the output behavior of the solvers. The default is maximum output to give the best interactive user experience, but output can be reduced all the way to only saving the solution at the final timepoint.","category":"page"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"The following options are all related to output control. See the \"Examples\" section at the end of this page for some example usage.","category":"page"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"dense: Denotes whether to save the extra pieces required for dense (continuous) output. Default is save_everystep && isempty(saveat) for algorithms which have the ability to produce dense output, i.e. by default it's true unless the user has turned off saving on steps or has chosen a saveat value. If dense=false, the solution still acts like a function, and sol(t) is a linear interpolation between the saved time points.\nsaveat: Denotes specific times to save the solution at, during the solving phase. The solver will save at each of the timepoints in this array in the most efficient manner available to the solver. If only saveat is given, then the arguments save_everystep and dense are false by default. If saveat is given a number, then it will automatically expand to tspan[1]:saveat:tspan[2]. For methods where interpolation is not possible, saveat may be equivalent to tstops. The default value is [].\nsave_idxs: Denotes the indices for the components of the equation to save. Defaults to saving all indices. For example, if you are solving a 3-dimensional ODE, and given save_idxs = [1, 3], only the first and third components of the solution will be outputted. Note that in this case the output solution will be two-dimensional.\ntstops: Denotes extra times that the timestepping algorithm must step to. This should be used to help the solver deal with discontinuities and singularities, since stepping exactly at the time of the discontinuity will improve accuracy. If a method cannot change timesteps (fixed timestep multistep methods), then tstops will use an interpolation, matching the behavior of saveat. If a method cannot change timesteps and also cannot interpolate, then tstops must be a multiple of dt or else an error will be thrown. Default is [].\nd_discontinuities: Denotes locations of discontinuities in low order derivatives. This will force FSAL algorithms which assume derivative continuity to re-evaluate the derivatives at the point of discontinuity. The default is [].\nsave_everystep: Saves the result at every step. Default is true if isempty(saveat).\nsave_on: Denotes whether intermediate solutions are saved.
This overrides the settings of dense, saveat and save_everystep and is used by some applications to manually turn off saving temporarily. Everyday use of the solvers should leave this unchanged. Defaults to true.\nsave_start: Denotes whether the initial condition should be included in the solution type as the first timepoint. Defaults to true.\nsave_end: Denotes whether the final timepoint is forced to be saved, regardless of the other saving settings. Defaults to true.\ninitialize_save: Denotes whether to save after the callback initialization phase (when u_modified=true). Defaults to true.","category":"page"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"Note that dense requires save_everystep=true and saveat=false.","category":"page"},{"location":"interfaces/Common_Keywords/#Stepsize-Control","page":"Common Keyword Arguments","title":"Stepsize Control","text":"","category":"section"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"These arguments control the timestepping routines.","category":"page"},{"location":"interfaces/Common_Keywords/#Basic-Stepsize-Control","page":"Common Keyword Arguments","title":"Basic Stepsize Control","text":"","category":"section"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"adaptive: Turns on adaptive timestepping for appropriate methods. Default is true.\nabstol: Absolute tolerance in adaptive timestepping. This is the tolerance on local error estimates, not necessarily the global error (though these quantities are related).\nreltol: Relative tolerance in adaptive timestepping. This is the tolerance on local error estimates, not necessarily the global error (though these quantities are related).\ndt: Sets the initial stepsize. This is also the stepsize for fixed timestep methods. Defaults to an automatic choice if the method is adaptive.\ndtmax: Maximum dt for adaptive timestepping. Defaults are package-dependent.\ndtmin: Minimum dt for adaptive timestepping. Defaults are package-dependent.","category":"page"},{"location":"interfaces/Common_Keywords/#Fixed-Stepsize-Usage","page":"Common Keyword Arguments","title":"Fixed Stepsize Usage","text":"","category":"section"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"Note that if a method does not have adaptivity, the following rules apply:","category":"page"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"If dt is set, then the algorithm will step with size dt each iteration.\nIf tstops and dt are both set, then the algorithm will step with either a size dt, or use a smaller step to hit the tstops point.\nIf tstops is set without dt, then the algorithm will step directly to each value in tstops\nIf neither dt nor tstops are set, the solver will throw an error.","category":"page"},{"location":"interfaces/Common_Keywords/#Memory-Optimizations","page":"Common Keyword Arguments","title":"Memory Optimizations","text":"","category":"section"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"alias_u0: allows the solver to alias the initial condition array that is contained in the problem struct. Defaults to false.\ncache: pass a solver cache to decrease the construction time. 
This is not implemented for any of the problem interfaces at this moment.","category":"page"},{"location":"interfaces/Common_Keywords/#Miscellaneous","page":"Common Keyword Arguments","title":"Miscellaneous","text":"","category":"section"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"maxiters: Maximum number of iterations before stopping.\ncallback: Specifies a callback function that is called between iterations.\nverbose: Toggles whether warnings are thrown when the solver exits early. Defaults to true.","category":"page"},{"location":"interfaces/Common_Keywords/#Progress-Monitoring","page":"Common Keyword Arguments","title":"Progress Monitoring","text":"","category":"section"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"These arguments control the usage of the progressbar in the logger.","category":"page"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"progress: Turns on/off the Juno progressbar. Default is false.\nprogress_steps: Numbers of steps between updates of the progress bar. Default is 1000.\nprogress_name: Controls the name of the progressbar. Default is the name of the problem type.\nprogress_message: Controls the message with the progressbar. Defaults to showing dt, t, the maximum of u.","category":"page"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"The progress bars all use the Julia Logging interface in order to be generic to the IDE or programming tool that is used. For more information on how this is all put together, see this discussion.","category":"page"},{"location":"interfaces/Common_Keywords/#Error-Calculations","page":"Common Keyword Arguments","title":"Error Calculations","text":"","category":"section"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"If you are using the test problems (i.e. SciMLFunctions where f.analytic is defined), then options control the errors which are calculated. By default, any cheap error estimates are always calculated. 
Extra keyword arguments include:","category":"page"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"timeseries_errors\ndense_errors","category":"page"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"for specifying more expensive errors.","category":"page"},{"location":"interfaces/Common_Keywords/#Automatic-Differentiation-Control","page":"Common Keyword Arguments","title":"Automatic Differentiation Control","text":"","category":"section"},{"location":"interfaces/Common_Keywords/","page":"Common Keyword Arguments","title":"Common Keyword Arguments","text":"See the Automatic Differentiation page for a full description of sensealg","category":"page"},{"location":"interfaces/Algorithms/#SciMLAlgorithms","page":"SciMLAlgorithms","title":"SciMLAlgorithms","text":"","category":"section"},{"location":"interfaces/Algorithms/#Definition-of-the-AbstractSciMLAlgorithm-Interface","page":"SciMLAlgorithms","title":"Definition of the AbstractSciMLAlgorithm Interface","text":"","category":"section"},{"location":"interfaces/Algorithms/","page":"SciMLAlgorithms","title":"SciMLAlgorithms","text":"SciMLAlgorithms are defined as types which have dispatches to the function signature:","category":"page"},{"location":"interfaces/Algorithms/","page":"SciMLAlgorithms","title":"SciMLAlgorithms","text":"CommonSolve.solve(prob::AbstractSciMLProblem,alg::AbstractSciMLAlgorithm;kwargs...)","category":"page"},{"location":"interfaces/Algorithms/#Algorithm-Specific-Arguments","page":"SciMLAlgorithms","title":"Algorithm-Specific Arguments","text":"","category":"section"},{"location":"interfaces/Algorithms/","page":"SciMLAlgorithms","title":"SciMLAlgorithms","text":"Note that because the keyword arguments of solve are designed to be common across the whole problem type, algorithms should have the algorithm-specific keyword arguments defined as part of the algorithm constructor. For example, Rodas5 has a choice of autodiff::Bool which is not common across all ODE solvers, and thus autodiff is a algorithm-specific keyword argument handled via Rodas5(autodiff=true).","category":"page"},{"location":"interfaces/Algorithms/#Remake","page":"SciMLAlgorithms","title":"Remake","text":"","category":"section"},{"location":"interfaces/Algorithms/","page":"SciMLAlgorithms","title":"SciMLAlgorithms","text":"Note that remake is applicable to AbstractSciMLAlgorithm types, but this is not used in the public API. 
It's used for solvers to swap out components like ForwardDiff chunk sizes.","category":"page"},{"location":"interfaces/Algorithms/#Common-Algorithm-Keyword-Arguments","page":"SciMLAlgorithms","title":"Common Algorithm Keyword Arguments","text":"","category":"section"},{"location":"interfaces/Algorithms/","page":"SciMLAlgorithms","title":"SciMLAlgorithms","text":"Commonly used algorithm keyword arguments, such as autodiff, chunk_size, linsolve, and precs, are documented with the individual algorithms in the solver libraries.","category":"page"},{"location":"interfaces/Algorithms/#Traits","page":"SciMLAlgorithms","title":"Traits","text":"","category":"section"},{"location":"interfaces/Algorithms/","page":"SciMLAlgorithms","title":"SciMLAlgorithms","text":"SciMLBase.isautodifferentiable\nSciMLBase.allows_arbitrary_number_types\nSciMLBase.allowscomplex\nSciMLBase.isadaptive\nSciMLBase.isdiscrete\nSciMLBase.forwarddiffs_model\nSciMLBase.forwarddiffs_model_time","category":"page"},{"location":"interfaces/Algorithms/#SciMLBase.isautodifferentiable","page":"SciMLAlgorithms","title":"SciMLBase.isautodifferentiable","text":"isautodifferentiable(alg::AbstractDEAlgorithm)\n\nTrait declaration for whether an algorithm is compatible with direct automatic differentiation, i.e. can have algorithms like ForwardDiff or ReverseDiff attempt to differentiate directly through the solver.\n\nDefaults to false as only pure-Julia algorithms can have this be true.\n\n\n\n\n\n","category":"function"},{"location":"interfaces/Algorithms/#SciMLBase.allows_arbitrary_number_types","page":"SciMLAlgorithms","title":"SciMLBase.allows_arbitrary_number_types","text":"allows_arbitrary_number_types(alg::AbstractDEAlgorithm)\n\nTrait declaration for whether an algorithm is compatible with arbitrary number types, i.e. whether the state and time values may use number types beyond the standard Float64, such as BigFloat.\n\nDefaults to false as only pure-Julia algorithms can have this be true.\n\n\n\n\n\n","category":"function"},{"location":"interfaces/Algorithms/#SciMLBase.allowscomplex","page":"SciMLAlgorithms","title":"SciMLBase.allowscomplex","text":"allowscomplex(alg::AbstractDEAlgorithm)\n\nTrait declaration for whether an algorithm is compatible with having complex numbers as the state variables.\n\nDefaults to false.\n\n\n\n\n\n","category":"function"},{"location":"interfaces/Algorithms/#SciMLBase.isadaptive","page":"SciMLAlgorithms","title":"SciMLBase.isadaptive","text":"isadaptive(alg::AbstractDEAlgorithm)\n\nTrait declaration for whether an algorithm uses adaptivity, i.e.
has a non-quasi-static compute graph.\n\nDefaults to true.\n\n\n\n\n\nis_integrator_adaptive(i::DEIntegrator)\n\nChecks if the integrator is adaptive.\n\n\n\n\n\n","category":"function"},{"location":"interfaces/Algorithms/#SciMLBase.isdiscrete","page":"SciMLAlgorithms","title":"SciMLBase.isdiscrete","text":"isdiscrete(alg::AbstractDEAlgorithm)\n\nTrait declaration for whether an algorithm allows for discrete state values, such as integers.\n\nDefaults to false.\n\n\n\n\n\n","category":"function"},{"location":"interfaces/Algorithms/#SciMLBase.forwarddiffs_model","page":"SciMLAlgorithms","title":"SciMLBase.forwarddiffs_model","text":"forwarddiffs_model(alg::AbstractDEAlgorithm)\n\nTrait declaration for whether an algorithm uses ForwardDiff.jl on the model function, i.e. whether the model function is called with ForwardDiff.jl dual numbers.\n\nDefaults to false as only pure-Julia algorithms can have this be true.\n\n\n\n\n\n","category":"function"},{"location":"interfaces/Algorithms/#SciMLBase.forwarddiffs_model_time","page":"SciMLAlgorithms","title":"SciMLBase.forwarddiffs_model_time","text":"forwarddiffs_model_time(alg::AbstractDEAlgorithm)\n\nTrait declaration for whether an algorithm uses ForwardDiff.jl on the t argument of the model f(u,p,t), i.e. whether the model function is called with a ForwardDiff.jl dual number for t.\n\nDefaults to false as only a few pure-Julia algorithms (Rosenbrock methods) have this as true.\n\n\n\n\n\n","category":"function"},{"location":"interfaces/Algorithms/#Abstract-SciML-Algorithms","page":"SciMLAlgorithms","title":"Abstract SciML Algorithms","text":"","category":"section"},{"location":"interfaces/Algorithms/","page":"SciMLAlgorithms","title":"SciMLAlgorithms","text":"SciMLBase.AbstractSciMLAlgorithm\nSciMLBase.AbstractDEAlgorithm\nSciMLBase.AbstractLinearAlgorithm\nSciMLBase.AbstractNonlinearAlgorithm\nSciMLBase.AbstractIntervalNonlinearAlgorithm\nSciMLBase.AbstractQuadratureAlgorithm\nSciMLBase.AbstractOptimizationAlgorithm\nSciMLBase.AbstractSteadyStateAlgorithm\nSciMLBase.AbstractODEAlgorithm\nSciMLBase.AbstractSecondOrderODEAlgorithm\nSciMLBase.AbstractRODEAlgorithm\nSciMLBase.AbstractSDEAlgorithm\nSciMLBase.AbstractDAEAlgorithm\nSciMLBase.AbstractDDEAlgorithm\nSciMLBase.AbstractSDDEAlgorithm","category":"page"},{"location":"interfaces/Algorithms/#SciMLBase.AbstractSciMLAlgorithm","page":"SciMLAlgorithms","title":"SciMLBase.AbstractSciMLAlgorithm","text":"abstract type AbstractSciMLAlgorithm\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Algorithms/#SciMLBase.AbstractDEAlgorithm","page":"SciMLAlgorithms","title":"SciMLBase.AbstractDEAlgorithm","text":"abstract type AbstractDEAlgorithm <: SciMLBase.AbstractSciMLAlgorithm\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Algorithms/#SciMLBase.AbstractLinearAlgorithm","page":"SciMLAlgorithms","title":"SciMLBase.AbstractLinearAlgorithm","text":"abstract type AbstractLinearAlgorithm <: SciMLBase.AbstractSciMLAlgorithm\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Algorithms/#SciMLBase.AbstractNonlinearAlgorithm","page":"SciMLAlgorithms","title":"SciMLBase.AbstractNonlinearAlgorithm","text":"abstract type AbstractNonlinearAlgorithm <: SciMLBase.AbstractSciMLAlgorithm\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Algorithms/#SciMLBase.AbstractIntervalNonlinearAlgorithm","page":"SciMLAlgorithms","title":"SciMLBase.AbstractIntervalNonlinearAlgorithm","text":"abstract type AbstractIntervalNonlinearAlgorithm <:
SciMLBase.AbstractSciMLAlgorithm\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Algorithms/#SciMLBase.AbstractQuadratureAlgorithm","page":"SciMLAlgorithms","title":"SciMLBase.AbstractQuadratureAlgorithm","text":"abstract type AbstractIntegralAlgorithm <: SciMLBase.AbstractSciMLAlgorithm\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Algorithms/#SciMLBase.AbstractOptimizationAlgorithm","page":"SciMLAlgorithms","title":"SciMLBase.AbstractOptimizationAlgorithm","text":"abstract type AbstractOptimizationAlgorithm <: SciMLBase.AbstractDEAlgorithm\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Algorithms/#SciMLBase.AbstractSteadyStateAlgorithm","page":"SciMLAlgorithms","title":"SciMLBase.AbstractSteadyStateAlgorithm","text":"abstract type AbstractSteadyStateAlgorithm <: SciMLBase.AbstractDEAlgorithm\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Algorithms/#SciMLBase.AbstractODEAlgorithm","page":"SciMLAlgorithms","title":"SciMLBase.AbstractODEAlgorithm","text":"abstract type AbstractODEAlgorithm <: SciMLBase.AbstractDEAlgorithm\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Algorithms/#SciMLBase.AbstractSecondOrderODEAlgorithm","page":"SciMLAlgorithms","title":"SciMLBase.AbstractSecondOrderODEAlgorithm","text":"abstract type AbstractSecondOrderODEAlgorithm <: SciMLBase.AbstractDEAlgorithm\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Algorithms/#SciMLBase.AbstractRODEAlgorithm","page":"SciMLAlgorithms","title":"SciMLBase.AbstractRODEAlgorithm","text":"abstract type AbstractRODEAlgorithm <: SciMLBase.AbstractDEAlgorithm\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Algorithms/#SciMLBase.AbstractSDEAlgorithm","page":"SciMLAlgorithms","title":"SciMLBase.AbstractSDEAlgorithm","text":"abstract type AbstractSDEAlgorithm <: SciMLBase.AbstractDEAlgorithm\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Algorithms/#SciMLBase.AbstractDAEAlgorithm","page":"SciMLAlgorithms","title":"SciMLBase.AbstractDAEAlgorithm","text":"abstract type AbstractDAEAlgorithm <: SciMLBase.AbstractDEAlgorithm\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Algorithms/#SciMLBase.AbstractDDEAlgorithm","page":"SciMLAlgorithms","title":"SciMLBase.AbstractDDEAlgorithm","text":"abstract type AbstractDDEAlgorithm <: SciMLBase.AbstractDEAlgorithm\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Algorithms/#SciMLBase.AbstractSDDEAlgorithm","page":"SciMLAlgorithms","title":"SciMLBase.AbstractSDDEAlgorithm","text":"abstract type AbstractSDDEAlgorithm <: SciMLBase.AbstractDEAlgorithm\n\n\n\n\n\n","category":"type"},{"location":"interfaces/Init_Solve/#The-SciML-init-and-solve-Functions","page":"The SciML init and solve Functions","title":"The SciML init and solve Functions","text":"","category":"section"},{"location":"interfaces/Init_Solve/","page":"The SciML init and solve Functions","title":"The SciML init and solve Functions","text":"solve function has the default definition","category":"page"},{"location":"interfaces/Init_Solve/","page":"The SciML init and solve Functions","title":"The SciML init and solve Functions","text":"solve(args...; kwargs...) 
= solve!(init(args...; kwargs...))","category":"page"},{"location":"interfaces/Init_Solve/","page":"The SciML init and solve Functions","title":"The SciML init and solve Functions","text":"The interface for the three functions is as follows:","category":"page"},{"location":"interfaces/Init_Solve/","page":"The SciML init and solve Functions","title":"The SciML init and solve Functions","text":"init(::ProblemType, args...; kwargs...) :: IteratorType\nsolve!(::IteratorType) :: SolutionType","category":"page"},{"location":"interfaces/Init_Solve/","page":"The SciML init and solve Functions","title":"The SciML init and solve Functions","text":"where ProblemType, IteratorType, and SolutionType are the types defined in your package.","category":"page"},{"location":"interfaces/Init_Solve/","page":"The SciML init and solve Functions","title":"The SciML init and solve Functions","text":"To avoid method ambiguity, the first argument of solve, solve!, and init must be dispatched on the type defined in your package. For example, do not define a method such as","category":"page"},{"location":"interfaces/Init_Solve/","page":"The SciML init and solve Functions","title":"The SciML init and solve Functions","text":"init(::AbstractVector, ::AlgorithmType)","category":"page"},{"location":"interfaces/Init_Solve/#init-and-the-Iterator-Interface","page":"The SciML init and solve Functions","title":"init and the Iterator Interface","text":"","category":"section"},{"location":"interfaces/Init_Solve/","page":"The SciML init and solve Functions","title":"The SciML init and solve Functions","text":"init's return gives an IteratorType which is designed to allow the user to have more direct handling over the internal solving process. Because of this internal nature, the IteratorType has a less unified interface across problem types than other portions like ProblemType and SolutionType. For example, for differential equations this is the Integrator Interface designed for mutating solutions in a manner for callback implementation, which is distinctly different from the LinearSolve init interface which is designed for caching efficiency with reusing factorizations.","category":"page"},{"location":"interfaces/Init_Solve/#__solve-and-High-Level-Handling","page":"The SciML init and solve Functions","title":"__solve and High-Level Handling","text":"","category":"section"},{"location":"interfaces/Init_Solve/","page":"The SciML init and solve Functions","title":"The SciML init and solve Functions","text":"While init and solve are the common entry point for users, solver packages will mostly define dispatches on SciMLBase.__init and SciMLBase.__solve. The reason is because this allows for SciMLBase.init and SciMLBase.solve to have common implementations across all solvers for doing things such as checking for common errors and throwing high level messages. 
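As an illustration of these dispatch points, a hypothetical solver package could look like the following sketch (MyEuler is an invented algorithm name, the method only handles out-of-place ODE functions, and real packages deal with far more options and error states):

```julia
using SciMLBase

# Illustrative fixed-step explicit Euler algorithm; not a real SciML solver.
struct MyEuler <: SciMLBase.AbstractODEAlgorithm end

# Solver packages extend the double-underscore entry point so that the common
# solve/init layer can run its shared error checks and keyword handling first.
function SciMLBase.__solve(prob::SciMLBase.AbstractODEProblem, alg::MyEuler; dt, kwargs...)
    ts = prob.tspan[1]:dt:prob.tspan[2]
    us = Vector{typeof(prob.u0)}(undef, length(ts))
    us[1] = prob.u0
    for i in 2:length(ts)
        # explicit Euler step, assuming an out-of-place right-hand side f(u, p, t)
        us[i] = us[i - 1] + dt * prob.f(us[i - 1], prob.p, ts[i - 1])
    end
    SciMLBase.build_solution(prob, alg, ts, us; retcode = SciMLBase.ReturnCode.Success)
end
```

With such a dispatch in place, a call like solve(prob, MyEuler(); dt = 0.01) reaches this method after the shared high-level handling described above.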
Solvers can opt-out of the high level error handling by directly defining SciMLBase.init and SciMLBase.solve instead, though this is not recommended in order to allow for uniformity of the error messages.","category":"page"},{"location":"interfaces/Differentiation/#sensealg","page":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","title":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","text":"","category":"section"},{"location":"interfaces/Differentiation/","page":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","title":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","text":"Automatic differentiation control is done through the sensealg keyword argument. Hooks exist in the high level interfaces for solve which shuttle the definitions of automatic differentiation overloads to dispatches defined in DiffEqSensitivity.jl (should be renamed SciMLSensitivity.jl as it expands). This is done by first entering a top-level solve definition, for example:","category":"page"},{"location":"interfaces/Differentiation/","page":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","title":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","text":"function solve(prob::AbstractDEProblem, args...; sensealg=nothing,\n u0=nothing, p=nothing, kwargs...)\n u0 = u0 !== nothing ? u0 : prob.u0\n p = p !== nothing ? p : prob.p\n if sensealg === nothing && haskey(prob.kwargs, :sensealg)\n sensealg = prob.kwargs[:sensealg]\n end\n solve_up(prob, sensealg, u0, p, args...; kwargs...)\nend","category":"page"},{"location":"interfaces/Differentiation/","page":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","title":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","text":"solve_up then drops down the differentiable arguments as positional arguments, which is required for the ChainRules.jl interface. Then the ChainRules overloads are written on the solve_up calls, like:","category":"page"},{"location":"interfaces/Differentiation/","page":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","title":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","text":"function ChainRulesCore.frule(::typeof(solve_up), prob,\n sensealg::Union{Nothing,AbstractSensitivityAlgorithm},\n u0, p, args...;\n kwargs...)\n _solve_forward(prob, sensealg, u0, p, args...; kwargs...)\nend\n\nfunction ChainRulesCore.rrule(::typeof(solve_up), prob::SciMLBase.AbstractDEProblem,\n sensealg::Union{Nothing,AbstractSensitivityAlgorithm},\n u0, p, args...;\n kwargs...)\n _solve_adjoint(prob, sensealg, u0, p, args...; kwargs...)\nend","category":"page"},{"location":"interfaces/Differentiation/","page":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","title":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","text":"Default definitions then exist to throw an informative error if the sensitivity mechanism is not added:","category":"page"},{"location":"interfaces/Differentiation/","page":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","title":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","text":"function _concrete_solve_adjoint(args...; kwargs...)\n error(\"No adjoint rules exist. Check that you added `using DiffEqSensitivity`\")\nend\n\nfunction _concrete_solve_forward(args...; kwargs...)\n error(\"No sensitivity rules exist. 
Check that you added `using DiffEqSensitivity`\")\nend","category":"page"},{"location":"interfaces/Differentiation/","page":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","title":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","text":"The sensitivity mechanism is kept in a separate package because of the high dependency and load time cost introduced by the automatic differentiation libraries. Different choices of automatic differentiation are then selected by the sensealg keyword argument in solve, which is made into a positional argument in the _solve_adjoint and other functions in order to allow dispatch.","category":"page"},{"location":"interfaces/Differentiation/#SensitivityADPassThrough","page":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","title":"SensitivityADPassThrough","text":"","category":"section"},{"location":"interfaces/Differentiation/","page":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","title":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","text":"The special sensitivity algorithm SensitivityADPassThrough is used to ignore the internal sensitivity dispatches and instead do automatic differentiation directly through the solver. Generally this sensealg is only used internally.","category":"page"},{"location":"interfaces/Differentiation/#Note-about-ForwardDiff","page":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","title":"Note about ForwardDiff","text":"","category":"section"},{"location":"interfaces/Differentiation/","page":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","title":"Automatic Differentiation and Sensitivity Algorithms (Adjoints)","text":"ForwardDiff does not use ChainRules.jl and thus it completely ignores the special handling.","category":"page"},{"location":"#The-SciML-Common-Interface-for-Julia-Equation-Solvers","page":"Home","title":"The SciML Common Interface for Julia Equation Solvers","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"The SciML common interface ties together the numerical solvers of the Julia package ecosystem into a single unified interface. 
It is designed for maximal efficiency and parallelism, while incorporating essential features for large-scale scientific machine learning such as differentiability, composability, and sparsity.","category":"page"},{"location":"","page":"Home","title":"Home","text":"This documentation is made to pool together the docs of the various SciML libraries to paint the overarching picture, establish development norms, and document the shared/common functionality.","category":"page"},{"location":"#Domains-of-SciML","page":"Home","title":"Domains of SciML","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"The SciML common interface covers the following domains:","category":"page"},{"location":"","page":"Home","title":"Home","text":"Linear systems (LinearProblem)\nDirect methods for dense and sparse\nIterative solvers with preconditioning\nNonlinear Systems (NonlinearProblem)\nRootfinding for systems of nonlinear equations\nInterval Nonlinear Systems\nBracketing rootfinders for nonlinear equations with interval bounds\nIntegrals (quadrature) (IntegralProblem)\nDifferential Equations\nDiscrete equations (function maps, discrete stochastic (Gillespie/Markov) simulations) (DiscreteProblem)\nOrdinary differential equations (ODEs) (ODEProblem)\nSplit and Partitioned ODEs (Symplectic integrators, IMEX Methods) (SplitODEProblem)\nStochastic ordinary differential equations (SODEs or SDEs) (SDEProblem)\nStochastic differential-algebraic equations (SDAEs) (SDEProblem with mass matrices)\nRandom differential equations (RODEs or RDEs) (RODEProblem)\nDifferential algebraic equations (DAEs) (DAEProblem and ODEProblem with mass matrices)\nDelay differential equations (DDEs) (DDEProblem)\nNeutral, retarded, and algebraic delay differential equations (NDDEs, RDDEs, and DDAEs)\nStochastic delay differential equations (SDDEs) (SDDEProblem)\nExperimental support for stochastic neutral, retarded, and algebraic delay differential equations (SNDDEs, SRDDEs, and SDDAEs)\nMixed discrete and continuous equations (Hybrid Equations, Jump Diffusions) (AbstractDEProblems with callbacks)\nOptimization (OptimizationProblem)\nNonlinear (constrained) optimization\n(Stochastic/Delay/Differential-Algebraic) Partial Differential Equations (PDESystem)\nFinite difference and finite volume methods\nInterfaces to finite element methods\nPhysics-Informed Neural Networks (PINNs)\nIntegro-Differential Equations\nFractional Differential Equations","category":"page"},{"location":"","page":"Home","title":"Home","text":"The SciML common interface also includes ModelingToolkit.jl for defining such systems symbolically, allowing for optimizations like automated generation of parallel code, symbolic simplification, and generation of sparsity patterns.","category":"page"},{"location":"#Extended-SciML-Domain","page":"Home","title":"Extended SciML Domain","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"In addition to the purely numerical representations of mathematical objects, there are also sets of problem types associated with common mathematical algorithms. 
These are:","category":"page"},{"location":"","page":"Home","title":"Home","text":"Data-driven modeling\nDiscrete-time data-driven dynamical systems (DiscreteDataDrivenProblem)\nContinuous-time data-driven dynamical systems (ContinuousDataDrivenProblem)\nSymbolic regression (DirectDataDrivenProblem)\nUncertainty quantification and expected values (ExpectationProblem)","category":"page"},{"location":"#Inverse-Problems,-Parameter-Estimation,-and-Structural-Identification","page":"Home","title":"Inverse Problems, Parameter Estimation, and Structural Identification","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"We note that parameter estimation and inverse problems are solved directly on their constituent problem types using tools like DiffEqFlux.jl. Thus for example, there is no ODEInverseProblem, and instead ODEProblem is used to find the parameters p that solve the inverse problem.","category":"page"},{"location":"#Common-Interface-High-Level","page":"Home","title":"Common Interface High Level","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"The SciML interface is common as the usage of arguments is standardized across all of the problem domains. Underlying high level ideas include:","category":"page"},{"location":"","page":"Home","title":"Home","text":"All domains use the same interface of defining a AbstractSciMLProblem which is then solved via solve(prob,alg;kwargs), where alg is a AbstractSciMLAlgorithm. The keyword argument namings are standardized across the organization.\nAbstractSciMLProblems are generally defined by a SciMLFunction which can define extra details about a model function, such as its analytical Jacobian, its sparsity patterns and so on.\nThere is an organization-wide method for defining linear and nonlinear solvers used within other solvers, giving maximum control of performance to the user.\nTypes used within the packages are defined by the input types. For example, packages attempt to internally use the type of the initial condition as the type for the state within differential equation solvers.\nsolve calls should be thread-safe and parallel-safe.\ninit(prob,alg;kwargs) returns an iterator which allows for directly iterating over the solution process\nHigh performance is key. 
Any performance that is not at the top level is considered a bug and should be reported as such.\nAll functions have an in-place and out-of-place form, where the in-place form is made to utilize mutation for high performance on large-scale problems and the out-of-place form is for compatibility with tooling like static arrays and some reverse-mode automatic differentiation systems.","category":"page"},{"location":"#User-Facing-Solver-Libraries","page":"Home","title":"User-Facing Solver Libraries","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"DifferentialEquations.jl\nMulti-package interface of high performance numerical solvers of differential equations\nModelingToolkit.jl\nThe symbolic modeling package which implements the SciML symbolic common interface.\nLinearSolve.jl\nMulti-package interface for specifying linear solvers (direct, sparse, and iterative), along with tools for caching and preconditioners for use in large-scale modeling.\nNonlinearSolve.jl\nHigh performance numerical solving of nonlinear systems.\nIntegrals.jl\nMulti-package interface for high performance, batched, and parallelized numerical quadrature.\nOptimization.jl\nMulti-package interface for numerical solving of optimization problems.\nNeuralPDE.jl\nPhysics-Informed Neural Network (PINN) package for transforming partial differential equations into optimization problems.\nDiffEqOperators.jl\nAutomated finite difference method (FDM) package for transforming partial differential equations into nonlinear problems and ordinary differential equations.\nDiffEqFlux.jl\nHigh level package for scientific machine learning applications, such as neural and universal differential equations, solving of inverse problems, parameter estimation, nonlinear optimal control, and more.\nDataDrivenDiffEq.jl\nMulti-package interface for data-driven modeling, Koopman dynamic mode decomposition, symbolic regression/sparsification, and automated model discovery.\nSciMLExpectations.jl\nExtension to the dynamical modeling tools for calculating expectations.","category":"page"},{"location":"#Interface-Implementation-Libraries","page":"Home","title":"Interface Implementation Libraries","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"SciMLBase.jl\nThe core package defining the interface which is consumed by the modeling and solver packages.\nDiffEqBase.jl\nThe core package defining the extended interface which is consumed by the differential equation solver packages.\nSciMLSensitivity.jl\nA package which pools together the definition of derivative overloads to define the common sensealg automatic differentiation interface.\nDiffEqNoiseProcess.jl\nA package which defines the stochastic AbstractNoiseProcess interface for the SciML ecosystem.\nRecursiveArrayTools.jl\nA package which defines the underlying AbstractVectorOfArray structure used as the output for all time series results.\nArrayInterface.jl\nThe package which defines the extended AbstractArray interface employed throughout the SciML ecosystem.","category":"page"},{"location":"#Using-Facing-Modeling-Libraries","page":"Home","title":"Using-Facing Modeling Libraries","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"There are too many to name here and this will be populated when there is time!","category":"page"},{"location":"#Flowchart-Example-for-PDE-Constrained-Optimal-Control","page":"Home","title":"Flowchart Example for PDE-Constrained Optimal 
Control","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"The following example showcases how the pieces of the common interface connect to solve a problem that mixes inference, symbolics, and numerics.","category":"page"},{"location":"","page":"Home","title":"Home","text":"(Image: )","category":"page"},{"location":"#External-Binding-Libraries","page":"Home","title":"External Binding Libraries","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"diffeqr\nSolving differential equations in R using DifferentialEquations.jl with ModelingToolkit for JIT compilation and GPU-acceleration\ndiffeqpy\nSolving differential equations in Python using DifferentialEquations.jl","category":"page"},{"location":"#Solver-Libraries","page":"Home","title":"Solver Libraries","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"There are too many to name here. Check out the SciML Organization Github Page for details.","category":"page"},{"location":"#Contributing","page":"Home","title":"Contributing","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"Please refer to the SciML ColPrac: Contributor's Guide on Collaborative Practices for Community Packages for guidance on PRs, issues, and other matters relating to contributing to SciML.\nSee the SciML Style Guide for common coding practices and other style decisions.\nThere are a few community forums:\nThe #diffeq-bridged and #sciml-bridged channels in the Julia Slack\nThe #diffeq-bridged and #sciml-bridged channels in the Julia Zulip\nOn the Julia Discourse forums\nSee also SciML Community page","category":"page"},{"location":"#Reproducibility","page":"Home","title":"Reproducibility","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"
The documentation of this SciML package was built using these direct dependencies,","category":"page"},{"location":"","page":"Home","title":"Home","text":"using Pkg # hide\nPkg.status() # hide","category":"page"},{"location":"","page":"Home","title":"Home","text":"
","category":"page"},{"location":"","page":"Home","title":"Home","text":"
and using this machine and Julia version.","category":"page"},{"location":"","page":"Home","title":"Home","text":"using InteractiveUtils # hide\nversioninfo() # hide","category":"page"},{"location":"","page":"Home","title":"Home","text":"
","category":"page"},{"location":"","page":"Home","title":"Home","text":"
A more complete overview of all dependencies and their versions is also provided.","category":"page"},{"location":"","page":"Home","title":"Home","text":"using Pkg # hide\nPkg.status(;mode = PKGMODE_MANIFEST) # hide","category":"page"},{"location":"","page":"Home","title":"Home","text":"
","category":"page"},{"location":"","page":"Home","title":"Home","text":"using TOML\nusing Markdown\nversion = TOML.parse(read(\"../../Project.toml\", String))[\"version\"]\nname = TOML.parse(read(\"../../Project.toml\", String))[\"name\"]\nlink_manifest = \"https://github.com/SciML/\" * name * \".jl/tree/gh-pages/v\" * version *\n \"/assets/Manifest.toml\"\nlink_project = \"https://github.com/SciML/\" * name * \".jl/tree/gh-pages/v\" * version *\n \"/assets/Project.toml\"\nMarkdown.parse(\"\"\"You can also download the\n[manifest]($link_manifest)\nfile and the\n[project]($link_project)\nfile.\n\"\"\")","category":"page"},{"location":"interfaces/Array_and_Number/#arrayandnumber","page":"SciML Container (Array) and Number Interfaces","title":"SciML Container (Array) and Number Interfaces","text":"","category":"section"},{"location":"interfaces/Array_and_Number/","page":"SciML Container (Array) and Number Interfaces","title":"SciML Container (Array) and Number Interfaces","text":"We live in a society, and therefore there are rules. In this tutorial, we outline the rules required of the container and number types that are allowed in SciML tools.","category":"page"},{"location":"interfaces/Array_and_Number/","page":"SciML Container (Array) and Number Interfaces","title":"SciML Container (Array) and Number Interfaces","text":"warn: Warn\nIn general, as of 2023, strict adherence to this interface is an early work-in-progress. If anything does not conform to the documented interface, please open an issue.","category":"page"},{"location":"interfaces/Array_and_Number/","page":"SciML Container (Array) and Number Interfaces","title":"SciML Container (Array) and Number Interfaces","text":"note: Note\nThere are many types which can work with a specific solver even though they do not fully satisfy this interface. Many times, as part of prototyping, you may want to side-step the high level interface checks in order to simply test whether a new type is working. To do this, set interface_checks = false as a keyword argument to init/solve to bypass any of the internal interface checks. This means you will no longer get a nice high-level error message and instead it will attempt to use the type without restrictions. Note that not every problem/solver has implemented this new keyword argument as of 2023.","category":"page"},{"location":"interfaces/Array_and_Number/#Note-About-Wrapped-Solvers","page":"SciML Container (Array) and Number Interfaces","title":"Note About Wrapped Solvers","text":"","category":"section"},{"location":"interfaces/Array_and_Number/","page":"SciML Container (Array) and Number Interfaces","title":"SciML Container (Array) and Number Interfaces","text":"Due to limitations of wrapped solvers, any solver that wraps an existing C/Fortran code is inherently limited to Float64 and Vector{Float64} for its operations. This includes packages like Sundials.jl, LSODA.jl, DASKR.jl, MINPACK.jl, and many more. This is fundamental to these solvers and it is not expected that they will allow the full set of SciML types in the future. 
If more abstract number/container definitions are required, then these are not the appropriate solvers to use.","category":"page"},{"location":"interfaces/Array_and_Number/#SciML-Number-Types","page":"SciML Container (Array) and Number Interfaces","title":"SciML Number Types","text":"","category":"section"},{"location":"interfaces/Array_and_Number/","page":"SciML Container (Array) and Number Interfaces","title":"SciML Container (Array) and Number Interfaces","text":"The number types are the types used to define the dependent variables (i.e. u0) and the independent variables (t or tspan). These two types can be different, and can have different restrictions depending on the type of solver which is employed. The following rules for a Number type are held in general:","category":"page"},{"location":"interfaces/Array_and_Number/","page":"SciML Container (Array) and Number Interfaces","title":"SciML Container (Array) and Number Interfaces","text":"Number types can be used in SciML directly or in containers. If a problem defines a value like u0 using a Number type, the out-of-place form must be used for the problem definition.\nx::T + y::T = z::T\nx::T * y::T = z::T\noneunit(x::T)::T\none(x::T) * oneunit(x::T) = z::T\nt::T2 * x::T + y::T = z::T for T2 a time type and T the dependent variable type (this includes the muladd equivalent form).","category":"page"},{"location":"interfaces/Array_and_Number/","page":"SciML Container (Array) and Number Interfaces","title":"SciML Container (Array) and Number Interfaces","text":"Additionally, the following rules apply to subsets of uses:","category":"page"},{"location":"interfaces/Array_and_Number/#Adaptive-Number-Types","page":"SciML Container (Array) and Number Interfaces","title":"Adaptive Number Types","text":"","category":"section"},{"location":"interfaces/Array_and_Number/","page":"SciML Container (Array) and Number Interfaces","title":"SciML Container (Array) and Number Interfaces","text":"x::T / y::T = z::T\nDefault choices of norms can assume sqrt(x::T)::T exists. If internalnorm is overridden then this may not be required (for example, changing the norm to inf-norm).\nx::T ^ y::T = z::T","category":"page"},{"location":"interfaces/Array_and_Number/#Time-Types-(Independent-Variables)","page":"SciML Container (Array) and Number Interfaces","title":"Time Types (Independent Variables)","text":"","category":"section"},{"location":"interfaces/Array_and_Number/","page":"SciML Container (Array) and Number Interfaces","title":"SciML Container (Array) and Number Interfaces","text":"If a solver is time adaptive, the time type must be a floating point number. Rational is only allowed for non-adaptive solves.","category":"page"},{"location":"interfaces/Array_and_Number/#SciML-Container-(Array)-Types","page":"SciML Container (Array) and Number Interfaces","title":"SciML Container (Array) Types","text":"","category":"section"},{"location":"interfaces/Array_and_Number/","page":"SciML Container (Array) and Number Interfaces","title":"SciML Container (Array) and Number Interfaces","text":"Container types are types which hold number types. They can be used to define objects like the state vector (u0) of a problem. 
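As a hedged sketch (ComponentArrays.jl and OrdinaryDiffEq.jl are assumed here purely for illustration; neither is required by this interface), a structured container that follows the rules listed below can be used directly as the state:\n\nusing OrdinaryDiffEq, ComponentArrays\nu0 = ComponentArray(x = 1.0, y = [2.0, 3.0])   # structured state container\nf!(du, u, p, t) = (du .= -u)                   # in-place RHS; broadcast stays type-preserving\nsol = solve(ODEProblem(f!, u0, (0.0, 1.0)), Tsit5())\n\n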
The following operations are required of a container type for it to be used with SciML solvers:","category":"page"},{"location":"interfaces/Array_and_Number/","page":"SciML Container (Array) and Number Interfaces","title":"SciML Container (Array) and Number Interfaces","text":"Broadcast is defined according to the Julia broadcast interface.\nThe container type correctly defines interface overloads to satisfy the ArrayInterface.jl specification.\nArrayInterface.zeromatrix(x::T)::T2 defines a compatible matrix type (see below).\neltype(x::T)::T2 is a compatible Number type.\nx::T .+ y::T = z::T (i.e. broadcast similar is defined to be type-preserving)\nIndexing is only required if ArrayInterface.fast_scalar_indexing(x::T)==true. If true, scalar indexing x[i] is assumed to be defined and run through all variables.","category":"page"},{"location":"interfaces/Array_and_Number/","page":"SciML Container (Array) and Number Interfaces","title":"SciML Container (Array) and Number Interfaces","text":"note: Note\n\"eltype(x::T)::T2 is a compatible Number type\" excludes nested array types such as Array{Array{T}}. However, recursive vectors can be made to conform to the interface with zero overhead using tools from RecursiveArrayTools.jl such as VectorOfArray(x). Since this greatly simplifies the interfaces and the ability to check for correctness, doing this wrapping is highly recommended and there are no plans to relax this requirement.","category":"page"},{"location":"interfaces/Array_and_Number/","page":"SciML Container (Array) and Number Interfaces","title":"SciML Container (Array) and Number Interfaces","text":"Additionally, the following rules apply to subsets of uses:","category":"page"},{"location":"interfaces/Array_and_Number/#SciML-Mutable-Array-Types","page":"SciML Container (Array) and Number Interfaces","title":"SciML Mutable Array Types","text":"","category":"section"},{"location":"interfaces/Array_and_Number/","page":"SciML Container (Array) and Number Interfaces","title":"SciML Container (Array) and Number Interfaces","text":"similar(x::T)::T\nzero(x::T)::T\nz::T .= x::T .+ y::T is defined\nz::T .= x::T .* y::T is defined\nz::T .= t::T2 .* x::T where T2 is the time type (a Number) and T is the container type.\n(Optional) Base.resize!(x,i) is required for resize!(integrator,i) to be supported.","category":"page"},{"location":"interfaces/Array_and_Number/#SciML-Matrix-(Operator)-Type","page":"SciML Container (Array) and Number Interfaces","title":"SciML Matrix (Operator) Type","text":"","category":"section"},{"location":"interfaces/Array_and_Number/","page":"SciML Container (Array) and Number Interfaces","title":"SciML Container (Array) and Number Interfaces","text":"Note that the matrix type may not match the type of the initial container u0. An example is ComponentMatrix as the matrix structure corresponding to a ComponentArray. However, the following actions are assumed to hold on the resulting matrix type:","category":"page"},{"location":"interfaces/Array_and_Number/","page":"SciML Container (Array) and Number Interfaces","title":"SciML Container (Array) and Number Interfaces","text":"solve(LinearProblem(A::T,b::T2),linsolve) must be defined for a solver to work on a given SciML matrix type T2.\nIf the matrix is an operator, i.e. 
a lazy construct, it should conform to the SciMLOperators interface.\nIf not a SciMLOperator, diagind(W::T) should be defined, and @view(A[idxs]) .= @view(A[idxs]) .+ λ::T (i.e. in-place addition of a scalar to the diagonal entries) should be supported.","category":"page"},{"location":"interfaces/PDE/#The-PDE-Definition-Interface","page":"The PDE Definition Interface","title":"The PDE Definition Interface","text":"","category":"section"},{"location":"interfaces/PDE/","page":"The PDE Definition Interface","title":"The PDE Definition Interface","text":"While ODEs u' = f(u,p,t) can be defined by a user-function f, for PDEs the function form can be different for every PDE. How many functions, and how many inputs? This can always change. The SciML ecosystem solves this problem by using ModelingToolkit.jl to define PDESystem, a high-level symbolic description of the PDE to be consumed by other packages.","category":"page"},{"location":"interfaces/PDE/","page":"The PDE Definition Interface","title":"The PDE Definition Interface","text":"The vision for the common PDE interface is that a user should only have to specify their PDE once, mathematically, and have instant access to everything, from something as simple as a finite difference method with constant grid spacing to something as complex as a distributed multi-GPU discontinuous Galerkin method.","category":"page"},{"location":"interfaces/PDE/","page":"The PDE Definition Interface","title":"The PDE Definition Interface","text":"The key to the common PDE interface is a separation of the symbolic handling from the numerical world. The discretizers should not \"solve\" the PDE, but instead convert the mathematical specification into a numerical problem. Preferably, the transformation should be to another ModelingToolkit.jl AbstractSystem via a symbolic_discretize dispatch, but in some cases this cannot be done or will not be performant. Thus, in some cases, only a discretize definition is given to an AbstractSciMLProblem, with symbolic_discretize simply providing diagnostic or lower level information about the construction process.","category":"page"},{"location":"interfaces/PDE/","page":"The PDE Definition Interface","title":"The PDE Definition Interface","text":"These elementary problems, such as solving linear systems Ax=b, solving nonlinear systems f(x)=0, ODEs, etc., are all defined by SciMLBase.jl, and numerical solvers can all target these common forms. 
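For instance (a hedged sketch; LinearSolve.jl and NonlinearSolve.jl are assumed here, and the particular solver choices are illustrative), both problem types are built and solved through the same interface:\n\nusing LinearSolve, NonlinearSolve\nA = rand(4, 4); b = rand(4)\nlinsol = solve(LinearProblem(A, b))                    # any linear solver can target this form\nnlprob = NonlinearProblem((u, p) -> u .^ 2 .- p, [1.0], 2.0)\nnlsol = solve(nlprob, NewtonRaphson())                 # any nonlinear solver can target this form\n\n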
Thus someone who works on linear solvers doesn't necessarily need to be working on a Discontinuous Galerkin or finite element library, but can instead focus on \"linear solvers that are good for matrices A with properties ...\", which are then accessible by every other discretization method in the common PDE interface.","category":"page"},{"location":"interfaces/PDE/","page":"The PDE Definition Interface","title":"The PDE Definition Interface","text":"Similar to the rest of the AbstractSystem types, transformation and analysis functions will allow for simplifying the PDE before solving it, and constructing block symbolic functions like Jacobians.","category":"page"},{"location":"interfaces/PDE/#Constructors","page":"The PDE Definition Interface","title":"Constructors","text":"","category":"section"},{"location":"interfaces/PDE/","page":"The PDE Definition Interface","title":"The PDE Definition Interface","text":"ModelingToolkit.PDESystem","category":"page"},{"location":"interfaces/PDE/#ModelingToolkit.PDESystem","page":"The PDE Definition Interface","title":"ModelingToolkit.PDESystem","text":"struct PDESystem <: AbstractMultivariateSystem\n\nA system of partial differential equations.\n\nFields\n\neqs: The equations which define the PDE.\nbcs: The boundary conditions.\ndomain: The domain for the independent variables.\nivs: The independent variables.\ndvs: The dependent variables.\nps: The parameters.\ndefaults: The default values to use when initial conditions and/or parameters are not supplied in ODEProblem.\n\nconnector_type: Type of the system.\n\nsystems: The internal systems. These are required to have unique names.\n\nanalytic: A vector of explicit symbolic expressions for the analytic solutions of each dependent variable. e.g. analytic = [u(t, x) ~ a*sin(c*t) * cos(k*x)].\n\nanalytic_func: A vector of functions for the analytic solutions of each dependent variable. Will be generated from analytic if not provided. Should have the same argument signature as the variable, and a ps argument as the last argument, which takes an indexable of parameter values in the order you specified them in ps. e.g. analytic_func = [u(t, x) => (ps, t, x) -> ps[1]*sin(ps[2]*t) * cos(ps[3]*x)].\n\nname: The name of the system.\n\nmetadata: Metadata for the system, to be used by downstream packages.\n\ngui_metadata: Metadata for MTK GUI.\n\nExample\n\nusing ModelingToolkit\n\n@parameters x\n@variables t u(..)\nDxx = Differential(x)^2\nDtt = Differential(t)^2\nDt = Differential(t)\n\n#2D PDE\nC=1\neq = Dtt(u(t,x)) ~ C^2*Dxx(u(t,x))\n\n# Initial and boundary conditions\nbcs = [u(t,0) ~ 0.,# for all t > 0\n u(t,1) ~ 0.,# for all t > 0\n u(0,x) ~ x*(1. - x), #for all 0 < x < 1\n Dt(u(0,x)) ~ 0. ] #for all 0 < x < 1]\n\n# Space and time domains\ndomains = [t ∈ (0.0,1.0),\n x ∈ (0.0,1.0)]\n\n@named pde_system = PDESystem(eq,bcs,domains,[t,x],[u])\n\n\n\n\n\n","category":"type"},{"location":"interfaces/PDE/#Domains-(WIP)","page":"The PDE Definition Interface","title":"Domains (WIP)","text":"","category":"section"},{"location":"interfaces/PDE/","page":"The PDE Definition Interface","title":"The PDE Definition Interface","text":"Domains are specified by saying indepvar in domain, where indepvar is a single independent variable or a collection of independent variables, and domain is the chosen domain type. A 2-tuple can be used to indicate an Interval. 
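As a hedged sketch (DomainSets.jl is assumed here for the explicit form), the two spellings below are equivalent ways to declare an interval domain, and the 2-tuple shorthand is what appears in the forms listed next:\n\nusing ModelingToolkit, DomainSets\n@parameters t x                      # independent variables\ndomains = [t ∈ Interval(0.0, 1.0),   # explicit DomainSets.Interval\n           x ∈ (0.0, 1.0)]           # 2-tuple shorthand for the same interval\n\n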
Thus, forms for the indepvar can look like:","category":"page"},{"location":"interfaces/PDE/","page":"The PDE Definition Interface","title":"The PDE Definition Interface","text":"t ∈ (0.0,1.0)\n(t,x) ∈ UnitDisk()\n[v,w,x,y,z] ∈ VectorUnitBall(5)","category":"page"},{"location":"interfaces/PDE/#Domain-Types-(WIP)","page":"The PDE Definition Interface","title":"Domain Types (WIP)","text":"","category":"section"},{"location":"interfaces/PDE/","page":"The PDE Definition Interface","title":"The PDE Definition Interface","text":"Interval(a,b): Defines the domain of an interval from a to b (requires an explicit import from DomainSets.jl, but a 2-tuple can be used instead).","category":"page"},{"location":"interfaces/PDE/#discretize-and-symbolic_discretize","page":"The PDE Definition Interface","title":"discretize and symbolic_discretize","text":"","category":"section"},{"location":"interfaces/PDE/","page":"The PDE Definition Interface","title":"The PDE Definition Interface","text":"The only functions which act on a PDESystem are the following:","category":"page"},{"location":"interfaces/PDE/","page":"The PDE Definition Interface","title":"The PDE Definition Interface","text":"discretize(sys,discretizer): produces the resulting AbstractSystem or AbstractSciMLProblem.\nsymbolic_discretize(sys,discretizer): produces a debugging symbolic description of the discretized problem.","category":"page"},{"location":"interfaces/PDE/#Boundary-Conditions-(WIP)","page":"The PDE Definition Interface","title":"Boundary Conditions (WIP)","text":"","category":"section"},{"location":"interfaces/PDE/#Transformations","page":"The PDE Definition Interface","title":"Transformations","text":"","category":"section"},{"location":"interfaces/PDE/#Analyses","page":"The PDE Definition Interface","title":"Analyses","text":"","category":"section"},{"location":"interfaces/PDE/#Discretizer-Ecosystem","page":"The PDE Definition Interface","title":"Discretizer Ecosystem","text":"","category":"section"},{"location":"interfaces/PDE/#NeuralPDE.jl:-PhysicsInformedNN","page":"The PDE Definition Interface","title":"NeuralPDE.jl: PhysicsInformedNN","text":"","category":"section"},{"location":"interfaces/PDE/","page":"The PDE Definition Interface","title":"The PDE Definition Interface","text":"NeuralPDE.jl defines the PhysicsInformedNN discretizer which uses a DiffEqFlux.jl neural network to solve the differential equation.","category":"page"},{"location":"interfaces/PDE/#MethodOfLines.jl:-MOLFiniteDifference-(WIP)","page":"The PDE Definition Interface","title":"MethodOfLines.jl: MOLFiniteDifference (WIP)","text":"","category":"section"},{"location":"interfaces/PDE/","page":"The PDE Definition Interface","title":"The PDE Definition Interface","text":"MethodOfLines.jl defines the MOLFiniteDifference discretizer which performs a finite difference discretization using the DiffEqOperators.jl stencils. These stencils make use of NNlib.jl for fast operations on semi-linear domains.","category":"page"}] }