diff --git a/Project.toml b/Project.toml index 1205910b7..7ee0625b0 100644 --- a/Project.toml +++ b/Project.toml @@ -50,7 +50,7 @@ PrecompileTools = "1" RecursiveArrayTools = "2" Reexport = "0.2, 1" SciMLBase = "2.4" -SimpleNonlinearSolve = "0.1" +SimpleNonlinearSolve = "0.1.22" SparseDiffTools = "2.6" StaticArraysCore = "1.4" UnPack = "1.0" diff --git a/docs/pages.jl b/docs/pages.jl index 35f924cdd..c29d2874b 100644 --- a/docs/pages.jl +++ b/docs/pages.jl @@ -2,8 +2,7 @@ pages = ["index.md", "tutorials/getting_started.md", - "Tutorials" => Any[ - "tutorials/code_optimization.md", + "Tutorials" => Any["tutorials/code_optimization.md", "tutorials/large_systems.md", "tutorials/small_compile.md", "tutorials/termination_conditions.md", diff --git a/docs/src/solvers/NonlinearSystemSolvers.md b/docs/src/solvers/NonlinearSystemSolvers.md index c4f0a66fb..740654d57 100644 --- a/docs/src/solvers/NonlinearSystemSolvers.md +++ b/docs/src/solvers/NonlinearSystemSolvers.md @@ -16,7 +16,7 @@ it will make sure to work. If one is looking for more robustness then `RobustMultiNewton` is a good choice. It attempts a set of the most robust methods in succession and only fails if -all of the methods fail to converge. Additionally, `DynamicSS` can be a good choice +all of the methods fail to converge. Additionally, `DynamicSS` can be a good choice for high stability. As a balance, `NewtonRaphson` is a good choice for most problems that aren't too @@ -25,13 +25,13 @@ but more stable. If the problem is well-conditioned, `Klement` or `Broyden` may be faster, but highly dependent on the eigenvalues of the Jacobian being sufficiently small. -`NewtonRaphson` and `TrustRegion` are designed for for large systems. +`NewtonRaphson` and `TrustRegion` are designed for for large systems. They can make use of sparsity patterns for sparse automatic differentiation and sparse linear solving of very large systems. Meanwhile, `SimpleNewtonRaphson` and `SimpleTrustRegion` are implementations which is specialized for small equations. They are non-allocating on static arrays and thus really well-optimized for small systems, thus usually outperforming the other methods when such types are -used for `u0`. +used for `u0`. ## Full List of Methods @@ -57,7 +57,7 @@ features, but have a bit of overhead on very small problems. large-scale and numerically-difficult nonlinear systems. - `RobustMultiNewton()`: A polyalgorithm that mixes highly robust methods (line searches and trust regions) in order to be as robust as possible for difficult problems. If this method - fails to converge, then one can be pretty certain that most (all?) other choices would + fails to converge, then one can be pretty certain that most (all?) other choices would likely fail. - `FastShortcutNonlinearPolyalg`: The default method. 
A polyalgorithm that mixes fast methods with fallbacks to robust methods to allow for solving easy problems quickly without sacrificing diff --git a/docs/src/tutorials/code_optimization.md b/docs/src/tutorials/code_optimization.md index bb6fbdbd4..24d003206 100644 --- a/docs/src/tutorials/code_optimization.md +++ b/docs/src/tutorials/code_optimization.md @@ -55,4 +55,4 @@ prob = NonlinearProblem(f!, u0, p) linsolve = LinearSolve.KrylovJL_GMRES() sol = solve(prob, NewtonRaphson(; linsolve), reltol = 1e-9) -``` \ No newline at end of file +``` diff --git a/docs/src/tutorials/getting_started.md b/docs/src/tutorials/getting_started.md index edcbb6921..9f1685557 100644 --- a/docs/src/tutorials/getting_started.md +++ b/docs/src/tutorials/getting_started.md @@ -9,19 +9,19 @@ to understanding the deeper parts of the documentation. There are three types of nonlinear systems: -1. The "standard nonlinear system", i.e. the `NonlinearProblem`. This is a - system of equations with an initial condition where you want to satisfy - all equations simultaniously. -2. The "interval rootfinding problem", i.e. the `IntervalNonlinearProblem`. - This is the case where you're given an interval `[a,b]` and need to find - where `f(u) = 0` for `u` inside the bounds. -3. The "steady state problem", i.e. find the `u` such that `u' = f(u) = 0`. - While related to (1), it's not entirely the same because there's a uniquely - defined privledged root. -4. The nonlinear least squares problem, which is an overconstrained nonlinear - system (i.e. more equations than states) which might not be satisfiable, i.e. - there may be no `u` such that `f(u) = 0`, and thus we find the `u` which - minimizes `||f(u)||` in the least squares sense. + 1. The "standard nonlinear system", i.e. the `NonlinearProblem`. This is a + system of equations with an initial condition where you want to satisfy + all equations simultaniously. + 2. The "interval rootfinding problem", i.e. the `IntervalNonlinearProblem`. + This is the case where you're given an interval `[a,b]` and need to find + where `f(u) = 0` for `u` inside the bounds. + 3. The "steady state problem", i.e. find the `u` such that `u' = f(u) = 0`. + While related to (1), it's not entirely the same because there's a uniquely + defined privledged root. + 4. The nonlinear least squares problem, which is an overconstrained nonlinear + system (i.e. more equations than states) which might not be satisfiable, i.e. + there may be no `u` such that `f(u) = 0`, and thus we find the `u` which + minimizes `||f(u)||` in the least squares sense. For now let's focus on the first two. The other two are covered in later tutorials, but from the first two we can show the general flow of the NonlinearSolve.jl package. @@ -105,7 +105,7 @@ For a complete list of solver choices, see [the nonlinear system solvers page](@ Next we can modify the tolerances. Here let's set some really low tolerances to force a tight solution: ```@example 1 -solve(prob, TrustRegion(), reltol=1e-12, abstol=1e-12) +solve(prob, TrustRegion(), reltol = 1e-12, abstol = 1e-12) ``` There are many more options for doing this configuring. Specifically for handling termination conditions, @@ -139,10 +139,10 @@ sol = solve(prob_int, ITP(), abstol = 0.01) Congrats, you now know how to use the basics of NonlinearSolve.jl! However, there is so much more to see. 
Next check out: -- [Some code optimization tricks to know about with NonlinearSolve.jl](@ref code_optimization) -- [An iterator interface which lets you step through the solving process step by step](@ref iterator) -- [How to handle large systems of equations efficiently](@ref large_systems) -- [Ways to use NonlinearSolve.jl that is faster to startup and can statically compile](@ref fast_startup) -- [More detailed termination conditions](@ref termination_conditions_tutorial) + - [Some code optimization tricks to know about with NonlinearSolve.jl](@ref code_optimization) + - [An iterator interface which lets you step through the solving process step by step](@ref iterator) + - [How to handle large systems of equations efficiently](@ref large_systems) + - [Ways to use NonlinearSolve.jl that is faster to startup and can statically compile](@ref fast_startup) + - [More detailed termination conditions](@ref termination_conditions_tutorial) -And also check out the rest of the manual. \ No newline at end of file +And also check out the rest of the manual. diff --git a/docs/src/tutorials/iterator_interface.md b/docs/src/tutorials/iterator_interface.md index 640fb5f02..c0fb914f4 100644 --- a/docs/src/tutorials/iterator_interface.md +++ b/docs/src/tutorials/iterator_interface.md @@ -1,7 +1,7 @@ # [Nonlinear Solver Iterator Interface](@id iterator) !!! warn - + This iterator interface will be expanded with a `step!` function soon! There is an iterator form of the nonlinear solver which mirrors the DiffEq integrator interface: diff --git a/docs/src/tutorials/large_systems.md b/docs/src/tutorials/large_systems.md index 43c7f6cd6..5cbb99167 100644 --- a/docs/src/tutorials/large_systems.md +++ b/docs/src/tutorials/large_systems.md @@ -209,8 +209,7 @@ function incompletelu(W, du, u, p, t, newW, Plprev, Prprev, solverdata) end @btime solve(prob_brusselator_2d_sparse, - NewtonRaphson(linsolve = KrylovJL_GMRES(), precs = incompletelu, - concrete_jac = true)); + NewtonRaphson(linsolve = KrylovJL_GMRES(), precs = incompletelu, concrete_jac = true)); nothing # hide ``` @@ -275,28 +274,3 @@ nothing # hide For more information on the preconditioner interface, see the [linear solver documentation](https://docs.sciml.ai/LinearSolve/stable/basics/Preconditioners/). - -## Speed up Jacobian computation with sparsity exploitation and matrix coloring - -To cut down the of Jacobian building overhead, we can choose to exploit the sparsity pattern and deploy matrix coloring during Jacobian construction. 
With NonlinearSolve.jl, we can simply use `autodiff=AutoSparseForwardDiff()` to automatically exploit the sparsity pattern of Jacobian matrices: - -```@example ill_conditioned_nlprob -@btime solve(prob_brusselator_2d, - NewtonRaphson(linsolve = KrylovJL_GMRES(), precs = incompletelu, concrete_jac = true, - autodiff = AutoSparseForwardDiff())); -nothing # hide -``` - -To setup matrix coloring for the jacobian sparsity pattern, we can simply get the coloring vector by using [ArrayInterface.jl](https://github.com/JuliaArrays/ArrayInterface.jl) for the sparsity pattern of `jac_prototype`: - -```@example ill_conditioned_nlprob -using ArrayInterface -colorvec = ArrayInterface.matrix_colors(jac_sparsity) -ff = NonlinearFunction(brusselator_2d_loop; jac_prototype = float.(jac_sparsity), colorvec) -prob_brusselator_2d_sparse = NonlinearProblem(ff, u0, p) - -@btime solve(prob_brusselator_2d_sparse, - NewtonRaphson(linsolve = KrylovJL_GMRES(), precs = incompletelu, concrete_jac = true, - autodiff = AutoSparseForwardDiff())); -nothing # hide -``` diff --git a/docs/src/tutorials/small_compile.md b/docs/src/tutorials/small_compile.md index e6aa07312..aab4531b8 100644 --- a/docs/src/tutorials/small_compile.md +++ b/docs/src/tutorials/small_compile.md @@ -1,3 +1,3 @@ # Faster Startup and and Static Compilation -This is a stub article to be completed soon. \ No newline at end of file +This is a stub article to be completed soon. diff --git a/docs/src/tutorials/termination_conditions.md b/docs/src/tutorials/termination_conditions.md index 152e0abc4..6a7e8a3cd 100644 --- a/docs/src/tutorials/termination_conditions.md +++ b/docs/src/tutorials/termination_conditions.md @@ -1,3 +1,3 @@ # [More Detailed Termination Conditions](@id termination_conditions_tutorial) -This is a stub article to be completed soon. \ No newline at end of file +This is a stub article to be completed soon. diff --git a/src/default.jl b/src/default.jl index 44ac26cad..913f994fe 100644 --- a/src/default.jl +++ b/src/default.jl @@ -1,6 +1,6 @@ """ -RobustMultiNewton(; concrete_jac = nothing, linsolve = nothing, - precs = DEFAULT_PRECS, adkwargs...) + RobustMultiNewton(; concrete_jac = nothing, linsolve = nothing, precs = DEFAULT_PRECS, + adkwargs...) A polyalgorithm focused on robustness. It uses a mixture of Newton methods with different globalizing techniques (trust region updates, line searches, etc.) in order to find a @@ -12,20 +12,20 @@ or more precision / more stable linear solver choice is required). ### Keyword Arguments -- `autodiff`: determines the backend used for the Jacobian. Note that this argument is + - `autodiff`: determines the backend used for the Jacobian. Note that this argument is ignored if an analytical Jacobian is passed, as that will be used instead. Defaults to `AutoForwardDiff()`. Valid choices are types from ADTypes.jl. -- `concrete_jac`: whether to build a concrete Jacobian. If a Krylov-subspace method is used, + - `concrete_jac`: whether to build a concrete Jacobian. If a Krylov-subspace method is used, then the Jacobian will not be constructed and instead direct Jacobian-vector products `J*v` are computed using forward-mode automatic differentiation or finite differencing tricks (without ever constructing the Jacobian). However, if the Jacobian is still needed, for example for a preconditioner, `concrete_jac = true` can be passed in order to force the construction of the Jacobian. 
-- `linsolve`: the [LinearSolve.jl](https://github.com/SciML/LinearSolve.jl) used for the + - `linsolve`: the [LinearSolve.jl](https://github.com/SciML/LinearSolve.jl) used for the linear solves within the Newton method. Defaults to `nothing`, which means it uses the LinearSolve.jl default algorithm choice. For more information on available algorithm choices, see the [LinearSolve.jl documentation](https://docs.sciml.ai/LinearSolve/stable/). -- `precs`: the choice of preconditioners for the linear solver. Defaults to using no + - `precs`: the choice of preconditioners for the linear solver. Defaults to using no preconditioners. For more information on specifying preconditioners for LinearSolve algorithms, consult the [LinearSolve.jl documentation](https://docs.sciml.ai/LinearSolve/stable/). @@ -41,57 +41,59 @@ end # Robusters! const Robusters = RobustMultiNewton -function RobustMultiNewton(; concrete_jac = nothing, linsolve = nothing, - precs = DEFAULT_PRECS, adkwargs...) - +function RobustMultiNewton(; concrete_jac = nothing, linsolve = nothing, + precs = DEFAULT_PRECS, adkwargs...) return RobustMultiNewton{_unwrap_val(concrete_jac)}(adkwargs, linsolve, precs) end -@concrete mutable struct RobustMultiNewtonCache{iip} <: AbstractNonlinearSolveCache{iip} +@concrete mutable struct RobustMultiNewtonCache{iip, N} <: AbstractNonlinearSolveCache{iip} caches alg current::Int end -function SciMLBase.__init(prob::NonlinearProblem{uType, iip}, alg::RobustMultiNewton, args...; - kwargs...) where {uType, iip} - - adkwargs = alg.adkwargs - linsolve = alg.linsolve - precs = alg.precs - - RobustMultiNewtonCache{iip}(( - SciMLBase.__init(prob, TrustRegion(;linsolve, precs, adkwargs...), args...; kwargs...), - SciMLBase.__init(prob, TrustRegion(;linsolve, precs, radius_update_scheme = RadiusUpdateSchemes.Bastin, adkwargs...), args...; kwargs...), - SciMLBase.__init(prob, NewtonRaphson(;linsolve, precs, linesearch=BackTracking(), adkwargs...), args...; kwargs...), - SciMLBase.__init(prob, TrustRegion(;linsolve, precs, radius_update_scheme = RadiusUpdateSchemes.Fan, adkwargs...), args...; kwargs...), - ), alg, 1 - ) +function SciMLBase.__init(prob::NonlinearProblem{uType, iip}, alg::RobustMultiNewton, + args...; kwargs...) where {uType, iip} + @unpack adkwargs, linsolve, precs = alg + + algs = (TrustRegion(; linsolve, precs, adkwargs...), + TrustRegion(; linsolve, precs, + radius_update_scheme = RadiusUpdateSchemes.Bastin, adkwargs...), + NewtonRaphson(; linsolve, precs, linesearch = BackTracking(), adkwargs...), + TrustRegion(; linsolve, precs, + radius_update_scheme = RadiusUpdateSchemes.NLsolve, adkwargs...), + TrustRegion(; linsolve, precs, + radius_update_scheme = RadiusUpdateSchemes.Fan, adkwargs...)) + + # Partially Type Unstable but can't do much since some upstream caches -- LineSearches + # and SparseDiffTools cause the instability + return RobustMultiNewtonCache{iip, length(algs)}(map(solver -> SciMLBase.__init(prob, + solver, args...; kwargs...), algs), alg, 1) end """ -FastShortcutNonlinearPolyalg(; concrete_jac = nothing, linsolve = nothing, - precs = DEFAULT_PRECS, adkwargs...) + FastShortcutNonlinearPolyalg(; concrete_jac = nothing, linsolve = nothing, + precs = DEFAULT_PRECS, adkwargs...) A polyalgorithm focused on balancing speed and robustness. It first tries less robust methods for more performance and then tries more robust techniques if the faster ones fail. ### Keyword Arguments -- `autodiff`: determines the backend used for the Jacobian. 
Note that this argument is + - `autodiff`: determines the backend used for the Jacobian. Note that this argument is ignored if an analytical Jacobian is passed, as that will be used instead. Defaults to `AutoForwardDiff()`. Valid choices are types from ADTypes.jl. -- `concrete_jac`: whether to build a concrete Jacobian. If a Krylov-subspace method is used, + - `concrete_jac`: whether to build a concrete Jacobian. If a Krylov-subspace method is used, then the Jacobian will not be constructed and instead direct Jacobian-vector products `J*v` are computed using forward-mode automatic differentiation or finite differencing tricks (without ever constructing the Jacobian). However, if the Jacobian is still needed, for example for a preconditioner, `concrete_jac = true` can be passed in order to force the construction of the Jacobian. -- `linsolve`: the [LinearSolve.jl](https://github.com/SciML/LinearSolve.jl) used for the + - `linsolve`: the [LinearSolve.jl](https://github.com/SciML/LinearSolve.jl) used for the linear solves within the Newton method. Defaults to `nothing`, which means it uses the LinearSolve.jl default algorithm choice. For more information on available algorithm choices, see the [LinearSolve.jl documentation](https://docs.sciml.ai/LinearSolve/stable/). -- `precs`: the choice of preconditioners for the linear solver. Defaults to using no + - `precs`: the choice of preconditioners for the linear solver. Defaults to using no preconditioners. For more information on specifying preconditioners for LinearSolve algorithms, consult the [LinearSolve.jl documentation](https://docs.sciml.ai/LinearSolve/stable/). @@ -102,264 +104,166 @@ for more performance and then tries more robust techniques if the faster ones fa precs end -function FastShortcutNonlinearPolyalg(; concrete_jac = nothing, linsolve = nothing, +function FastShortcutNonlinearPolyalg(; concrete_jac = nothing, linsolve = nothing, precs = DEFAULT_PRECS, adkwargs...) - - return FastShortcutNonlinearPolyalg{_unwrap_val(concrete_jac)}(adkwargs, linsolve, precs) + return FastShortcutNonlinearPolyalg{_unwrap_val(concrete_jac)}(adkwargs, linsolve, + precs) end -@concrete mutable struct FastShortcutNonlinearPolyalgCache{iip} <: AbstractNonlinearSolveCache{iip} +@concrete mutable struct FastShortcutNonlinearPolyalgCache{iip, N} <: + AbstractNonlinearSolveCache{iip} caches alg current::Int end -function FastShortcutNonlinearPolyalgCache(; concrete_jac = nothing, linsolve = nothing, - precs = DEFAULT_PRECS, adkwargs...) - - return FastShortcutNonlinearPolyalgCache{_unwrap_val(concrete_jac)}(adkwargs, linsolve, precs) +function FastShortcutNonlinearPolyalgCache(; concrete_jac = nothing, linsolve = nothing, + precs = DEFAULT_PRECS, adkwargs...) + return FastShortcutNonlinearPolyalgCache{_unwrap_val(concrete_jac)}(adkwargs, linsolve, + precs) end -function SciMLBase.__init(prob::NonlinearProblem{uType, iip}, alg::FastShortcutNonlinearPolyalg, args...; - kwargs...) 
where {uType, iip} - - adkwargs = alg.adkwargs - linsolve = alg.linsolve - precs = alg.precs - - FastShortcutNonlinearPolyalgCache{iip}(( - #SciMLBase.__init(prob, Klement(), args...; kwargs...), - #SciMLBase.__init(prob, Broyden(), args...; kwargs...), - SciMLBase.__init(prob, NewtonRaphson(;linsolve, precs, adkwargs...), args...; kwargs...), - SciMLBase.__init(prob, NewtonRaphson(;linsolve, precs, linesearch=BackTracking(), adkwargs...), args...; kwargs...), - SciMLBase.__init(prob, TrustRegion(;linsolve, precs, adkwargs...), args...; kwargs...), - SciMLBase.__init(prob, TrustRegion(;linsolve, precs, radius_update_scheme = RadiusUpdateSchemes.Bastin, adkwargs...), args...; kwargs...), - ), alg, 1 - ) +function SciMLBase.__init(prob::NonlinearProblem{uType, iip}, + alg::FastShortcutNonlinearPolyalg, args...; kwargs...) where {uType, iip} + @unpack adkwargs, linsolve, precs = alg + + algs = ( + # Klement(), + # Broyden(), + NewtonRaphson(; linsolve, precs, adkwargs...), + NewtonRaphson(; linsolve, precs, linesearch = BackTracking(), adkwargs...), + TrustRegion(; linsolve, precs, adkwargs...), + TrustRegion(; linsolve, precs, + radius_update_scheme = RadiusUpdateSchemes.Bastin, adkwargs...)) + + return FastShortcutNonlinearPolyalgCache{iip, length(algs)}(map(solver -> SciMLBase.__init(prob, + solver, args...; kwargs...), algs), alg, 1) end -function SciMLBase.__solve(prob::NonlinearProblem{uType, false}, alg::FastShortcutNonlinearPolyalg, args...; - kwargs...) where {uType} - - adkwargs = alg.adkwargs - linsolve = alg.linsolve - precs = alg.precs - - sol1 = SciMLBase.__solve(prob, Klement(), args...; kwargs...) - if SciMLBase.successful_retcode(sol1) - return SciMLBase.build_solution(prob, alg, sol1.u, sol1.resid; - sol1.retcode, sol1.stats) - end - - sol2 = SciMLBase.__solve(prob, Broyden(), args...; kwargs...) - if SciMLBase.successful_retcode(sol2) - return SciMLBase.build_solution(prob, alg, sol2.u, sol2.resid; - sol2.retcode, sol2.stats) - end - - sol3 = SciMLBase.__solve(prob, NewtonRaphson(;linsolve, precs, adkwargs...), args...; kwargs...) - if SciMLBase.successful_retcode(sol3) - return SciMLBase.build_solution(prob, alg, sol3.u, sol3.resid; - sol3.retcode, sol3.stats) - end - - sol4 = SciMLBase.__solve(prob, TrustRegion(;linsolve, precs, adkwargs...), args...; kwargs...) - if SciMLBase.successful_retcode(sol4) - return SciMLBase.build_solution(prob, alg, sol4.u, sol4.resid; - sol4.retcode, sol4.stats) - end - - resids = (sol1.resid, sol2.resid, sol3.resid, sol4.resid) - minfu, idx = findmin(DEFAULT_NORM, resids) - - if idx == 1 - SciMLBase.build_solution(prob, alg, sol1.u, sol1.resid; - sol1.retcode, sol1.stats) - elseif idx == 2 - SciMLBase.build_solution(prob, alg, sol2.u, sol2.resid; - sol2.retcode, sol2.stats) - elseif idx == 3 - SciMLBase.build_solution(prob, alg, sol3.u, sol3.resid; - sol3.retcode, sol3.stats) - elseif idx == 4 - SciMLBase.build_solution(prob, alg, sol4.u, sol4.resid; - sol4.retcode, sol4.stats) +# This version doesn't allocate all the caches! +@generated function SciMLBase.__solve(prob::NonlinearProblem{uType, iip}, + alg::Union{FastShortcutNonlinearPolyalg, RobustMultiNewton}, args...; + kwargs...) 
where {uType, iip} + calls = [:(@unpack adkwargs, linsolve, precs = alg)] + + algs = if parameterless_type(alg) == RobustMultiNewton + [ + :(TrustRegion(; linsolve, precs, adkwargs...)), + :(TrustRegion(; linsolve, precs, + radius_update_scheme = RadiusUpdateSchemes.Bastin, adkwargs...)), + :(NewtonRaphson(; linsolve, precs, linesearch = BackTracking(), adkwargs...)), + :(TrustRegion(; linsolve, precs, + radius_update_scheme = RadiusUpdateSchemes.NLsolve, adkwargs...)), + :(TrustRegion(; linsolve, precs, + radius_update_scheme = RadiusUpdateSchemes.Fan, adkwargs...)), + ] else - error("Unreachable reached, 박정석") + [ + # FIXME: Broyden and Klement are type unstable + # (upstream SimpleNonlinearSolve.jl issue) + !iip ? :(Klement()) : nothing, # Klement not yet implemented for IIP + !iip ? :(Broyden()) : nothing, # Broyden not yet implemented for IIP + :(NewtonRaphson(; linsolve, precs, adkwargs...)), + :(NewtonRaphson(; linsolve, precs, linesearch = BackTracking(), adkwargs...)), + :(TrustRegion(; linsolve, precs, adkwargs...)), + :(TrustRegion(; linsolve, precs, + radius_update_scheme = RadiusUpdateSchemes.Bastin, adkwargs...)), + ] end - -end - -function SciMLBase.__solve(prob::NonlinearProblem{uType, true}, alg::FastShortcutNonlinearPolyalg, args...; - kwargs...) where {uType} - - adkwargs = alg.adkwargs - linsolve = alg.linsolve - precs = alg.precs - - sol1 = SciMLBase.__solve(prob, NewtonRaphson(;linsolve, precs, adkwargs...), args...; kwargs...) - if SciMLBase.successful_retcode(sol1) - return SciMLBase.build_solution(prob, alg, sol1.u, sol1.resid; - sol1.retcode, sol1.stats) + filter!(!isnothing, algs) + sol_syms = [gensym("sol") for i in 1:length(algs)] + for i in 1:length(algs) + cur_sol = sol_syms[i] + push!(calls, + quote + $(cur_sol) = SciMLBase.__solve(prob, $(algs[i]), args...; kwargs...) + if SciMLBase.successful_retcode($(cur_sol)) + return SciMLBase.build_solution(prob, alg, $(cur_sol).u, + $(cur_sol).resid; $(cur_sol).retcode, $(cur_sol).stats, + original = $(cur_sol)) + end + end) end - sol2 = SciMLBase.__solve(prob, NewtonRaphson(;linsolve, precs, linesearch=BackTracking(), adkwargs...), args...; kwargs...) - if SciMLBase.successful_retcode(sol2) - return SciMLBase.build_solution(prob, alg, sol2.u, sol2.resid; - sol2.retcode, sol2.stats) + resids = map(x -> Symbol("$(x)_resid"), sol_syms) + for (sym, resid) in zip(sol_syms, resids) + push!(calls, :($(resid) = $(sym).resid)) end - sol3 = SciMLBase.__solve(prob, TrustRegion(;linsolve, precs, adkwargs...), args...; kwargs...) - if SciMLBase.successful_retcode(sol3) - return SciMLBase.build_solution(prob, alg, sol3.u, sol3.resid; - sol3.retcode, sol3.stats) - end - - sol4 = SciMLBase.__solve(prob, TrustRegion(;linsolve, precs, radius_update_scheme = RadiusUpdateSchemes.Bastin, adkwargs...), args...; kwargs...) 
- if SciMLBase.successful_retcode(sol4) - return SciMLBase.build_solution(prob, alg, sol4.u, sol4.resid; - sol4.retcode, sol4.stats) - end - - resids = (sol1.resid, sol2.resid, sol3.resid, sol4.resid) - minfu, idx = findmin(DEFAULT_NORM, resids) - - if idx == 1 - SciMLBase.build_solution(prob, alg, sol1.u, sol1.resid; - sol1.retcode, sol1.stats) - elseif idx == 2 - SciMLBase.build_solution(prob, alg, sol2.u, sol2.resid; - sol2.retcode, sol2.stats) - elseif idx == 3 - SciMLBase.build_solution(prob, alg, sol3.u, sol3.resid; - sol3.retcode, sol3.stats) - elseif idx == 4 - SciMLBase.build_solution(prob, alg, sol4.u, sol4.resid; - sol4.retcode, sol4.stats) - else - error("Unreachable reached, 박정석") + push!(calls, + quote + resids = tuple($(Tuple(resids)...)) + minfu, idx = findmin(DEFAULT_NORM, resids) + end) + + for i in 1:length(algs) + push!(calls, + quote + if idx == $i + return SciMLBase.build_solution(prob, alg, $(sol_syms[i]).u, + $(sol_syms[i]).resid; $(sol_syms[i]).retcode, $(sol_syms[i]).stats) + end + end) end + push!(calls, :(error("Current choices shouldn't get here!"))) + return Expr(:block, calls...) end ## General shared polyalg functions -function perform_step!(cache::Union{RobustMultiNewtonCache, FastShortcutNonlinearPolyalgCache}) - current = cache.current - - while true - if current == 1 - perform_step!(cache.caches[1]) - elseif current == 2 - perform_step!(cache.caches[2]) - elseif current == 3 - perform_step!(cache.caches[3]) - elseif current == 4 - perform_step!(cache.caches[4]) - else - error("Current choices shouldn't get here!") - end +@generated function SciMLBase.solve!(cache::Union{RobustMultiNewtonCache{iip, N}, + FastShortcutNonlinearPolyalgCache{iip, N}}) where {iip, N} + calls = [ + quote + 1 ≤ cache.current ≤ length(cache.caches) || + error("Current choices shouldn't get here!") + end, + ] + + cache_syms = [gensym("cache") for i in 1:N] + sol_syms = [gensym("sol") for i in 1:N] + for i in 1:N + push!(calls, + quote + $(cache_syms[i]) = cache.caches[$(i)] + if $(i) == cache.current + $(sol_syms[i]) = SciMLBase.solve!($(cache_syms[i])) + if SciMLBase.successful_retcode($(sol_syms[i])) + stats = $(sol_syms[i]).stats + u = $(sol_syms[i]).u + fu = get_fu($(cache_syms[i])) + return SciMLBase.build_solution($(sol_syms[i]).prob, cache.alg, u, + fu; retcode = ReturnCode.Success, stats, + original = $(sol_syms[i])) + end + cache.current = $(i + 1) + end + end) end - return nothing -end - -function SciMLBase.solve!(cache::Union{RobustMultiNewtonCache, FastShortcutNonlinearPolyalgCache}) - current = cache.current - - while current < 5 && all(not_terminated, cache.caches) - if current == 1 - perform_step!(cache.caches[1]) - !not_terminated(cache.caches[1]) && (cache.current += 1) - elseif current == 2 - perform_step!(cache.caches[2]) - !not_terminated(cache.caches[2]) && (cache.current += 1) - elseif current == 3 - perform_step!(cache.caches[3]) - !not_terminated(cache.caches[3]) && (cache.current += 1) - elseif current == 4 - perform_step!(cache.caches[4]) - !not_terminated(cache.caches[4]) && (cache.current += 1) - else - error("Current choices shouldn't get here!") - end - - - - #cache.stats.nsteps += 1 + resids = map(x -> Symbol("$(x)_resid"), cache_syms) + for (sym, resid) in zip(cache_syms, resids) + push!(calls, :($(resid) = get_fu($(sym)))) end + push!(calls, + quote + retcode = ReturnCode.MaxIters - if current < 5 - stats = if current == 1 - cache.caches[1].stats - elseif current == 2 - cache.caches[2].stats - elseif current == 3 - cache.caches[3].stats - elseif 
current == 4 - cache.caches[4].stats - end - - u = if current == 1 - cache.caches[1].u - elseif current == 2 - cache.caches[2].u - elseif current == 3 - cache.caches[3].u - elseif current == 4 - cache.caches[4].u - end - - fu = if current == 1 - get_fu(cache.caches[1]) - elseif current == 2 - get_fu(cache.caches[2]) - elseif current == 3 - get_fu(cache.caches[3]) - elseif current == 4 - get_fu(cache.caches[4]) - end - - retcode = if stats.nsteps == cache.caches[1].maxiters - ReturnCode.MaxIters - else - ReturnCode.Success - end - - return SciMLBase.build_solution(cache.caches[1].prob, cache.alg, u, fu; - retcode, stats) - else - retcode = ReturnCode.MaxIters - - fus = (get_fu(cache.caches[1]), get_fu(cache.caches[2]), get_fu(cache.caches[3]), get_fu(cache.caches[4])) - minfu, idx = findmin(cache.caches[1].internalnorm, fus) - - stats = if idx == 1 - cache.caches[1].stats - elseif idx == 2 - cache.caches[2].stats - elseif idx == 3 - cache.caches[3].stats - elseif idx == 4 - cache.caches[4].stats - end - - u = if idx == 1 - cache.caches[1].u - elseif idx == 2 - cache.caches[2].u - elseif idx == 3 - cache.caches[3].u - elseif idx == 4 - cache.caches[4].u - end - - return SciMLBase.build_solution(cache.caches[1].prob, cache.alg, u, fu; - retcode, stats) - end + fus = tuple($(Tuple(resids)...)) + minfu, idx = findmin(cache.caches[1].internalnorm, fus) + stats = cache.caches[idx].stats + u = cache.caches[idx].u + + return SciMLBase.build_solution(cache.caches[idx].prob, cache.alg, u, + fus[idx]; retcode, stats) + end) + + return Expr(:block, calls...) end -function SciMLBase.reinit!(cache::Union{RobustMultiNewtonCache, FastShortcutNonlinearPolyalgCache}, args...; kwargs...) +function SciMLBase.reinit!(cache::Union{RobustMultiNewtonCache, + FastShortcutNonlinearPolyalgCache}, args...; kwargs...) for c in cache.caches SciMLBase.reinit!(c, args...; kwargs...) end @@ -367,14 +271,12 @@ end ## Defaults -function SciMLBase.__init(prob::NonlinearProblem{uType, iip}, alg::Nothing, args...; +function SciMLBase.__init(prob::NonlinearProblem{uType, iip}, alg::Nothing, args...; kwargs...) where {uType, iip} - SciMLBase.__init(prob, FastShortcutNonlinearPolyalg(), args...; kwargs...) end -function SciMLBase.__solve(prob::NonlinearProblem{uType, iip}, alg::Nothing, args...; +function SciMLBase.__solve(prob::NonlinearProblem{uType, iip}, alg::Nothing, args...; kwargs...) where {uType, iip} - SciMLBase.__solve(prob, FastShortcutNonlinearPolyalg(), args...; kwargs...) 
-end \ No newline at end of file +end diff --git a/src/dfsane.jl b/src/dfsane.jl index a77703906..13de5ff6a 100644 --- a/src/dfsane.jl +++ b/src/dfsane.jl @@ -57,97 +57,51 @@ struct DFSane{T, F} <: AbstractNonlinearSolveAlgorithm max_inner_iterations::Int end -function DFSane(; σ_min = 1e-10, - σ_max = 1e+10, - σ_1 = 1.0, - M = 10, - γ = 1e-4, - τ_min = 0.1, - τ_max = 0.5, - n_exp = 2, - η_strategy = (fn_1, n, x_n, f_n) -> fn_1 / n^2, - max_inner_iterations = 1000) - return DFSane{typeof(σ_min), typeof(η_strategy)}(σ_min, - σ_max, - σ_1, - M, - γ, - τ_min, - τ_max, - n_exp, - η_strategy, - max_inner_iterations) +function DFSane(; σ_min = 1e-10, σ_max = 1e+10, σ_1 = 1.0, M = 10, γ = 1e-4, τ_min = 0.1, + τ_max = 0.5, n_exp = 2, η_strategy = (fn_1, n, x_n, f_n) -> fn_1 / n^2, + max_inner_iterations = 1000) + return DFSane{typeof(σ_min), typeof(η_strategy)}(σ_min, σ_max, σ_1, M, γ, τ_min, τ_max, + n_exp, η_strategy, max_inner_iterations) end -mutable struct DFSaneCache{iip, algType, uType, resType, T, pType, - INType, - tolType, - probType} - f::Function - alg::algType - uₙ::uType - uₙ₋₁::uType - fuₙ::resType - fuₙ₋₁::resType - 𝒹::uType - ℋ::Vector{T} - f₍ₙₒᵣₘ₎ₙ₋₁::T - f₍ₙₒᵣₘ₎₀::T - M::Int - σₙ::T - σₘᵢₙ::T - σₘₐₓ::T - α₁::T - γ::T - τₘᵢₙ::T - τₘₐₓ::T + +@concrete mutable struct DFSaneCache{iip} + alg + uₙ + uₙ₋₁ + fuₙ + fuₙ₋₁ + 𝒹 + ℋ + f₍ₙₒᵣₘ₎ₙ₋₁ + f₍ₙₒᵣₘ₎₀ + M + σₙ + σₘᵢₙ + σₘₐₓ + α₁ + γ + τₘᵢₙ + τₘₐₓ nₑₓₚ::Int - p::pType + p force_stop::Bool maxiters::Int - internalnorm::INType + internalnorm retcode::SciMLBase.ReturnCode.T - abstol::tolType - prob::probType + abstol + prob stats::NLStats - function DFSaneCache{iip}(f::Function, alg::algType, uₙ::uType, uₙ₋₁::uType, - fuₙ::resType, fuₙ₋₁::resType, 𝒹::uType, ℋ::Vector{T}, - f₍ₙₒᵣₘ₎ₙ₋₁::T, f₍ₙₒᵣₘ₎₀::T, M::Int, σₙ::T, σₘᵢₙ::T, σₘₐₓ::T, - α₁::T, γ::T, τₘᵢₙ::T, τₘₐₓ::T, nₑₓₚ::Int, p::pType, - force_stop::Bool, maxiters::Int, internalnorm::INType, - retcode::SciMLBase.ReturnCode.T, abstol::tolType, - prob::probType, - stats::NLStats) where {iip, algType, uType, - resType, T, pType, INType, - tolType, - probType - } - new{iip, algType, uType, resType, T, pType, INType, tolType, - probType - }(f, alg, uₙ, uₙ₋₁, fuₙ, fuₙ₋₁, 𝒹, ℋ, f₍ₙₒᵣₘ₎ₙ₋₁, f₍ₙₒᵣₘ₎₀, M, σₙ, - σₘᵢₙ, σₘₐₓ, α₁, γ, τₘᵢₙ, - τₘₐₓ, nₑₓₚ, p, force_stop, maxiters, internalnorm, - retcode, - abstol, prob, stats) - end end -function SciMLBase.__init(prob::NonlinearProblem{uType, iip}, alg::DFSane, - args...; - alias_u0 = false, - maxiters = 1000, - abstol = 1e-6, - internalnorm = DEFAULT_NORM, - kwargs...) where {uType, iip} - if alias_u0 - uₙ = prob.u0 - else - uₙ = deepcopy(prob.u0) - end +function SciMLBase.__init(prob::NonlinearProblem{uType, iip}, alg::DFSane, args...; + alias_u0 = false, maxiters = 1000, abstol = 1e-6, internalnorm = DEFAULT_NORM, + kwargs...) where {uType, iip} + uₙ = alias_u0 ? 
prob.u0 : deepcopy(prob.u0) p = prob.p T = eltype(uₙ) σₘᵢₙ, σₘₐₓ, γ, τₘᵢₙ, τₘₐₓ = T(alg.σ_min), T(alg.σ_max), T(alg.γ), T(alg.τ_min), - T(alg.τ_max) + T(alg.τ_max) α₁ = one(T) γ = T(alg.γ) f₍ₙₒᵣₘ₎ₙ₋₁ = α₁ @@ -157,27 +111,27 @@ function SciMLBase.__init(prob::NonlinearProblem{uType, iip}, alg::DFSane, 𝒹, uₙ₋₁, fuₙ, fuₙ₋₁ = copy(uₙ), copy(uₙ), copy(uₙ), copy(uₙ) if iip - f = (dx, x) -> prob.f(dx, x, p) - f(fuₙ₋₁, uₙ₋₁) + # f = (dx, x) -> prob.f(dx, x, p) + # f(fuₙ₋₁, uₙ₋₁) + prob.f(fuₙ₋₁, uₙ₋₁, p) else - f = (x) -> prob.f(x, p) - fuₙ₋₁ = f(uₙ₋₁) + # f = (x) -> prob.f(x, p) + fuₙ₋₁ = prob.f(uₙ₋₁, p) # f(uₙ₋₁) end f₍ₙₒᵣₘ₎ₙ₋₁ = norm(fuₙ₋₁)^nₑₓₚ f₍ₙₒᵣₘ₎₀ = f₍ₙₒᵣₘ₎ₙ₋₁ ℋ = fill(f₍ₙₒᵣₘ₎ₙ₋₁, M) - return DFSaneCache{iip}(f, alg, uₙ, uₙ₋₁, fuₙ, fuₙ₋₁, 𝒹, ℋ, f₍ₙₒᵣₘ₎ₙ₋₁, f₍ₙₒᵣₘ₎₀, - M, σₙ, σₘᵢₙ, σₘₐₓ, α₁, γ, τₘᵢₙ, - τₘₐₓ, nₑₓₚ, p, false, maxiters, - internalnorm, ReturnCode.Default, abstol, prob, - NLStats(1, 0, 0, 0, 0)) + return DFSaneCache{iip}(alg, uₙ, uₙ₋₁, fuₙ, fuₙ₋₁, 𝒹, ℋ, f₍ₙₒᵣₘ₎ₙ₋₁, f₍ₙₒᵣₘ₎₀, + M, σₙ, σₘᵢₙ, σₘₐₓ, α₁, γ, τₘᵢₙ, τₘₐₓ, nₑₓₚ, p, false, maxiters, + internalnorm, ReturnCode.Default, abstol, prob, NLStats(1, 0, 0, 0, 0)) end function perform_step!(cache::DFSaneCache{true}) - @unpack f, alg, f₍ₙₒᵣₘ₎ₙ₋₁, f₍ₙₒᵣₘ₎₀, - σₙ, σₘᵢₙ, σₘₐₓ, α₁, γ, τₘᵢₙ, τₘₐₓ, nₑₓₚ, M = cache + @unpack alg, f₍ₙₒᵣₘ₎ₙ₋₁, f₍ₙₒᵣₘ₎₀, σₙ, σₘᵢₙ, σₘₐₓ, α₁, γ, τₘᵢₙ, τₘₐₓ, nₑₓₚ, M = cache + + f = (dx, x) -> cache.prob.f(dx, x, cache.p) T = eltype(cache.uₙ) n = cache.stats.nsteps @@ -202,10 +156,9 @@ function perform_step!(cache::DFSaneCache{true}) f₍ₙₒᵣₘ₎ₙ ≤ 𝒸 && break - α₊ = clamp(α₊^2 * f₍ₙₒᵣₘ₎ₙ₋₁ / - (f₍ₙₒᵣₘ₎ₙ + (T(2) * α₊ - T(1)) * f₍ₙₒᵣₘ₎ₙ₋₁), - τₘᵢₙ * α₊, - τₘₐₓ * α₊) + α₊ = clamp(α₊^2 * f₍ₙₒᵣₘ₎ₙ₋₁ / (f₍ₙₒᵣₘ₎ₙ + (T(2) * α₊ - T(1)) * f₍ₙₒᵣₘ₎ₙ₋₁), + τₘᵢₙ * α₊, + τₘₐₓ * α₊) @. cache.uₙ = cache.uₙ₋₁ - α₋ * cache.𝒹 f(cache.fuₙ, cache.uₙ) @@ -214,8 +167,8 @@ function perform_step!(cache::DFSaneCache{true}) f₍ₙₒᵣₘ₎ₙ .≤ 𝒸 && break α₋ = clamp(α₋^2 * f₍ₙₒᵣₘ₎ₙ₋₁ / (f₍ₙₒᵣₘ₎ₙ + (T(2) * α₋ - T(1)) * f₍ₙₒᵣₘ₎ₙ₋₁), - τₘᵢₙ * α₋, - τₘₐₓ * α₋) + τₘᵢₙ * α₋, + τₘₐₓ * α₋) @. cache.uₙ = cache.uₙ₋₁ + α₊ * cache.𝒹 f(cache.fuₙ, cache.uₙ) @@ -253,8 +206,9 @@ function perform_step!(cache::DFSaneCache{true}) end function perform_step!(cache::DFSaneCache{false}) - @unpack f, alg, f₍ₙₒᵣₘ₎ₙ₋₁, f₍ₙₒᵣₘ₎₀, - σₙ, σₘᵢₙ, σₘₐₓ, α₁, γ, τₘᵢₙ, τₘₐₓ, nₑₓₚ, M = cache + @unpack alg, f₍ₙₒᵣₘ₎ₙ₋₁, f₍ₙₒᵣₘ₎₀, σₙ, σₘᵢₙ, σₘₐₓ, α₁, γ, τₘᵢₙ, τₘₐₓ, nₑₓₚ, M = cache + + f = x -> cache.prob.f(x, cache.p) T = eltype(cache.uₙ) n = cache.stats.nsteps @@ -279,10 +233,8 @@ function perform_step!(cache::DFSaneCache{false}) f₍ₙₒᵣₘ₎ₙ ≤ 𝒸 && break - α₊ = clamp(α₊^2 * f₍ₙₒᵣₘ₎ₙ₋₁ / - (f₍ₙₒᵣₘ₎ₙ + (T(2) * α₊ - T(1)) * f₍ₙₒᵣₘ₎ₙ₋₁), - τₘᵢₙ * α₊, - τₘₐₓ * α₊) + α₊ = clamp(α₊^2 * f₍ₙₒᵣₘ₎ₙ₋₁ / (f₍ₙₒᵣₘ₎ₙ + (T(2) * α₊ - T(1)) * f₍ₙₒᵣₘ₎ₙ₋₁), + τₘᵢₙ * α₊, τₘₐₓ * α₊) cache.uₙ = @. cache.uₙ₋₁ - α₋ * cache.𝒹 cache.fuₙ = f(cache.uₙ) @@ -291,8 +243,7 @@ function perform_step!(cache::DFSaneCache{false}) f₍ₙₒᵣₘ₎ₙ .≤ 𝒸 && break α₋ = clamp(α₋^2 * f₍ₙₒᵣₘ₎ₙ₋₁ / (f₍ₙₒᵣₘ₎ₙ + (T(2) * α₋ - T(1)) * f₍ₙₒᵣₘ₎ₙ₋₁), - τₘᵢₙ * α₋, - τₘₐₓ * α₋) + τₘᵢₙ * α₋, τₘₐₓ * α₋) cache.uₙ = @. 
cache.uₙ₋₁ + α₊ * cache.𝒹 cache.fuₙ = f(cache.uₙ) @@ -341,25 +292,23 @@ function SciMLBase.solve!(cache::DFSaneCache) cache.retcode = ReturnCode.Success end - SciMLBase.build_solution(cache.prob, cache.alg, cache.uₙ, cache.fuₙ; - retcode = cache.retcode, stats = cache.stats) + return SciMLBase.build_solution(cache.prob, cache.alg, cache.uₙ, cache.fuₙ; + retcode = cache.retcode, stats = cache.stats) end function SciMLBase.reinit!(cache::DFSaneCache{iip}, u0 = cache.uₙ; p = cache.p, - abstol = cache.abstol, maxiters = cache.maxiters) where {iip} + abstol = cache.abstol, maxiters = cache.maxiters) where {iip} cache.p = p if iip recursivecopy!(cache.uₙ, u0) recursivecopy!(cache.uₙ₋₁, u0) - cache.f = (dx, x) -> cache.prob.f(dx, x, p) - cache.f(cache.fuₙ, cache.uₙ) - cache.f(cache.fuₙ₋₁, cache.uₙ) + cache.prob.f(cache.fuₙ, cache.uₙ, p) + cache.prob.f(cache.fuₙ₋₁, cache.uₙ, p) else cache.uₙ = u0 cache.uₙ₋₁ = u0 - cache.f = (x) -> cache.prob.f(x, p) - cache.fuₙ = cache.f(cache.uₙ) - cache.fuₙ₋₁ = cache.f(cache.uₙ) + cache.fuₙ = cache.prob.f(cache.uₙ, p) + cache.fuₙ₋₁ = cache.prob.f(cache.uₙ, p) end cache.f₍ₙₒᵣₘ₎ₙ₋₁ = norm(cache.fuₙ₋₁)^cache.nₑₓₚ diff --git a/src/gaussnewton.jl b/src/gaussnewton.jl index 42521189e..6dcb0f835 100644 --- a/src/gaussnewton.jl +++ b/src/gaussnewton.jl @@ -14,7 +14,8 @@ for large-scale and numerically-difficult nonlinear least squares problems. - `autodiff`: determines the backend used for the Jacobian. Note that this argument is ignored if an analytical Jacobian is passed, as that will be used instead. Defaults to - `AutoForwardDiff()`. Valid choices are types from ADTypes.jl. + `nothing` which means that a default is selected according to the problem specification! + Valid choices are types from ADTypes.jl. - `concrete_jac`: whether to build a concrete Jacobian. If a Krylov-subspace method is used, then the Jacobian will not be constructed and instead direct Jacobian-vector products `J*v` are computed using forward-mode automatic differentiation or finite differencing @@ -41,6 +42,10 @@ for large-scale and numerically-difficult nonlinear least squares problems. precs end +function set_ad(alg::GaussNewton{CJ}, ad) where {CJ} + return GaussNewton{CJ}(ad, alg.linsolve, alg.precs) +end + function GaussNewton(; concrete_jac = nothing, linsolve = CholeskyFactorization(), precs = DEFAULT_PRECS, adkwargs...) ad = default_adargs_to_adtype(; adkwargs...) @@ -71,9 +76,10 @@ end stats::NLStats end -function SciMLBase.__init(prob::NonlinearLeastSquaresProblem{uType, iip}, alg::GaussNewton, +function SciMLBase.__init(prob::NonlinearLeastSquaresProblem{uType, iip}, alg_::GaussNewton, args...; alias_u0 = false, maxiters = 1000, abstol = 1e-6, internalnorm = DEFAULT_NORM, kwargs...) where {uType, iip} + alg = get_concrete_algorithm(alg_, prob) @unpack f, u0, p = prob u = alias_u0 ? u0 : deepcopy(u0) if iip diff --git a/src/levenberg.jl b/src/levenberg.jl index 07a8b8251..f35f35cb2 100644 --- a/src/levenberg.jl +++ b/src/levenberg.jl @@ -14,7 +14,8 @@ numerically-difficult nonlinear systems. - `autodiff`: determines the backend used for the Jacobian. Note that this argument is ignored if an analytical Jacobian is passed, as that will be used instead. Defaults to - `AutoForwardDiff()`. Valid choices are types from ADTypes.jl. + `nothing` which means that a default is selected according to the problem specification! + Valid choices are types from ADTypes.jl. - `concrete_jac`: whether to build a concrete Jacobian. 
If a Krylov-subspace method is used, then the Jacobian will not be constructed and instead direct Jacobian-vector products `J*v` are computed using forward-mode automatic differentiation or finite differencing @@ -86,6 +87,12 @@ numerically-difficult nonlinear systems. min_damping_D::T end +function set_ad(alg::LevenbergMarquardt{CJ}, ad) where {CJ} + return LevenbergMarquardt{CJ}(ad, alg.linsolve, alg.precs, alg.damping_initial, + alg.damping_increase_factor, alg.damping_decrease_factor, + alg.finite_diff_step_geodesic, alg.α_geodesic, alg.b_uphill, alg.min_damping_D) +end + function LevenbergMarquardt(; concrete_jac = nothing, linsolve = nothing, precs = DEFAULT_PRECS, damping_initial::Real = 1.0, damping_increase_factor::Real = 2.0, damping_decrease_factor::Real = 3.0, finite_diff_step_geodesic::Real = 0.1, @@ -141,9 +148,10 @@ end end function SciMLBase.__init(prob::Union{NonlinearProblem{uType, iip}, - NonlinearLeastSquaresProblem{uType, iip}}, alg::LevenbergMarquardt, + NonlinearLeastSquaresProblem{uType, iip}}, alg_::LevenbergMarquardt, args...; alias_u0 = false, maxiters = 1000, abstol = 1e-6, internalnorm = DEFAULT_NORM, linsolve_kwargs = (;), kwargs...) where {uType, iip} + alg = get_concrete_algorithm(alg_, prob) @unpack f, u0, p = prob u = alias_u0 ? u0 : deepcopy(u0) fu1 = evaluate_f(prob, u) diff --git a/src/raphson.jl b/src/raphson.jl index a1fa1a1c5..642c7ee36 100644 --- a/src/raphson.jl +++ b/src/raphson.jl @@ -10,7 +10,8 @@ for large-scale and numerically-difficult nonlinear systems. - `autodiff`: determines the backend used for the Jacobian. Note that this argument is ignored if an analytical Jacobian is passed, as that will be used instead. Defaults to - `AutoForwardDiff()`. Valid choices are types from ADTypes.jl. + `nothing` which means that a default is selected according to the problem specification! + Valid choices are types from ADTypes.jl. - `concrete_jac`: whether to build a concrete Jacobian. If a Krylov-subspace method is used, then the Jacobian will not be constructed and instead direct Jacobian-vector products `J*v` are computed using forward-mode automatic differentiation or finite differencing @@ -36,6 +37,10 @@ for large-scale and numerically-difficult nonlinear systems. linesearch end +function set_ad(alg::NewtonRaphson{CJ}, ad) where {CJ} + return NewtonRaphson{CJ}(ad, alg.linsolve, alg.precs, alg.linesearch) +end + function NewtonRaphson(; concrete_jac = nothing, linsolve = nothing, linesearch = LineSearch(), precs = DEFAULT_PRECS, adkwargs...) ad = default_adargs_to_adtype(; adkwargs...) @@ -65,9 +70,10 @@ end lscache end -function SciMLBase.__init(prob::NonlinearProblem{uType, iip}, alg::NewtonRaphson, args...; +function SciMLBase.__init(prob::NonlinearProblem{uType, iip}, alg_::NewtonRaphson, args...; alias_u0 = false, maxiters = 1000, abstol = 1e-6, internalnorm = DEFAULT_NORM, linsolve_kwargs = (;), kwargs...) where {uType, iip} + alg = get_concrete_algorithm(alg_, prob) @unpack f, u0, p = prob u = alias_u0 ? u0 : deepcopy(u0) fu1 = evaluate_f(prob, u) diff --git a/src/trustRegion.jl b/src/trustRegion.jl index 50d1078f3..769f5e75c 100644 --- a/src/trustRegion.jl +++ b/src/trustRegion.jl @@ -28,14 +28,14 @@ states as `RadiusUpdateSchemes.T`. Simply put the desired scheme as follows: """ `RadiusUpdateSchemes.NLsolve` - The same updating scheme as in NLsolve's (https://github.com/JuliaNLSolvers/NLsolve.jl) trust region dogleg implementation. 
+ The same updating scheme as in NLsolve's (https://github.com/JuliaNLSolvers/NLsolve.jl) trust region dogleg implementation. """ NLsolve """ `RadiusUpdateSchemes.NocedalWright` - Trust region updating scheme as in Nocedal and Wright [see Alg 11.5, page 291]. + Trust region updating scheme as in Nocedal and Wright [see Alg 11.5, page 291]. """ NocedalWright @@ -97,7 +97,8 @@ for large-scale and numerically-difficult nonlinear systems. - `autodiff`: determines the backend used for the Jacobian. Note that this argument is ignored if an analytical Jacobian is passed, as that will be used instead. Defaults to - `AutoForwardDiff()`. Valid choices are types from ADTypes.jl. + `nothing` which means that a default is selected according to the problem specification!. + Valid choices are types from ADTypes.jl. - `concrete_jac`: whether to build a concrete Jacobian. If a Krylov-subspace method is used, then the Jacobian will not be constructed and instead direct Jacobian-vector products `J*v` are computed using forward-mode automatic differentiation or finite differencing @@ -162,6 +163,13 @@ for large-scale and numerically-difficult nonlinear systems. max_shrink_times::Int end +function set_ad(alg::TrustRegion{CJ}, ad) where {CJ} + return TrustRegion{CJ}(ad, alg.linsolve, alg.precs, alg.radius_update_scheme, + alg.max_trust_radius, alg.initial_trust_radius, alg.step_threshold, + alg.shrink_threshold, alg.expand_threshold, alg.shrink_factor, alg.expand_factor, + alg.max_shrink_times) +end + function TrustRegion(; concrete_jac = nothing, linsolve = nothing, precs = DEFAULT_PRECS, radius_update_scheme::RadiusUpdateSchemes.T = RadiusUpdateSchemes.Simple, #defaults to conventional radius update max_trust_radius::Real = 0 // 1, initial_trust_radius::Real = 0 // 1, @@ -222,9 +230,10 @@ end stats::NLStats end -function SciMLBase.__init(prob::NonlinearProblem{uType, iip}, alg::TrustRegion, args...; +function SciMLBase.__init(prob::NonlinearProblem{uType, iip}, alg_::TrustRegion, args...; alias_u0 = false, maxiters = 1000, abstol = 1e-8, internalnorm = DEFAULT_NORM, linsolve_kwargs = (;), kwargs...) where {uType, iip} + alg = get_concrete_algorithm(alg_, prob) @unpack f, u0, p = prob u = alias_u0 ? 
u0 : deepcopy(u0) u_prev = zero(u) @@ -239,7 +248,7 @@ function SciMLBase.__init(prob::NonlinearProblem{uType, iip}, alg::TrustRegion, u_gauss_newton = zero(u) loss_new = loss - H = zero(J) + H = zero(J' * J) g = _mutable_zero(fu1) shrink_counter = 0 fu_new = zero(fu1) @@ -341,7 +350,7 @@ function perform_step!(cache::TrustRegionCache{true}) mul!(cache.g, J', fu) cache.stats.njacs += 1 - # do not use A = cache.H, b = _vec(cache.g) since it is equivalent + # do not use A = cache.H, b = _vec(cache.g) since it is equivalent # to A = cache.J, b = _vec(fu) as long as the Jacobian is non-singular linres = dolinsolve(alg.precs, linsolve, A = J, b = _vec(fu), linu = _vec(u_gauss_newton), @@ -450,12 +459,12 @@ function trust_region_step!(cache::TrustRegionCache) cache.make_new_J = false end - # trust region update - if r < 1 // 10 # cache.shrink_threshold - cache.trust_r *= 1 // 2 # cache.shrink_factor - elseif r >= 9 // 10 # cache.expand_threshold - cache.trust_r = 2 * norm(cache.du) # cache.expand_factor * norm(cache.du) - elseif r >= 1 // 2 # cache.p1 + # trust region update + if r < 1 // 10 # cache.shrink_threshold + cache.trust_r *= 1 // 2 # cache.shrink_factor + elseif r >= 9 // 10 # cache.expand_threshold + cache.trust_r = 2 * norm(cache.du) # cache.expand_factor * norm(cache.du) + elseif r >= 1 // 2 # cache.p1 cache.trust_r = max(cache.trust_r, 2 * norm(cache.du)) # cache.expand_factor * norm(cache.du)) end diff --git a/src/utils.jl b/src/utils.jl index 6dd1eeb75..173c279be 100644 --- a/src/utils.jl +++ b/src/utils.jl @@ -13,28 +13,41 @@ end @inline DEFAULT_NORM(u::AbstractArray) = sqrt(real(sum(UNITLESS_ABS2, u)) / length(u)) @inline DEFAULT_NORM(u) = norm(u) -alg_autodiff(alg::AbstractNewtonAlgorithm{<:AbstractFiniteDifferencesMode}) = false -alg_autodiff(alg::AbstractNewtonAlgorithm) = true -alg_autodiff(alg) = false - """ default_adargs_to_adtype(; chunk_size = Val{0}(), autodiff = Val{true}(), standardtag = Val{true}(), diff_type = Val{:forward}) Construct the AD type from the arguments. This is mostly needed for compatibility with older code. + +!!! warning + `chunk_size`, `standardtag`, `diff_type`, and `autodiff::Union{Val, Bool}` are + deprecated and will be removed in v3. Update your code to directly specify + `autodiff=`. """ -function default_adargs_to_adtype(; chunk_size = Val{0}(), autodiff = Val{true}(), - standardtag = Val{true}(), diff_type = Val{:forward}()) - ad = _unwrap_val(autodiff) - # Old API - if ad isa Bool - # FIXME: standardtag is not the Tag - ad && return AutoForwardDiff(; chunksize = _unwrap_val(chunk_size), - tag = _unwrap_val(standardtag)) - return AutoFiniteDiff(; fdtype = diff_type) +function default_adargs_to_adtype(; chunk_size = missing, autodiff = nothing, + standardtag = missing, diff_type = missing) + # If using the new API short circuit + autodiff === nothing && return nothing + autodiff isa ADTypes.AbstractADType && return autodiff + + # Deprecate the old version + if chunk_size !== missing || standardtag !== missing || diff_type !== missing || + autodiff !== missing + Base.depwarn("`chunk_size`, `standardtag`, `diff_type`, \ + `autodiff::Union{Val, Bool}` kwargs have been deprecated and will be removed in\ + v3. 
Update your code to directly specify autodiff=", + :default_adargs_to_adtype) end - return ad + chunk_size === missing && (chunk_size = Val{0}()) + standardtag === missing && (standardtag = Val{true}()) + diff_type === missing && (diff_type = Val{:forward}()) + autodiff === missing && (autodiff = Val{true}()) + + ad = _unwrap_val(autodiff) + tag = _unwrap_val(standardtag) + ad && return AutoForwardDiff{_unwrap_val(chunk_size), typeof(tag)}(tag) + return AutoFiniteDiff(; fdtype = diff_type) end """ @@ -171,3 +184,27 @@ Defaults to `mul!(C, A, B)`. However, for sparse matrices uses `C .= A * B`. """ __matmul!(C, A, B) = mul!(C, A, B) __matmul!(C::AbstractSparseMatrix, A, B) = C .= A * B + +# Concretize Algorithms +function get_concrete_algorithm(alg, prob) + !hasfield(typeof(alg), :ad) && return alg + alg.ad isa ADTypes.AbstractADType && return alg + + # Figure out the default AD + # Now that we have handed trivial cases, we can allow extending this function + # for specific algorithms + return __get_concrete_algorithm(alg, prob) +end + +function __get_concrete_algorithm(alg, prob) + @unpack sparsity, jac_prototype = prob.f + use_sparse_ad = sparsity !== nothing || jac_prototype !== nothing + ad = if eltype(prob.u0) <: Complex + # Use Finite Differencing + use_sparse_ad ? AutoSparseFiniteDiff() : AutoFiniteDiff() + else + use_sparse_ad ? AutoSparseForwardDiff() : + AutoForwardDiff{ForwardDiff.pickchunksize(length(prob.u0)), Nothing}(nothing) + end + return set_ad(alg, ad) +end diff --git a/test/basictests.jl b/test/basictests.jl index 51756cf97..06cfc103d 100644 --- a/test/basictests.jl +++ b/test/basictests.jl @@ -225,7 +225,8 @@ end radius_update_scheme in radius_update_schemes probN = NonlinearProblem(quadratic_f, u0, 2.0) - @test all(solve(probN, TrustRegion(; autodiff, radius_update_scheme)).u .≈ sqrt(2.0)) + @test all(solve(probN, TrustRegion(; autodiff, radius_update_scheme)).u .≈ + sqrt(2.0)) end # Test that `TrustRegion` passes a test that `NewtonRaphson` fails on. 
@@ -391,18 +392,17 @@ end end end - # --- DFSane tests --- @testset "DFSane" begin - function benchmark_nlsolve_oop(f, u0, p=2.0) + function benchmark_nlsolve_oop(f, u0, p = 2.0) prob = NonlinearProblem{false}(f, u0, p) - return solve(prob, DFSane(), abstol=1e-9) + return solve(prob, DFSane(), abstol = 1e-9) end - function benchmark_nlsolve_iip(f, u0, p=2.0) + function benchmark_nlsolve_iip(f, u0, p = 2.0) prob = NonlinearProblem{true}(f, u0, p) - return solve(prob, DFSane(), abstol=1e-9) + return solve(prob, DFSane(), abstol = 1e-9) end u0s = ([1.0, 1.0], @SVector[1.0, 1.0], 1.0) @@ -413,7 +413,7 @@ end @test all(abs.(sol.u .* sol.u .- 2) .< 1e-9) cache = init(NonlinearProblem{false}(quadratic_f, u0, 2.0), DFSane(), - abstol=1e-9) + abstol = 1e-9) @test (@ballocated solve!($cache)) < 200 end @@ -424,11 +424,10 @@ end @test all(abs.(sol.u .* sol.u .- 2) .< 1e-9) cache = init(NonlinearProblem{true}(quadratic_f!, u0, 2.0), - DFSane(), abstol=1e-9) + DFSane(), abstol = 1e-9) @test (@ballocated solve!($cache)) ≤ 64 end - @testset "[OOP] [Immutable AD]" begin broken_forwarddiff = [1.6, 2.9, 3.0, 3.5, 4.0, 81.0] for p in 1.1:0.1:100.0 @@ -437,14 +436,14 @@ end if any(x -> isnan(x) || x <= 1e-5 || x >= 1e5, res) @test_broken all(res .≈ sqrt(p)) @test_broken abs.(ForwardDiff.derivative(p -> benchmark_nlsolve_oop(quadratic_f, - @SVector[1.0, 1.0], p).u[end], p)) ≈ 1 / (2 * sqrt(p)) + @SVector[1.0, 1.0], p).u[end], p)) ≈ 1 / (2 * sqrt(p)) elseif p in broken_forwarddiff @test_broken abs.(ForwardDiff.derivative(p -> benchmark_nlsolve_oop(quadratic_f, - @SVector[1.0, 1.0], p).u[end], p)) ≈ 1 / (2 * sqrt(p)) + @SVector[1.0, 1.0], p).u[end], p)) ≈ 1 / (2 * sqrt(p)) else @test all(res .≈ sqrt(p)) @test isapprox(abs.(ForwardDiff.derivative(p -> benchmark_nlsolve_oop(quadratic_f, - @SVector[1.0, 1.0], p).u[end], p)), 1 / (2 * sqrt(p))) + @SVector[1.0, 1.0], p).u[end], p)), 1 / (2 * sqrt(p))) end end end @@ -456,12 +455,22 @@ end if any(x -> isnan(x) || x <= 1e-5 || x >= 1e5, res) @test_broken res ≈ sqrt(p) - @test_broken abs.(ForwardDiff.derivative(p -> benchmark_nlsolve_oop(quadratic_f, 1.0, p).u, p)) ≈ 1 / (2 * sqrt(p)) + @test_broken abs.(ForwardDiff.derivative(p -> benchmark_nlsolve_oop(quadratic_f, + 1.0, + p).u, + p)) ≈ 1 / (2 * sqrt(p)) elseif p in broken_forwarddiff - @test_broken abs.(ForwardDiff.derivative(p -> benchmark_nlsolve_oop(quadratic_f, 1.0, p).u, p)) ≈ 1 / (2 * sqrt(p)) + @test_broken abs.(ForwardDiff.derivative(p -> benchmark_nlsolve_oop(quadratic_f, + 1.0, + p).u, + p)) ≈ 1 / (2 * sqrt(p)) else @test res ≈ sqrt(p) - @test isapprox(abs.(ForwardDiff.derivative(p -> benchmark_nlsolve_oop(quadratic_f, 1.0, p).u, p)), 1 / (2 * sqrt(p))) + @test isapprox(abs.(ForwardDiff.derivative(p -> benchmark_nlsolve_oop(quadratic_f, + 1.0, + p).u, + p)), + 1 / (2 * sqrt(p))) end end end @@ -475,20 +484,19 @@ end # Iterator interface function nlprob_iterator_interface(f, p_range, ::Val{iip}) where {iip} probN = NonlinearProblem{iip}(f, iip ? [0.5] : 0.5, p_range[begin]) - cache = init(probN, DFSane(); maxiters=100, abstol=1e-10) + cache = init(probN, DFSane(); maxiters = 100, abstol = 1e-10) sols = zeros(length(p_range)) for (i, p) in enumerate(p_range) - reinit!(cache, iip ? [cache.uₙ[1]] : cache.uₙ; p=p) + reinit!(cache, iip ? [cache.uₙ[1]] : cache.uₙ; p = p) sol = solve!(cache) sols[i] = iip ? 
sol.u[1] : sol.u end return sols end - p = range(0.01, 2, length=200) + p = range(0.01, 2, length = 200) @test abs.(nlprob_iterator_interface(quadratic_f, p, Val(false))) ≈ sqrt.(p) @test abs.(nlprob_iterator_interface(quadratic_f!, p, Val(true))) ≈ sqrt.(p) - # Test that `DFSane` passes a test that `NewtonRaphson` fails on. @testset "Newton Raphson Fails" begin u0 = [-10.0, -1.0, 1.0, 2.0, 3.0, 4.0, 10.0] @@ -518,18 +526,18 @@ end η_strategy) for options in list_of_options local probN, sol, alg - alg = DFSane(σ_min=options[1], - σ_max=options[2], - σ_1=options[3], - M=options[4], - γ=options[5], - τ_min=options[6], - τ_max=options[7], - n_exp=options[8], - η_strategy=options[9]) + alg = DFSane(σ_min = options[1], + σ_max = options[2], + σ_1 = options[3], + M = options[4], + γ = options[5], + τ_min = options[6], + τ_max = options[7], + n_exp = options[8], + η_strategy = options[9]) probN = NonlinearProblem{false}(quadratic_f, [1.0, 1.0], 2.0) - sol = solve(probN, alg, abstol=1e-11) + sol = solve(probN, alg, abstol = 1e-11) println(abs.(quadratic_f(sol.u, 2.0))) @test all(abs.(quadratic_f(sol.u, 2.0)) .< 1e-10) end diff --git a/test/polyalgs.jl b/test/polyalgs.jl index 0bdfe42f8..4497eae97 100644 --- a/test/polyalgs.jl +++ b/test/polyalgs.jl @@ -2,13 +2,28 @@ using NonlinearSolve, Test f(u, p) = u .* u .- 2 u0 = [1.0, 1.0] -probN = NonlinearProblem(f, u0) -@time solver = solve(probN, abstol = 1e-9) -@time solver = solve(probN, RobustMultiNewton(), abstol = 1e-9) -@time solver = solve(probN, FastShortcutNonlinearPolyalg(), abstol = 1e-9) +probN = NonlinearProblem{false}(f, u0) + +# Uses the `__solve` function +@time solver = solve(probN; abstol = 1e-9) +@test SciMLBase.successful_retcode(solver) +@time solver = solve(probN, RobustMultiNewton(); abstol = 1e-9) +@test SciMLBase.successful_retcode(solver) +@time solver = solve(probN, FastShortcutNonlinearPolyalg(); abstol = 1e-9) +@test SciMLBase.successful_retcode(solver) + +# Test the caching interface +cache = init(probN; abstol = 1e-9); +@time solver = solve!(cache) +@test SciMLBase.successful_retcode(solver) +cache = init(probN, RobustMultiNewton(); abstol = 1e-9); +@time solver = solve!(cache) +@test SciMLBase.successful_retcode(solver) +cache = init(probN, FastShortcutNonlinearPolyalg(); abstol = 1e-9); +@time solver = solve!(cache) +@test SciMLBase.successful_retcode(solver) # https://github.com/SciML/NonlinearSolve.jl/issues/153 - function f(du, u, p) s1, s1s2, s2 = u k1, c1, Δt = p @@ -18,13 +33,12 @@ function f(du, u, p) du[3] = -0.25 * c1 * k1 * s1 * s2 end -prob = NonlinearProblem(f, [2.0,2.0,2.0], [1.0, 2.0, 2.5]) +prob = NonlinearProblem(f, [2.0, 2.0, 2.0], [1.0, 2.0, 2.5]) sol = solve(prob) @test SciMLBase.successful_retcode(sol) # https://github.com/SciML/NonlinearSolve.jl/issues/187 - -ff(u, p) = 0.5/1.5*log.(u./(1.0.-u)) .- 2.0*u .+1.0 +ff(u, p) = 0.5 / 1.5 * log.(u ./ (1.0 .- u)) .- 2.0 * u .+ 1.0 uspan = (0.02, 0.1) prob = IntervalNonlinearProblem(ff, uspan) @@ -35,4 +49,4 @@ u0 = 0.06 p = 2.0 prob = NonlinearProblem(ff, u0, p) solver = solve(prob) -@test SciMLBase.successful_retcode(sol) \ No newline at end of file +@test SciMLBase.successful_retcode(sol)
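
Below is a minimal usage sketch of the interfaces exercised by the updated `test/polyalgs.jl` above: solving with the default algorithm (passing no algorithm routes to `FastShortcutNonlinearPolyalg()`), solving with an explicit polyalgorithm, and driving the new `init`/`solve!` caching path. The problem setup mirrors the test file; the tolerance value is illustrative rather than required.

```julia
using NonlinearSolve

# Same toy system as in test/polyalgs.jl: find u with u .* u .- 2 == 0 from u0 = [1.0, 1.0].
f(u, p) = u .* u .- 2
u0 = [1.0, 1.0]
prob = NonlinearProblem{false}(f, u0)

# Default algorithm: `solve` without an algorithm dispatches to
# `FastShortcutNonlinearPolyalg()`, which tries fast methods first and only
# falls back to more robust techniques if the faster ones fail.
sol = solve(prob; abstol = 1e-9)
@assert SciMLBase.successful_retcode(sol)

# Explicit polyalgorithm choice.
sol = solve(prob, RobustMultiNewton(); abstol = 1e-9)
@assert SciMLBase.successful_retcode(sol)

# Caching interface: `init` builds one cache per sub-solver up front, and
# `solve!` runs them in order until one of them converges.
cache = init(prob, FastShortcutNonlinearPolyalg(); abstol = 1e-9)
sol = solve!(cache)
@assert SciMLBase.successful_retcode(sol)
```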