diff --git a/.JuliaFormatter.toml b/.JuliaFormatter.toml
index 0f93ea574..3b82401e8 100644
--- a/.JuliaFormatter.toml
+++ b/.JuliaFormatter.toml
@@ -1,3 +1,4 @@
 style = "sciml"
 format_markdown = true
+format_docstrings = true
 annotate_untyped_fields_with_any = false
\ No newline at end of file
diff --git a/benchmarks/applelu.jl b/benchmarks/applelu.jl
index c346ff0c9..c0250a687 100644
--- a/benchmarks/applelu.jl
+++ b/benchmarks/applelu.jl
@@ -23,7 +23,7 @@ algs = [
     GenericLUFactorization(),
     RFLUFactorization(),
     AppleAccelerateLUFactorization(),
-    MetalLUFactorization(),
+    MetalLUFactorization()
 ]
 res = [Float32[] for i in 1:length(algs)]
diff --git a/benchmarks/lu.jl b/benchmarks/lu.jl
index 7ab624b45..b8d76a517 100644
--- a/benchmarks/lu.jl
+++ b/benchmarks/lu.jl
@@ -24,7 +24,7 @@ algs = [
     RFLUFactorization(),
     MKLLUFactorization(),
     FastLUFactorization(),
-    SimpleLUFactorization(),
+    SimpleLUFactorization()
 ]
 res = [Float64[] for i in 1:length(algs)]
diff --git a/benchmarks/sparselu.jl b/benchmarks/sparselu.jl
index db917b1cc..aa714f771 100644
--- a/benchmarks/sparselu.jl
+++ b/benchmarks/sparselu.jl
@@ -36,7 +36,7 @@ algs = [
     UMFPACKFactorization(),
     KLUFactorization(),
     MKLPardisoFactorize(),
-    SparspakFactorization(),
+    SparspakFactorization()
 ]
 cols = [:red, :blue, :green, :magenta, :turqoise] # one color per alg
 lst = [:dash, :solid, :dashdot] # one line style per dim
@@ -65,7 +65,8 @@ function run_and_plot(; dims = [1, 2, 3], kmax = 12)
         u0 = rand(rng, n)

         for j in 1:length(algs)
-            bt = @belapsed solve(prob, $(algs[j])).u setup=(prob = LinearProblem(copy($A),
+            bt = @belapsed solve(prob, $(algs[j])).u setup=(prob = LinearProblem(
+                copy($A),
                 copy($b);
                 u0 = copy($u0),
                 alias_A = true,
diff --git a/docs/pages.jl b/docs/pages.jl
index d8f2e3cda..efb625df1 100644
--- a/docs/pages.jl
+++ b/docs/pages.jl
@@ -2,7 +2,7 @@
 pages = ["index.md",
     "Tutorials" => Any["tutorials/linear.md"
-        "tutorials/caching_interface.md"],
+                       "tutorials/caching_interface.md"],
     "Basics" => Any["basics/LinearProblem.md",
         "basics/common_solver_opts.md",
         "basics/OperatorAssumptions.md",
@@ -10,6 +10,6 @@
         "basics/FAQ.md"],
     "Solvers" => Any["solvers/solvers.md"],
     "Advanced" => Any["advanced/developing.md"
-        "advanced/custom.md"],
-    "Release Notes" => "release_notes.md",
+                      "advanced/custom.md"],
+    "Release Notes" => "release_notes.md"
 ]
diff --git a/docs/src/advanced/developing.md b/docs/src/advanced/developing.md
index f3861173b..c15c8b70e 100644
--- a/docs/src/advanced/developing.md
+++ b/docs/src/advanced/developing.md
@@ -18,7 +18,7 @@ basic machinery.
 A simplified version is:

 struct MyLUFactorization{P} <: SciMLBase.AbstractLinearAlgorithm end

 function init_cacheval(alg::MyLUFactorization, A, b, u, Pl, Pr, maxiters, abstol, reltol,
-    verbose)
+        verbose)
     lu!(convert(AbstractMatrix, A))
 end
diff --git a/ext/LinearSolveBandedMatricesExt.jl b/ext/LinearSolveBandedMatricesExt.jl
index c6d851894..3941b0864 100644
--- a/ext/LinearSolveBandedMatricesExt.jl
+++ b/ext/LinearSolveBandedMatricesExt.jl
@@ -2,7 +2,8 @@ module LinearSolveBandedMatricesExt

 using BandedMatrices, LinearAlgebra, LinearSolve
 import LinearSolve: defaultalg,
-    do_factorization, init_cacheval, DefaultLinearSolver, DefaultAlgorithmChoice
+                    do_factorization, init_cacheval, DefaultLinearSolver,
+                    DefaultAlgorithmChoice

 # Defaults for BandedMatrices
 function defaultalg(A::BandedMatrix, b, oa::OperatorAssumptions{Bool})
@@ -35,14 +36,14 @@ for alg in (:SVDFactorization, :MKLLUFactorization, :DiagonalFactorization,
     :AppleAccelerateLUFactorization, :CholeskyFactorization)
     @eval begin
         function init_cacheval(::$(alg), ::BandedMatrix, b, u, Pl, Pr, maxiters::Int,
-            abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions)
+                abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions)
             return nothing
         end
     end
 end

 function init_cacheval(::LUFactorization, A::BandedMatrix, b, u, Pl, Pr, maxiters::Int,
-    abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions)
+        abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions)
     return lu(similar(A, 0, 0))
 end

@@ -54,8 +55,8 @@ for alg in (:SVDFactorization, :MKLLUFactorization, :DiagonalFactorization,
     :AppleAccelerateLUFactorization, :QRFactorization, :LUFactorization)
     @eval begin
         function init_cacheval(::$(alg), ::Symmetric{<:Number, <:BandedMatrix}, b, u, Pl,
-            Pr, maxiters::Int, abstol, reltol, verbose::Bool,
-            assumptions::OperatorAssumptions)
+                Pr, maxiters::Int, abstol, reltol, verbose::Bool,
+                assumptions::OperatorAssumptions)
             return nothing
         end
     end
diff --git a/ext/LinearSolveBlockDiagonalsExt.jl b/ext/LinearSolveBlockDiagonalsExt.jl
index 4b1827200..1109bc4db 100644
--- a/ext/LinearSolveBlockDiagonalsExt.jl
+++ b/ext/LinearSolveBlockDiagonalsExt.jl
@@ -3,7 +3,7 @@ module LinearSolveBlockDiagonalsExt

 using LinearSolve, BlockDiagonals

 function LinearSolve.init_cacheval(alg::SimpleGMRES{false}, A::BlockDiagonal, b, args...;
-    kwargs...)
+        kwargs...)
     @assert ndims(A)==2 "ndims(A) == $(ndims(A)). `A` must have ndims == 2."
     # We need to perform this check even when `zeroinit == true`, since the type of the
     # cache is dependent on whether we are able to use the specialized dispatch.
diff --git a/ext/LinearSolveCUDAExt.jl b/ext/LinearSolveCUDAExt.jl
index c036d53e0..1cfb0888e 100644
--- a/ext/LinearSolveCUDAExt.jl
+++ b/ext/LinearSolveCUDAExt.jl
@@ -6,7 +6,7 @@ using LinearSolve.LinearAlgebra, LinearSolve.SciMLBase, LinearSolve.ArrayInterface

 using SciMLBase: AbstractSciMLOperator

 function SciMLBase.solve!(cache::LinearSolve.LinearCache, alg::CudaOffloadFactorization;
-    kwargs...)
+        kwargs...)
     if cache.isfresh
         fact = qr(CUDA.CuArray(cache.A))
         cache.cacheval = fact
@@ -18,8 +18,8 @@ function SciMLBase.solve!(cache::LinearSolve.LinearCache, alg::CudaOffloadFactor
 end

 function LinearSolve.init_cacheval(alg::CudaOffloadFactorization, A, b, u, Pl, Pr,
-    maxiters::Int, abstol, reltol, verbose::Bool,
-    assumptions::OperatorAssumptions)
+        maxiters::Int, abstol, reltol, verbose::Bool,
+        assumptions::OperatorAssumptions)
     qr(CUDA.CuArray(A))
 end
diff --git a/ext/LinearSolveEnzymeExt.jl b/ext/LinearSolveEnzymeExt.jl
index 304e302d2..c8d89e874 100644
--- a/ext/LinearSolveEnzymeExt.jl
+++ b/ext/LinearSolveEnzymeExt.jl
@@ -4,12 +4,13 @@ using LinearSolve
 using LinearSolve.LinearAlgebra

 isdefined(Base, :get_extension) ? (import Enzyme) : (import ..Enzyme)
-
 using Enzyme
 using EnzymeCore

-function EnzymeCore.EnzymeRules.forward(func::Const{typeof(LinearSolve.init)}, ::Type{RT}, prob::EnzymeCore.Annotation{LP}, alg::Const; kwargs...) where {RT, LP <: LinearSolve.LinearProblem}
+function EnzymeCore.EnzymeRules.forward(
+        func::Const{typeof(LinearSolve.init)}, ::Type{RT}, prob::EnzymeCore.Annotation{LP},
+        alg::Const; kwargs...) where {RT, LP <: LinearSolve.LinearProblem}
     @assert !(prob isa Const)
     res = func.val(prob.val, alg.val; kwargs...)
     if RT <: Const
@@ -26,11 +27,13 @@ function EnzymeCore.EnzymeRules.forward(func::Const{typeof(LinearSolve.init)}, :
     error("Unsupported return type $RT")
 end

-function EnzymeCore.EnzymeRules.forward(func::Const{typeof(LinearSolve.solve!)}, ::Type{RT}, linsolve::EnzymeCore.Annotation{LP}; kwargs...) where {RT, LP <: LinearSolve.LinearCache}
+function EnzymeCore.EnzymeRules.forward(func::Const{typeof(LinearSolve.solve!)},
+        ::Type{RT}, linsolve::EnzymeCore.Annotation{LP};
+        kwargs...) where {RT, LP <: LinearSolve.LinearCache}
     @assert !(linsolve isa Const)

     res = func.val(linsolve.val; kwargs...)
-    
+
     if RT <: Const
         return res
     end
@@ -56,7 +59,10 @@ function EnzymeCore.EnzymeRules.forward(func::Const{typeof(LinearSolve.solve!)},
     return Duplicated(res, dres)
 end

-function EnzymeCore.EnzymeRules.augmented_primal(config, func::Const{typeof(LinearSolve.init)}, ::Type{RT}, prob::EnzymeCore.Annotation{LP}, alg::Const; kwargs...) where {RT, LP <: LinearSolve.LinearProblem}
+function EnzymeCore.EnzymeRules.augmented_primal(
+        config, func::Const{typeof(LinearSolve.init)},
+        ::Type{RT}, prob::EnzymeCore.Annotation{LP}, alg::Const;
+        kwargs...) where {RT, LP <: LinearSolve.LinearProblem}
     res = func.val(prob.val, alg.val; kwargs...)
     dres = if EnzymeRules.width(config) == 1
         func.val(prob.dval, alg.val; kwargs...)
@@ -77,7 +83,6 @@ function EnzymeCore.EnzymeRules.augmented_primal(config, func::Const{typeof(Line
         (dval.b for dval in dres)
     end

-
     prob_d_A = if EnzymeRules.width(config) == 1
         prob.dval.A
     else
@@ -92,7 +97,10 @@ function EnzymeCore.EnzymeRules.augmented_primal(config, func::Const{typeof(Line
     return EnzymeCore.EnzymeRules.AugmentedReturn(res, dres, (d_A, d_b, prob_d_A, prob_d_b))
 end

-function EnzymeCore.EnzymeRules.reverse(config, func::Const{typeof(LinearSolve.init)}, ::Type{RT}, cache, prob::EnzymeCore.Annotation{LP}, alg::Const; kwargs...) where {RT, LP <: LinearSolve.LinearProblem}
+function EnzymeCore.EnzymeRules.reverse(
+        config, func::Const{typeof(LinearSolve.init)}, ::Type{RT},
+        cache, prob::EnzymeCore.Annotation{LP}, alg::Const;
+        kwargs...) where {RT, LP <: LinearSolve.LinearProblem}
     d_A, d_b, prob_d_A, prob_d_b = cache

     if EnzymeRules.width(config) == 1
@@ -105,7 +113,7 @@ function EnzymeCore.EnzymeRules.reverse(config, func::Const{typeof(LinearSolve.i
             d_b .= 0
         end
     else
-        for (_prob_d_A,_d_A,_prob_d_b, _d_b) in zip(prob_d_A, d_A, prob_d_b, d_b)
+        for (_prob_d_A, _d_A, _prob_d_b, _d_b) in zip(prob_d_A, d_A, prob_d_b, d_b)
             if _d_A !== _prob_d_A
                 _prob_d_A .+= _d_A
                 _d_A .= 0
@@ -123,7 +131,10 @@ end
 # y=inv(A) B
 # dA −= z y^T
 # dB += z, where z = inv(A^T) dy
-function EnzymeCore.EnzymeRules.augmented_primal(config, func::Const{typeof(LinearSolve.solve!)}, ::Type{RT}, linsolve::EnzymeCore.Annotation{LP}; kwargs...) where {RT, LP <: LinearSolve.LinearCache}
+function EnzymeCore.EnzymeRules.augmented_primal(
+        config, func::Const{typeof(LinearSolve.solve!)},
+        ::Type{RT}, linsolve::EnzymeCore.Annotation{LP};
+        kwargs...) where {RT, LP <: LinearSolve.LinearCache}
     res = func.val(linsolve.val; kwargs...)

     dres = if EnzymeRules.width(config) == 1
@@ -176,7 +187,9 @@ function EnzymeCore.EnzymeRules.augmented_primal(config, func::Const{typeof(Line
     return EnzymeCore.EnzymeRules.AugmentedReturn(res, dres, cache)
 end

-function EnzymeCore.EnzymeRules.reverse(config, func::Const{typeof(LinearSolve.solve!)}, ::Type{RT}, cache, linsolve::EnzymeCore.Annotation{LP}; kwargs...) where {RT, LP <: LinearSolve.LinearCache}
+function EnzymeCore.EnzymeRules.reverse(config, func::Const{typeof(LinearSolve.solve!)},
+        ::Type{RT}, cache, linsolve::EnzymeCore.Annotation{LP};
+        kwargs...) where {RT, LP <: LinearSolve.LinearCache}
     y, dys, _linsolve, dAs, dbs = cache

     @assert !(linsolve isa Const)
@@ -202,7 +215,7 @@ function EnzymeCore.EnzymeRules.reverse(config, func::Const{typeof(LinearSolve.s
             LinearSolve.defaultalg_adjoint_eval(_linsolve, dy)
         else
             error("Algorithm $(_linsolve.alg) is currently not supported by Enzyme rules on LinearSolve.jl. Please open an issue on LinearSolve.jl detailing which algorithm is missing the adjoint handling")
-        end 
+        end

         dA .-= z * transpose(y)
         db .+= z
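
Reviewer's note: the comment block above encodes the reverse-mode identity y = inv(A) B, dA -= z y^T, dB += z with z = inv(A^T) dy. A quick self-contained numerical check of that identity, independent of Enzyme (a sketch; the 4×4 matrix, seeds, and tolerance are arbitrary illustrative choices, not part of this patch):

```julia
using LinearAlgebra

A = rand(4, 4) + 4I            # well-conditioned test matrix (assumption)
b, dy = rand(4), rand(4)       # primal right-hand side and adjoint seed
y = A \ b
z = A' \ dy                    # z = inv(A^T) dy, as in the comment above
dA, db = rand(4, 4), rand(4)   # arbitrary perturbation directions
forward = A \ (db - dA * y)    # directional derivative of y = A \ b
# Reverse-mode consistency: <dy, forward> must equal <-z*y', dA> + <z, db>
@assert isapprox(dot(dy, forward), dot(-z * y', dA) + dot(z, db); rtol = 1e-8)
```
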
diff --git a/ext/LinearSolveFastAlmostBandedMatricesExt.jl b/ext/LinearSolveFastAlmostBandedMatricesExt.jl
index 080ec30f5..1ceff10c5 100644
--- a/ext/LinearSolveFastAlmostBandedMatricesExt.jl
+++ b/ext/LinearSolveFastAlmostBandedMatricesExt.jl
@@ -2,7 +2,8 @@ module LinearSolveFastAlmostBandedMatricesExt

 using FastAlmostBandedMatrices, LinearAlgebra, LinearSolve
 import LinearSolve: defaultalg,
-    do_factorization, init_cacheval, DefaultLinearSolver, DefaultAlgorithmChoice
+                    do_factorization, init_cacheval, DefaultLinearSolver,
+                    DefaultAlgorithmChoice

 function defaultalg(A::AlmostBandedMatrix, b, oa::OperatorAssumptions{Bool})
     if oa.issq
diff --git a/ext/LinearSolveHYPREExt.jl b/ext/LinearSolveHYPREExt.jl
index 00f969fe3..cef49b7a8 100644
--- a/ext/LinearSolveHYPREExt.jl
+++ b/ext/LinearSolveHYPREExt.jl
@@ -4,8 +4,8 @@ using LinearAlgebra

 using HYPRE.LibHYPRE: HYPRE_Complex
 using HYPRE: HYPRE, HYPREMatrix, HYPRESolver, HYPREVector
 using LinearSolve: HYPREAlgorithm, LinearCache, LinearProblem, LinearSolve,
-    OperatorAssumptions, default_tol, init_cacheval, __issquare,
-    __conditioning
+                   OperatorAssumptions, default_tol, init_cacheval, __issquare,
+                   __conditioning
 using SciMLBase: LinearProblem, SciMLBase
 using UnPack: @unpack
 using Setfield: @set!
@@ -21,8 +21,8 @@ mutable struct HYPRECache
 end

 function LinearSolve.init_cacheval(alg::HYPREAlgorithm, A, b, u, Pl, Pr, maxiters::Int,
-    abstol, reltol,
-    verbose::Bool, assumptions::OperatorAssumptions)
+        abstol, reltol,
+        verbose::Bool, assumptions::OperatorAssumptions)
     return HYPRECache(nothing, nothing, nothing, nothing, true, true, true)
 end

@@ -54,21 +54,21 @@ end
 # fill!(similar(b, size(A, 2)), false) since HYPREArrays are not AbstractArrays.
 function SciMLBase.init(prob::LinearProblem, alg::HYPREAlgorithm,
-    args...;
-    alias_A = false, alias_b = false,
-    # TODO: Implement eltype for HYPREMatrix in HYPRE.jl? Looks useful
-    # even if it is not AbstractArray.
-    abstol = default_tol(prob.A isa HYPREMatrix ? HYPRE_Complex :
-                         eltype(prob.A)),
-    reltol = default_tol(prob.A isa HYPREMatrix ? HYPRE_Complex :
-                         eltype(prob.A)),
-    # TODO: Implement length() for HYPREVector in HYPRE.jl?
-    maxiters::Int = prob.b isa HYPREVector ? 1000 : length(prob.b),
-    verbose::Bool = false,
-    Pl = LinearAlgebra.I,
-    Pr = LinearAlgebra.I,
-    assumptions = OperatorAssumptions(),
-    kwargs...)
+        args...;
+        alias_A = false, alias_b = false,
+        # TODO: Implement eltype for HYPREMatrix in HYPRE.jl? Looks useful
+        # even if it is not AbstractArray.
+        abstol = default_tol(prob.A isa HYPREMatrix ? HYPRE_Complex :
+                             eltype(prob.A)),
+        reltol = default_tol(prob.A isa HYPREMatrix ? HYPRE_Complex :
+                             eltype(prob.A)),
+        # TODO: Implement length() for HYPREVector in HYPRE.jl?
+        maxiters::Int = prob.b isa HYPREVector ? 1000 : length(prob.b),
+        verbose::Bool = false,
+        Pl = LinearAlgebra.I,
+        Pr = LinearAlgebra.I,
+        assumptions = OperatorAssumptions(),
+        kwargs...)
     @unpack A, b, u0, p = prob

     A = A isa HYPREMatrix ? A : HYPREMatrix(A)
@@ -89,7 +89,7 @@ function SciMLBase.init(prob::LinearProblem, alg::HYPREAlgorithm,
     cache = LinearCache{
         typeof(A), typeof(b), typeof(u0), typeof(p), typeof(alg), Tc,
         typeof(Pl), typeof(Pr), typeof(reltol),
-        typeof(__issquare(assumptions)),
+        typeof(__issquare(assumptions))
     }(A, b, u0, p, alg, cacheval, isfresh, Pl, Pr, abstol, reltol,
         maxiters, verbose, assumptions)
@@ -219,8 +219,8 @@ end

 # HYPREArrays are not AbstractArrays so perform some type-piracy
 function SciMLBase.LinearProblem(A::HYPREMatrix, b::HYPREVector,
-    p = SciMLBase.NullParameters();
-    u0::Union{HYPREVector, Nothing} = nothing, kwargs...)
+        p = SciMLBase.NullParameters();
+        u0::Union{HYPREVector, Nothing} = nothing, kwargs...)
     return LinearProblem{true}(A, b, p; u0 = u0, kwargs)
 end
diff --git a/ext/LinearSolveIterativeSolversExt.jl b/ext/LinearSolveIterativeSolversExt.jl
index 3d73197eb..507e75ad2 100644
--- a/ext/LinearSolveIterativeSolversExt.jl
+++ b/ext/LinearSolveIterativeSolversExt.jl
@@ -11,8 +11,8 @@ else
 end

 function LinearSolve.IterativeSolversJL(args...;
-    generate_iterator = IterativeSolvers.gmres_iterable!,
-    gmres_restart = 0, kwargs...)
+        generate_iterator = IterativeSolvers.gmres_iterable!,
+        gmres_restart = 0, kwargs...)
     return IterativeSolversJL(generate_iterator, gmres_restart, args, kwargs)
 end

@@ -49,9 +49,9 @@ LinearSolve.default_alias_A(::IterativeSolversJL, ::Any, ::Any) = true
 LinearSolve.default_alias_b(::IterativeSolversJL, ::Any, ::Any) = true

 function LinearSolve.init_cacheval(alg::IterativeSolversJL, A, b, u, Pl, Pr, maxiters::Int,
-    abstol,
-    reltol,
-    verbose::Bool, assumptions::OperatorAssumptions)
+        abstol,
+        reltol,
+        verbose::Bool, assumptions::OperatorAssumptions)
     restart = (alg.gmres_restart == 0) ? min(20, size(A, 1)) : alg.gmres_restart
     s = :idrs_s in keys(alg.kwargs) ?
         alg.kwargs.idrs_s : 4 # shadow space
@@ -69,10 +69,10 @@ function LinearSolve.init_cacheval(alg::IterativeSolversJL, A, b, u, Pl, Pr, max
     elseif alg.generate_iterator === IterativeSolvers.idrs_iterable!
         !!LinearSolve._isidentity_struct(Pr) &&
             @warn "$(alg.generate_iterator) doesn't support right preconditioning"
-        history = IterativeSolvers.ConvergenceHistory(partial=true)
+        history = IterativeSolvers.ConvergenceHistory(partial = true)
         history[:abstol] = abstol
         history[:reltol] = reltol
-        IterativeSolvers.idrs_iterable!(history, u, A, b, s, Pl, abstol, reltol, maxiters; 
+        IterativeSolvers.idrs_iterable!(history, u, A, b, s, Pl, abstol, reltol, maxiters;
             alg.kwargs...)
     elseif alg.generate_iterator === IterativeSolvers.bicgstabl_iterator!
         !!LinearSolve._isidentity_struct(Pr) &&
@@ -110,7 +110,8 @@ function SciMLBase.solve!(cache::LinearCache, alg::IterativeSolversJL; kwargs...
     end
     cache.verbose && println()

-    resid = cache.cacheval isa IterativeSolvers.IDRSIterable ? cache.cacheval.R : cache.cacheval.residual
+    resid = cache.cacheval isa IterativeSolvers.IDRSIterable ? cache.cacheval.R :
+            cache.cacheval.residual
     if resid isa IterativeSolvers.Residual
         resid = resid.current
     end
diff --git a/ext/LinearSolveKrylovKitExt.jl b/ext/LinearSolveKrylovKitExt.jl
index 71aa5dddf..a26e26688 100644
--- a/ext/LinearSolveKrylovKitExt.jl
+++ b/ext/LinearSolveKrylovKitExt.jl
@@ -4,8 +4,8 @@ using LinearSolve, KrylovKit, LinearAlgebra

 using LinearSolve: LinearCache

 function LinearSolve.KrylovKitJL(args...;
-    KrylovAlg = KrylovKit.GMRES, gmres_restart = 0,
-    kwargs...)
+        KrylovAlg = KrylovKit.GMRES, gmres_restart = 0,
+        kwargs...)
     return KrylovKitJL(KrylovAlg, gmres_restart, args, kwargs)
 end
diff --git a/ext/LinearSolveMetalExt.jl b/ext/LinearSolveMetalExt.jl
index 2e963f514..036ffa9cd 100644
--- a/ext/LinearSolveMetalExt.jl
+++ b/ext/LinearSolveMetalExt.jl
@@ -9,13 +9,13 @@ default_alias_A(::MetalLUFactorization, ::Any, ::Any) = false
 default_alias_b(::MetalLUFactorization, ::Any, ::Any) = false

 function LinearSolve.init_cacheval(alg::MetalLUFactorization, A, b, u, Pl, Pr,
-    maxiters::Int, abstol, reltol, verbose::Bool,
-    assumptions::OperatorAssumptions)
+        maxiters::Int, abstol, reltol, verbose::Bool,
+        assumptions::OperatorAssumptions)
     ArrayInterface.lu_instance(convert(AbstractMatrix, A))
 end

 function SciMLBase.solve!(cache::LinearCache, alg::MetalLUFactorization;
-    kwargs...)
+        kwargs...)
     A = cache.A
     A = convert(AbstractMatrix, A)
     if cache.isfresh
diff --git a/ext/LinearSolvePardisoExt.jl b/ext/LinearSolvePardisoExt.jl
index da7feaf61..60312ce66 100644
--- a/ext/LinearSolvePardisoExt.jl
+++ b/ext/LinearSolvePardisoExt.jl
@@ -12,16 +12,16 @@ LinearSolve.needs_concrete_A(alg::PardisoJL) = true

 # TODO schur complement functionality
 function LinearSolve.init_cacheval(alg::PardisoJL,
-    A,
-    b,
-    u,
-    Pl,
-    Pr,
-    maxiters::Int,
-    abstol,
-    reltol,
-    verbose::Bool,
-    assumptions::LinearSolve.OperatorAssumptions)
+        A,
+        b,
+        u,
+        Pl,
+        Pr,
+        maxiters::Int,
+        abstol,
+        reltol,
+        verbose::Bool,
+        assumptions::LinearSolve.OperatorAssumptions)
     @unpack nprocs, solver_type, matrix_type, iparm, dparm = alg
     A = convert(AbstractMatrix, A)
diff --git a/ext/LinearSolveRecursiveArrayToolsExt.jl b/ext/LinearSolveRecursiveArrayToolsExt.jl
index 98da9583c..468f53e84 100644
--- a/ext/LinearSolveRecursiveArrayToolsExt.jl
+++ b/ext/LinearSolveRecursiveArrayToolsExt.jl
@@ -5,7 +5,7 @@ import LinearSolve: init_cacheval

 # Krylov.jl tries to init with `ArrayPartition(undef, ...)`. Avoid hitting that!
 function init_cacheval(alg::LinearSolve.KrylovJL, A, b::ArrayPartition, u, Pl, Pr,
-    maxiters::Int, abstol, reltol, verbose::Bool, ::LinearSolve.OperatorAssumptions)
+        maxiters::Int, abstol, reltol, verbose::Bool, ::LinearSolve.OperatorAssumptions)
     return nothing
 end
diff --git a/src/LinearSolve.jl b/src/LinearSolve.jl
index 7ffaadf9c..efec6be98 100644
--- a/src/LinearSolve.jl
+++ b/src/LinearSolve.jl
@@ -29,8 +29,8 @@ PrecompileTools.@recompile_invalidations begin

     using LinearAlgebra: BlasInt, LU
     using LinearAlgebra.LAPACK: require_one_based_indexing,
-        chkfinite, chkstride1,
-        @blasfunc, chkargsok
+                                chkfinite, chkstride1,
+                                @blasfunc, chkargsok

     import GPUArraysCore
     import Preferences
@@ -128,7 +128,7 @@ include("extension_algs.jl")
 include("deprecated.jl")

 @generated function SciMLBase.solve!(cache::LinearCache, alg::AbstractFactorization;
-    kwargs...)
+        kwargs...)
     quote
         if cache.isfresh
             fact = do_factorization(alg, cache.A, cache.b, cache.u)
@@ -214,19 +214,19 @@ PrecompileTools.@compile_workload begin
 end

 export LUFactorization, SVDFactorization, QRFactorization, GenericFactorization,
-    GenericLUFactorization, SimpleLUFactorization, RFLUFactorization,
-    NormalCholeskyFactorization, NormalBunchKaufmanFactorization,
-    UMFPACKFactorization, KLUFactorization, FastLUFactorization, FastQRFactorization,
-    SparspakFactorization, DiagonalFactorization, CholeskyFactorization,
-    BunchKaufmanFactorization, CHOLMODFactorization, LDLtFactorization
+       GenericLUFactorization, SimpleLUFactorization, RFLUFactorization,
+       NormalCholeskyFactorization, NormalBunchKaufmanFactorization,
+       UMFPACKFactorization, KLUFactorization, FastLUFactorization, FastQRFactorization,
+       SparspakFactorization, DiagonalFactorization, CholeskyFactorization,
+       BunchKaufmanFactorization, CHOLMODFactorization, LDLtFactorization

 export LinearSolveFunction, DirectLdiv!
 export KrylovJL, KrylovJL_CG, KrylovJL_MINRES, KrylovJL_GMRES,
-    KrylovJL_BICGSTAB, KrylovJL_LSMR, KrylovJL_CRAIGMR,
-    IterativeSolversJL, IterativeSolversJL_CG, IterativeSolversJL_GMRES,
-    IterativeSolversJL_BICGSTAB, IterativeSolversJL_MINRES, IterativeSolversJL_IDRS,
-    KrylovKitJL, KrylovKitJL_CG, KrylovKitJL_GMRES
+       KrylovJL_BICGSTAB, KrylovJL_LSMR, KrylovJL_CRAIGMR,
+       IterativeSolversJL, IterativeSolversJL_CG, IterativeSolversJL_GMRES,
+       IterativeSolversJL_BICGSTAB, IterativeSolversJL_MINRES, IterativeSolversJL_IDRS,
+       KrylovKitJL, KrylovKitJL_CG, KrylovKitJL_GMRES

 export SimpleGMRES
diff --git a/src/appleaccelerate.jl b/src/appleaccelerate.jl
index 44bb07dcb..917ad9c4a 100644
--- a/src/appleaccelerate.jl
+++ b/src/appleaccelerate.jl
@@ -31,9 +31,9 @@ else
 end

 function aa_getrf!(A::AbstractMatrix{<:ComplexF64};
-    ipiv = similar(A, Cint, min(size(A, 1), size(A, 2))),
-    info = Ref{Cint}(),
-    check = false)
+        ipiv = similar(A, Cint, min(size(A, 1), size(A, 2))),
+        info = Ref{Cint}(),
+        check = false)
     require_one_based_indexing(A)
     check && chkfinite(A)
     chkstride1(A)
@@ -51,9 +51,9 @@ function aa_getrf!(A::AbstractMatrix{<:ComplexF64};
 end

 function aa_getrf!(A::AbstractMatrix{<:ComplexF32};
-    ipiv = similar(A, Cint, min(size(A, 1), size(A, 2))),
-    info = Ref{Cint}(),
-    check = false)
+        ipiv = similar(A, Cint, min(size(A, 1), size(A, 2))),
+        info = Ref{Cint}(),
+        check = false)
     require_one_based_indexing(A)
     check && chkfinite(A)
     chkstride1(A)
@@ -71,9 +71,9 @@ function aa_getrf!(A::AbstractMatrix{<:ComplexF32};
 end

 function aa_getrf!(A::AbstractMatrix{<:Float64};
-    ipiv = similar(A, Cint, min(size(A, 1), size(A, 2))),
-    info = Ref{Cint}(),
-    check = false)
+        ipiv = similar(A, Cint, min(size(A, 1), size(A, 2))),
+        info = Ref{Cint}(),
+        check = false)
     require_one_based_indexing(A)
     check && chkfinite(A)
     chkstride1(A)
@@ -91,9 +91,9 @@ function aa_getrf!(A::AbstractMatrix{<:Float64};
 end

 function aa_getrf!(A::AbstractMatrix{<:Float32};
-    ipiv = similar(A, Cint, min(size(A, 1), size(A, 2))),
-    info = Ref{Cint}(),
-    check = false)
+        ipiv = similar(A, Cint, min(size(A, 1), size(A, 2))),
+        info = Ref{Cint}(),
+        check = false)
     require_one_based_indexing(A)
     check && chkfinite(A)
     chkstride1(A)
@@ -112,10 +112,10 @@ function aa_getrf!(A::AbstractMatrix{<:Float32};
 end

 function aa_getrs!(trans::AbstractChar,
-    A::AbstractMatrix{<:ComplexF64},
-    ipiv::AbstractVector{Cint},
-    B::AbstractVecOrMat{<:ComplexF64};
-    info = Ref{Cint}())
+        A::AbstractMatrix{<:ComplexF64},
+        ipiv::AbstractVector{Cint},
+        B::AbstractVecOrMat{<:ComplexF64};
+        info = Ref{Cint}())
     require_one_based_indexing(A, ipiv, B)
     LinearAlgebra.LAPACK.chktrans(trans)
     chkstride1(A, B, ipiv)
@@ -136,10 +136,10 @@ function aa_getrs!(trans::AbstractChar,
 end

 function aa_getrs!(trans::AbstractChar,
-    A::AbstractMatrix{<:ComplexF32},
-    ipiv::AbstractVector{Cint},
-    B::AbstractVecOrMat{<:ComplexF32};
-    info = Ref{Cint}())
+        A::AbstractMatrix{<:ComplexF32},
+        ipiv::AbstractVector{Cint},
+        B::AbstractVecOrMat{<:ComplexF32};
+        info = Ref{Cint}())
     require_one_based_indexing(A, ipiv, B)
     LinearAlgebra.LAPACK.chktrans(trans)
     chkstride1(A, B, ipiv)
@@ -161,10 +161,10 @@ function aa_getrs!(trans::AbstractChar,
 end

 function aa_getrs!(trans::AbstractChar,
-    A::AbstractMatrix{<:Float64},
-    ipiv::AbstractVector{Cint},
-    B::AbstractVecOrMat{<:Float64};
-    info = Ref{Cint}())
+        A::AbstractMatrix{<:Float64},
+        ipiv::AbstractVector{Cint},
+        B::AbstractVecOrMat{<:Float64};
+        info = Ref{Cint}())
     require_one_based_indexing(A, ipiv, B)
     LinearAlgebra.LAPACK.chktrans(trans)
     chkstride1(A, B, ipiv)
@@ -186,10 +186,10 @@ function aa_getrs!(trans::AbstractChar,
 end

 function aa_getrs!(trans::AbstractChar,
-    A::AbstractMatrix{<:Float32},
-    ipiv::AbstractVector{Cint},
-    B::AbstractVecOrMat{<:Float32};
-    info = Ref{Cint}())
+        A::AbstractMatrix{<:Float32},
+        ipiv::AbstractVector{Cint},
+        B::AbstractVecOrMat{<:Float32};
+        info = Ref{Cint}())
     require_one_based_indexing(A, ipiv, B)
     LinearAlgebra.LAPACK.chktrans(trans)
     chkstride1(A, B, ipiv)
@@ -220,21 +220,22 @@ const PREALLOCATED_APPLE_LU = begin
 end

 function LinearSolve.init_cacheval(alg::AppleAccelerateLUFactorization, A, b, u, Pl, Pr,
-    maxiters::Int, abstol, reltol, verbose::Bool,
-    assumptions::OperatorAssumptions)
+        maxiters::Int, abstol, reltol, verbose::Bool,
+        assumptions::OperatorAssumptions)
     PREALLOCATED_APPLE_LU
 end

-function LinearSolve.init_cacheval(alg::AppleAccelerateLUFactorization, A::AbstractMatrix{<:Union{Float32,ComplexF32,ComplexF64}}, b, u, Pl, Pr,
-    maxiters::Int, abstol, reltol, verbose::Bool,
-    assumptions::OperatorAssumptions)
+function LinearSolve.init_cacheval(alg::AppleAccelerateLUFactorization,
+        A::AbstractMatrix{<:Union{Float32, ComplexF32, ComplexF64}}, b, u, Pl, Pr,
+        maxiters::Int, abstol, reltol, verbose::Bool,
+        assumptions::OperatorAssumptions)
     A = rand(eltype(A), 0, 0)
     luinst = ArrayInterface.lu_instance(A)
     LU(luinst.factors, similar(A, Cint, 0), luinst.info), Ref{Cint}()
 end

 function SciMLBase.solve!(cache::LinearCache, alg::AppleAccelerateLUFactorization;
-    kwargs...)
+        kwargs...)
     A = cache.A
     A = convert(AbstractMatrix, A)
     if cache.isfresh
diff --git a/src/common.jl b/src/common.jl
index 1f4d50e2e..a14e976a8 100644
--- a/src/common.jl
+++ b/src/common.jl
@@ -7,7 +7,7 @@ can be dependent on the condition number of the matrix. The condition number can

 ```julia
 using LinearAlgebra
-cond(rand(100,100))
+cond(rand(100, 100))
 ```

 However, in practice this computation is very expensive and thus not possible for most practical cases.
@@ -59,7 +59,7 @@ struct OperatorAssumptions{T}
 end

 function OperatorAssumptions(issquare = nothing;
-    condition::OperatorCondition.T = OperatorCondition.IllConditioned)
+        condition::OperatorCondition.T = OperatorCondition.IllConditioned)
     OperatorAssumptions{typeof(issquare)}(issquare, condition)
 end

 __issquare(assump::OperatorAssumptions) = assump.issq
@@ -128,17 +128,17 @@
 __init_u0_from_Ab(::SMatrix{S1, S2}, b) where {S1, S2} = zeros(SVector{S2, eltype(b)})

 function SciMLBase.init(prob::LinearProblem, alg::SciMLLinearSolveAlgorithm,
-    args...;
-    alias_A = default_alias_A(alg, prob.A, prob.b),
-    alias_b = default_alias_b(alg, prob.A, prob.b),
-    abstol = default_tol(real(eltype(prob.b))),
-    reltol = default_tol(real(eltype(prob.b))),
-    maxiters::Int = length(prob.b),
-    verbose::Bool = false,
-    Pl = IdentityOperator(size(prob.A)[1]),
-    Pr = IdentityOperator(size(prob.A)[2]),
-    assumptions = OperatorAssumptions(issquare(prob.A)),
-    kwargs...)
+        args...;
+        alias_A = default_alias_A(alg, prob.A, prob.b),
+        alias_b = default_alias_b(alg, prob.A, prob.b),
+        abstol = default_tol(real(eltype(prob.b))),
+        reltol = default_tol(real(eltype(prob.b))),
+        maxiters::Int = length(prob.b),
+        verbose::Bool = false,
+        Pl = IdentityOperator(size(prob.A)[1]),
+        Pr = IdentityOperator(size(prob.A)[2]),
+        assumptions = OperatorAssumptions(issquare(prob.A)),
+        kwargs...)
     @unpack A, b, u0, p = prob

     A = if alias_A || A isa SMatrix
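
Reviewer's note: the `assumptions` keyword threaded through `SciMLBase.init` above is the user-facing entry point for the conditioning machinery documented at the top of this file. A usage sketch (assuming `OperatorCondition` is exported; if not, qualify it as `LinearSolve.OperatorCondition`):

```julia
using LinearAlgebra, LinearSolve

A = rand(4, 4) + 4I
prob = LinearProblem(A, rand(4))
# Declare the operator square and well-conditioned so the default algorithm
# choice need not fall back to the ill-conditioned defaults.
assump = OperatorAssumptions(true; condition = OperatorCondition.WellConditioned)
sol = solve(prob; assumptions = assump)
```
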
@@ -181,8 +181,8 @@ function SciMLBase.solve(prob::LinearProblem, args...; kwargs...)
 end

 function SciMLBase.solve(prob::LinearProblem,
-    alg::Union{SciMLLinearSolveAlgorithm, Nothing},
-    args...; kwargs...)
+        alg::Union{SciMLLinearSolveAlgorithm, Nothing},
+        args...; kwargs...)
     solve!(init(prob, alg, args...; kwargs...))
 end
diff --git a/src/default.jl b/src/default.jl
index 9a76403bb..4621edcff 100644
--- a/src/default.jl
+++ b/src/default.jl
@@ -30,7 +30,7 @@
     newex = quote
         setfield!(cache, $(Meta.quot(alg)), v)
     end
-    alg_enum = getproperty(LinearSolve.DefaultAlgorithmChoice, alg) 
+    alg_enum = getproperty(LinearSolve.DefaultAlgorithmChoice, alg)
    ex = if ex == :()
         Expr(:elseif, :(alg.alg == $(alg_enum)), newex,
             :(error("Algorithm Choice not Allowed")))
@@ -46,7 +46,7 @@ end
 defaultalg(A, b) = defaultalg(A, b, OperatorAssumptions(true))

 function defaultalg(A::Union{DiffEqArrayOperator, MatrixOperator}, b,
-    assump::OperatorAssumptions{Bool})
+        assump::OperatorAssumptions{Bool})
     defaultalg(A.A, b, assump)
 end

@@ -92,12 +92,13 @@ function defaultalg(A::Symmetric{<:Number, <:Array}, b, ::OperatorAssumptions{Bo
     DefaultLinearSolver(DefaultAlgorithmChoice.BunchKaufmanFactorization)
 end

-function defaultalg(A::Symmetric{<:Number, <:SparseMatrixCSC}, b, ::OperatorAssumptions{Bool})
+function defaultalg(
+        A::Symmetric{<:Number, <:SparseMatrixCSC}, b, ::OperatorAssumptions{Bool})
     DefaultLinearSolver(DefaultAlgorithmChoice.CHOLMODFactorization)
 end

 function defaultalg(A::AbstractSparseMatrixCSC{Tv, Ti}, b,
-    assump::OperatorAssumptions{Bool}) where {Tv, Ti}
+        assump::OperatorAssumptions{Bool}) where {Tv, Ti}
     if assump.issq
         DefaultLinearSolver(DefaultAlgorithmChoice.SparspakFactorization)
     else
@@ -107,7 +108,7 @@ end

 @static if INCLUDE_SPARSE
     function defaultalg(A::AbstractSparseMatrixCSC{<:Union{Float64, ComplexF64}, Ti}, b,
-        assump::OperatorAssumptions{Bool}) where {Ti}
+            assump::OperatorAssumptions{Bool}) where {Ti}
         if assump.issq
             if length(b) <= 10_000 && length(nonzeros(A)) / length(A) < 2e-4
                 DefaultLinearSolver(DefaultAlgorithmChoice.KLUFactorization)
@@ -129,7 +130,8 @@ function defaultalg(A::GPUArraysCore.AnyGPUArray, b, assump::OperatorAssumptions
 end

 # A === nothing case
-function defaultalg(A::Nothing, b::GPUArraysCore.AnyGPUArray, assump::OperatorAssumptions{Bool})
+function defaultalg(
+        A::Nothing, b::GPUArraysCore.AnyGPUArray, assump::OperatorAssumptions{Bool})
     if assump.condition === OperatorCondition.IllConditioned || !assump.issq
         DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization)
     else
@@ -139,7 +141,7 @@ end

 # Ambiguity handling
 function defaultalg(A::GPUArraysCore.AnyGPUArray, b::GPUArraysCore.AnyGPUArray,
-    assump::OperatorAssumptions{Bool})
+        assump::OperatorAssumptions{Bool})
     if assump.condition === OperatorCondition.IllConditioned || !assump.issq
         DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization)
     else
@@ -148,7 +150,7 @@ function defaultalg(A::GPUArraysCore.AnyGPUArray, b::GPUArraysCore.AnyGPUArray,
 end

 function defaultalg(A::SciMLBase.AbstractSciMLOperator, b,
-    assump::OperatorAssumptions{Bool})
+        assump::OperatorAssumptions{Bool})
     if has_ldiv!(A)
         return DefaultLinearSolver(DefaultAlgorithmChoice.DirectLdiv!)
     elseif !assump.issq
@@ -180,12 +182,12 @@ function defaultalg(A, b, assump::OperatorAssumptions{Bool})
         elseif appleaccelerate_isavailable()
             DefaultAlgorithmChoice.AppleAccelerateLUFactorization
         elseif (length(b) <= 100 || (isopenblas() && length(b) <= 500) ||
-            (usemkl && length(b) <= 200)) &&
+                (usemkl && length(b) <= 200)) &&
                (A === nothing ?
                eltype(b) <: Union{Float32, Float64} :
                eltype(A) <: Union{Float32, Float64})
             DefaultAlgorithmChoice.RFLUFactorization
-    #elseif A === nothing || A isa Matrix
-    #    alg = FastLUFactorization()
+        #elseif A === nothing || A isa Matrix
+        #    alg = FastLUFactorization()
         elseif usemkl
             DefaultAlgorithmChoice.MKLLUFactorization
         else
@@ -196,7 +198,7 @@ function defaultalg(A, b, assump::OperatorAssumptions{Bool})
         elseif __conditioning(assump) === OperatorCondition.SuperIllConditioned
             DefaultAlgorithmChoice.SVDFactorization
         elseif usemkl && (A === nothing ? eltype(b) <: BLASELTYPES :
-            eltype(A) <: BLASELTYPES)
+                eltype(A) <: BLASELTYPES)
             DefaultAlgorithmChoice.MKLLUFactorization
         else
             DefaultAlgorithmChoice.LUFactorization
@@ -286,21 +288,22 @@ end

 ## Catch high level interface
 function SciMLBase.init(prob::LinearProblem, alg::Nothing,
-    args...;
-    assumptions = OperatorAssumptions(issquare(prob.A)),
-    kwargs...)
-    SciMLBase.init(prob, defaultalg(prob.A, prob.b, assumptions), args...; assumptions, kwargs...)
+        args...;
+        assumptions = OperatorAssumptions(issquare(prob.A)),
+        kwargs...)
+    SciMLBase.init(
+        prob, defaultalg(prob.A, prob.b, assumptions), args...; assumptions, kwargs...)
 end

 function SciMLBase.solve!(cache::LinearCache, alg::Nothing,
-    args...; assump::OperatorAssumptions = OperatorAssumptions(),
-    kwargs...)
+        args...; assump::OperatorAssumptions = OperatorAssumptions(),
+        kwargs...)
     @unpack A, b = cache
     SciMLBase.solve!(cache, defaultalg(A, b, assump), args...; kwargs...)
 end

 function init_cacheval(alg::Nothing, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol,
-    verbose::Bool, assump::OperatorAssumptions)
+        verbose::Bool, assump::OperatorAssumptions)
     init_cacheval(defaultalg(A, b, assump), A, b, u, Pl, Pr, maxiters, abstol, reltol,
         verbose, assump)
@@ -310,8 +313,8 @@ end
 """
 cache.cacheval = NamedTuple(LUFactorization = cache of LUFactorization, ...)
 """
 @generated function init_cacheval(alg::DefaultLinearSolver, A, b, u, Pl, Pr, maxiters::Int,
-    abstol, reltol,
-    verbose::Bool, assump::OperatorAssumptions)
+        abstol, reltol,
+        verbose::Bool, assump::OperatorAssumptions)
     caches = map(first.(EnumX.symbol_map(DefaultAlgorithmChoice.T))) do alg
         if alg === :KrylovJL_GMRES || alg === :KrylovJL_CRAIGMR || alg === :KrylovJL_LSMR
             quote
@@ -345,15 +348,15 @@ defaultalg_symbol(::Type{<:QRFactorization{ColumnNorm}}) = :QRFactorizationPivot

 """
 if alg.alg === DefaultAlgorithmChoice.LUFactorization
-    SciMLBase.solve!(cache, LUFactorization(), args...; kwargs...))
+SciMLBase.solve!(cache, LUFactorization(), args...; kwargs...)
 else
-    ...
+...
 end
 """
 @generated function SciMLBase.solve!(cache::LinearCache, alg::DefaultLinearSolver,
-    args...;
-    assump::OperatorAssumptions = OperatorAssumptions(),
-    kwargs...)
+        args...;
+        assump::OperatorAssumptions = OperatorAssumptions(),
+        kwargs...)
     ex = :()
     for alg in first.(EnumX.symbol_map(DefaultAlgorithmChoice.T))
         newex = quote
@@ -362,7 +365,7 @@
                 retcode = sol.retcode,
                 iters = sol.iters, stats = sol.stats)
         end
-        alg_enum = getproperty(LinearSolve.DefaultAlgorithmChoice, alg) 
+        alg_enum = getproperty(LinearSolve.DefaultAlgorithmChoice, alg)
         ex = if ex == :()
             Expr(:elseif, :(alg.alg == $(alg_enum)), newex,
                 :(error("Algorithm Choice not Allowed")))
         else
@@ -389,7 +392,7 @@ end
             DefaultAlgorithmChoice.AppleAccelerateLUFactorization,
             DefaultAlgorithmChoice.RFLUFactorization))
             quote
-                getproperty(cache.cacheval,$(Meta.quot(alg)))[1]' \ dy
+                getproperty(cache.cacheval, $(Meta.quot(alg)))[1]' \ dy
             end
         elseif alg in Symbol.((DefaultAlgorithmChoice.LUFactorization,
             DefaultAlgorithmChoice.QRFactorization,
@@ -405,9 +408,11 @@ end
             DefaultAlgorithmChoice.QRFactorizationPivoted,
             DefaultAlgorithmChoice.GenericLUFactorization))
             quote
-                getproperty(cache.cacheval,$(Meta.quot(alg)))' \ dy
+                getproperty(cache.cacheval, $(Meta.quot(alg)))' \ dy
             end
-        elseif alg in Symbol.((DefaultAlgorithmChoice.KrylovJL_GMRES,DefaultAlgorithmChoice.KrylovJL_LSMR, DefaultAlgorithmChoice.KrylovJL_CRAIGMR))
+        elseif alg in Symbol.((
+            DefaultAlgorithmChoice.KrylovJL_GMRES, DefaultAlgorithmChoice.KrylovJL_LSMR,
+            DefaultAlgorithmChoice.KrylovJL_CRAIGMR))
             quote
                 invprob = LinearSolve.LinearProblem(transpose(cache.A), dy)
                 solve(invprob, cache.alg;
@@ -422,10 +427,15 @@ end
         end
         ex = if ex == :()
-            Expr(:elseif, :(getproperty(DefaultAlgorithmChoice, $(Meta.quot(alg))) === cache.alg.alg), newex,
+            Expr(:elseif,
+                :(getproperty(DefaultAlgorithmChoice, $(Meta.quot(alg))) === cache.alg.alg),
+                newex,
                 :(error("Algorithm Choice not Allowed")))
         else
-            Expr(:elseif, :(getproperty(DefaultAlgorithmChoice, $(Meta.quot(alg))) === cache.alg.alg), newex, ex)
+            Expr(:elseif,
+                :(getproperty(DefaultAlgorithmChoice, $(Meta.quot(alg))) === cache.alg.alg),
+                newex,
+                ex)
         end
     end
     ex = Expr(:if, ex.args...)
diff --git a/src/deprecated.jl b/src/deprecated.jl
index a9bd5a846..19623c4c0 100644
--- a/src/deprecated.jl
+++ b/src/deprecated.jl
@@ -79,6 +79,7 @@ function set_cacheval(cache::LinearCache, alg_cache)
     end
 end

-@deprecate SciMLBase.solve(cache::LinearCache, args...; kwargs...) SciMLBase.solve!(cache::LinearCache,
+@deprecate SciMLBase.solve(cache::LinearCache, args...; kwargs...) SciMLBase.solve!(
+    cache::LinearCache,
     args...; kwargs...) false
diff --git a/src/extension_algs.jl b/src/extension_algs.jl
index d2e968911..4c2d4fecb 100644
--- a/src/extension_algs.jl
+++ b/src/extension_algs.jl
@@ -15,7 +15,7 @@ alternatively pass an already created solver to `HYPREAlgorithm` (and to the `Pl
 argument). See HYPRE.jl docs for how to set up solvers with specific options.

 !!! note
-    
+
     Using HYPRE solvers requires Julia version 1.9 or higher, and that the package
     HYPRE.jl is installed.

@@ -23,20 +23,20 @@ argument). See HYPRE.jl docs for how to set up solvers with specific options.

 The single positional argument `solver` has the following choices:

-- `HYPRE.BiCGSTAB`
-- `HYPRE.BoomerAMG`
-- `HYPRE.FlexGMRES`
-- `HYPRE.GMRES`
-- `HYPRE.Hybrid`
-- `HYPRE.ILU`
-- `HYPRE.ParaSails` (as preconditioner only)
-- `HYPRE.PCG`
+  - `HYPRE.BiCGSTAB`
+  - `HYPRE.BoomerAMG`
+  - `HYPRE.FlexGMRES`
+  - `HYPRE.GMRES`
+  - `HYPRE.Hybrid`
+  - `HYPRE.ILU`
+  - `HYPRE.ParaSails` (as preconditioner only)
+  - `HYPRE.PCG`

 ## Keyword Arguments

-* `Pl`: A choice of left preconditioner.
+  - `Pl`: A choice of left preconditioner.

-## Example 
+## Example

 For example, to use `HYPRE.PCG` as the solver, with `HYPRE.BoomerAMG` as the
 preconditioner, the algorithm should be defined as follows:

@@ -64,11 +64,11 @@ end
 """
 `CudaOffloadFactorization()`

-An offloading technique used to GPU-accelerate CPU-based computations. 
+An offloading technique used to GPU-accelerate CPU-based computations.
 Requires a sufficiently large `A` to overcome the data transfer costs.

 !!! note
-    
+
     Using this solver requires adding the package CUDA.jl, i.e. `using CUDA`
 """
 struct CudaOffloadFactorization <: LinearSolve.AbstractFactorization
@@ -85,15 +85,15 @@ end
 """
 ```julia
 MKLPardisoFactorize(; nprocs::Union{Int, Nothing} = nothing,
-    matrix_type = nothing,
-    iparm::Union{Vector{Tuple{Int, Int}}, Nothing} = nothing,
-    dparm::Union{Vector{Tuple{Int, Int}}, Nothing} = nothing)
+        matrix_type = nothing,
+        iparm::Union{Vector{Tuple{Int, Int}}, Nothing} = nothing,
+        dparm::Union{Vector{Tuple{Int, Int}}, Nothing} = nothing)
 ```

 A sparse factorization method using MKL Pardiso.

 !!! note
-    
+
     Using this solver requires adding the package Pardiso.jl, i.e. `using Pardiso`

 ## Keyword Arguments
@@ -108,15 +108,15 @@ MKLPardisoFactorize(; kwargs...) = PardisoJL(; solver_type = 0, kwargs...)

 """
 ```julia
 MKLPardisoIterate(; nprocs::Union{Int, Nothing} = nothing,
-    matrix_type = nothing,
-    iparm::Union{Vector{Tuple{Int, Int}}, Nothing} = nothing,
-    dparm::Union{Vector{Tuple{Int, Int}}, Nothing} = nothing)
+        matrix_type = nothing,
+        iparm::Union{Vector{Tuple{Int, Int}}, Nothing} = nothing,
+        dparm::Union{Vector{Tuple{Int, Int}}, Nothing} = nothing)
 ```

 A mixed factorization+iterative method using MKL Pardiso.

 !!! note
-    
+
     Using this solver requires adding the package Pardiso.jl, i.e. `using Pardiso`

 ## Keyword Arguments
@@ -131,10 +131,10 @@ MKLPardisoIterate(; kwargs...) = PardisoJL(; solver_type = 1, kwargs...)

 """
 ```julia
 PardisoJL(; nprocs::Union{Int, Nothing} = nothing,
-    solver_type = nothing,
-    matrix_type = nothing,
-    iparm::Union{Vector{Tuple{Int, Int}}, Nothing} = nothing,
-    dparm::Union{Vector{Tuple{Int, Int}}, Nothing} = nothing)
+        solver_type = nothing,
+        matrix_type = nothing,
+        iparm::Union{Vector{Tuple{Int, Int}}, Nothing} = nothing,
+        dparm::Union{Vector{Tuple{Int, Int}}, Nothing} = nothing)
 ```

 A generic method using MKL Pardiso. Specifying `solver_type` is required.
@@ -158,10 +158,10 @@ struct PardisoJL{T1, T2} <: LinearSolve.SciMLLinearSolveAlgorithm
     dparm::Union{Vector{Tuple{Int, Int}}, Nothing}

     function PardisoJL(; nprocs::Union{Int, Nothing} = nothing,
-        solver_type = nothing,
-        matrix_type = nothing,
-        iparm::Union{Vector{Tuple{Int, Int}}, Nothing} = nothing,
-        dparm::Union{Vector{Tuple{Int, Int}}, Nothing} = nothing)
+            solver_type = nothing,
+            matrix_type = nothing,
+            iparm::Union{Vector{Tuple{Int, Int}}, Nothing} = nothing,
+            dparm::Union{Vector{Tuple{Int, Int}}, Nothing} = nothing)
         ext = Base.get_extension(@__MODULE__, :LinearSolvePardisoExt)
         if ext === nothing
             error("PardisoJL requires that Pardiso is loaded, i.e. `using Pardiso`")
@@ -184,7 +184,7 @@ A generic iterative solver implementation allowing the choice of KrylovKit.jl
 solvers.

 !!! note
-    
+
     Using this solver requires adding the package KrylovKit.jl, i.e. `using KrylovKit`
 """
 struct KrylovKitJL{F, A, I, K} <: LinearSolve.AbstractKrylovSubspaceMethod
@@ -202,8 +202,8 @@ KrylovKitJL_CG(args...; Pl = nothing, Pr = nothing, kwargs...)

 A generic CG implementation for Hermitian and positive definite linear systems

 !!! note
-
-    Using this solver requires adding the package KrylovKit.jl, i.e. `using KrylovKit`
+
+    Using this solver requires adding the package KrylovKit.jl, i.e. `using KrylovKit`
 """
 function KrylovKitJL_CG end

@@ -215,7 +215,7 @@ KrylovKitJL_GMRES(args...; Pl = nothing, Pr = nothing, gmres_restart = 0, kwargs

 A generic GMRES implementation.

 !!! note
-    
+
     Using this solver requires adding the package KrylovKit.jl, i.e. `using KrylovKit`
 """
 function KrylovKitJL_GMRES end

@@ -223,18 +223,16 @@ function KrylovKitJL_GMRES end
 """
 ```julia
 IterativeSolversJL(args...;
-    generate_iterator = IterativeSolvers.gmres_iterable!,
-    Pl = nothing, Pr = nothing,
-    gmres_restart = 0, kwargs...)
+        generate_iterator = IterativeSolvers.gmres_iterable!,
+        Pl = nothing, Pr = nothing,
+        gmres_restart = 0, kwargs...)
 ```

 A generic wrapper over the IterativeSolvers.jl solvers.

-
 !!! note
-
-    Using this solver requires adding the package IterativeSolvers.jl, i.e. `using IterativeSolvers`
+    Using this solver requires adding the package IterativeSolvers.jl, i.e. `using IterativeSolvers`
 """
 struct IterativeSolversJL{F, I, A, K} <: LinearSolve.AbstractKrylovSubspaceMethod
     generate_iterator::F
@@ -251,24 +249,21 @@ IterativeSolversJL_CG(args...; Pl = nothing, Pr = nothing, kwargs...)

 A wrapper over the IterativeSolvers.jl CG.

-
 !!! note
-
-    Using this solver requires adding the package IterativeSolvers.jl, i.e. `using IterativeSolvers`
+    Using this solver requires adding the package IterativeSolvers.jl, i.e. `using IterativeSolvers`
 """
 function IterativeSolversJL_CG end

 """
 ```julia
-IterativeSolversJL_GMRES(args...; Pl = nothing, Pr = nothing, gmres_restart=0, kwargs...)
+IterativeSolversJL_GMRES(args...; Pl = nothing, Pr = nothing, gmres_restart = 0, kwargs...)
 ```

 A wrapper over the IterativeSolvers.jl GMRES.

 !!! note
-
-    Using this solver requires adding the package IterativeSolvers.jl, i.e. `using IterativeSolvers`
+    Using this solver requires adding the package IterativeSolvers.jl, i.e. `using IterativeSolvers`
 """
 function IterativeSolversJL_GMRES end

@@ -279,11 +274,9 @@ IterativeSolversJL_IDRS(args...; Pl = nothing, kwargs...)

 A wrapper over the IterativeSolvers.jl IDR(S).

-
 !!! note
-
-    Using this solver requires adding the package IterativeSolvers.jl, i.e. `using IterativeSolvers`
+    Using this solver requires adding the package IterativeSolvers.jl, i.e. `using IterativeSolvers`
 """
 function IterativeSolversJL_IDRS end

@@ -294,11 +287,9 @@ IterativeSolversJL_BICGSTAB(args...; Pl = nothing, Pr = nothing, kwargs...)

 A wrapper over the IterativeSolvers.jl BICGSTAB.

-
 !!! note
-
-    Using this solver requires adding the package IterativeSolvers.jl, i.e. `using IterativeSolvers`
+    Using this solver requires adding the package IterativeSolvers.jl, i.e. `using IterativeSolvers`
 """
 function IterativeSolversJL_BICGSTAB end

@@ -309,11 +300,9 @@ IterativeSolversJL_MINRES(args...; Pl = nothing, Pr = nothing, kwargs...)

 A wrapper over the IterativeSolvers.jl MINRES.

-
 !!! note
-
-    Using this solver requires adding the package IterativeSolvers.jl, i.e. `using IterativeSolvers`
+    Using this solver requires adding the package IterativeSolvers.jl, i.e. `using IterativeSolvers`
 """
 function IterativeSolversJL_MINRES end
diff --git a/src/factorization.jl b/src/factorization.jl
index 1f244b3c4..0ef7b754e 100644
--- a/src/factorization.jl
+++ b/src/factorization.jl
@@ -25,7 +25,7 @@ end

 # RF Bad fallback: will fail if `A` is just a stand-in
 # This should instead just create the factorization type.
 function init_cacheval(alg::AbstractFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol,
-    reltol, verbose::Bool, assumptions::OperatorAssumptions)
+        reltol, verbose::Bool, assumptions::OperatorAssumptions)
     do_factorization(alg, convert(AbstractMatrix, A), b, u)
 end

@@ -36,18 +36,18 @@
 Julia's built in `lu`. Equivalent to calling `lu!(A)`

-* On dense matrices, this uses the current BLAS implementation of the user's computer,
-which by default is OpenBLAS but will use MKL if the user does `using MKL` in their
-system.
-* On sparse matrices, this will use UMFPACK from SparseArrays. Note that this will not
-cache the symbolic factorization.
-* On CuMatrix, it will use a CUDA-accelerated LU from CuSolver.
-* On BandedMatrix and BlockBandedMatrix, it will use a banded LU.
+  - On dense matrices, this uses the current BLAS implementation of the user's computer,
+    which by default is OpenBLAS but will use MKL if the user does `using MKL` in their
+    system.
+  - On sparse matrices, this will use UMFPACK from SparseArrays. Note that this will not
+    cache the symbolic factorization.
+  - On CuMatrix, it will use a CUDA-accelerated LU from CuSolver.
+  - On BandedMatrix and BlockBandedMatrix, it will use a banded LU.

 ## Positional Arguments

-* pivot: The choice of pivoting. Defaults to `LinearAlgebra.RowMaximum()`. The other choice is
-  `LinearAlgebra.NoPivot()`.
+  - pivot: The choice of pivoting. Defaults to `LinearAlgebra.RowMaximum()`. The other choice is
+    `LinearAlgebra.NoPivot()`.
 """
 struct LUFactorization{P} <: AbstractFactorization
     pivot::P
@@ -62,8 +62,8 @@ Has low overhead and is good for small matrices.

 ## Positional Arguments

-* pivot: The choice of pivoting. Defaults to `LinearAlgebra.RowMaximum()`. The other choice is
-  `LinearAlgebra.NoPivot()`.
+  - pivot: The choice of pivoting. Defaults to `LinearAlgebra.RowMaximum()`. The other choice is
+    `LinearAlgebra.NoPivot()`.
 """
 struct GenericLUFactorization{P} <: AbstractFactorization
     pivot::P
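
Reviewer's note: a minimal usage sketch for the two LU variants whose docstrings are reformatted above (illustrative values only, not part of this patch):

```julia
using LinearSolve

prob = LinearProblem([2.0 1.0; 1.0 3.0], [1.0, 2.0])
sol_lapack = solve(prob, LUFactorization())          # BLAS/LAPACK-backed lu!
sol_generic = solve(prob, GenericLUFactorization())  # pure-Julia, low overhead
@assert sol_lapack.u ≈ sol_generic.u
```
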
""" struct GenericLUFactorization{P} <: AbstractFactorization pivot::P @@ -94,36 +94,37 @@ function do_factorization(alg::GenericLUFactorization, A, b, u) return fact end -function init_cacheval(alg::Union{LUFactorization, GenericLUFactorization}, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) +function init_cacheval( + alg::Union{LUFactorization, GenericLUFactorization}, A, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) ArrayInterface.lu_instance(convert(AbstractMatrix, A)) end function init_cacheval(alg::Union{LUFactorization, GenericLUFactorization}, - A::Union{<:Adjoint, <:Transpose}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + A::Union{<:Adjoint, <:Transpose}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, + verbose::Bool, assumptions::OperatorAssumptions) if alg isa LUFactorization - return lu(A; check=false) + return lu(A; check = false) else A isa GPUArraysCore.AnyGPUArray && return nothing - return LinearAlgebra.generic_lufact!(copy(A), alg.pivot; check=false) + return LinearAlgebra.generic_lufact!(copy(A), alg.pivot; check = false) end end const PREALLOCATED_LU = ArrayInterface.lu_instance(rand(1, 1)) function init_cacheval(alg::Union{LUFactorization, GenericLUFactorization}, - A::Matrix{Float64}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + A::Matrix{Float64}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) PREALLOCATED_LU end function init_cacheval(alg::Union{LUFactorization, GenericLUFactorization}, - A::AbstractSciMLOperator, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + A::AbstractSciMLOperator, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) nothing end @@ -134,12 +135,12 @@ end Julia's built in `qr`. Equivalent to calling `qr!(A)`. -* On dense matrices, this uses the current BLAS implementation of the user's computer -which by default is OpenBLAS but will use MKL if the user does `using MKL` in their -system. -* On sparse matrices, this will use SPQR from SparseArrays -* On CuMatrix, it will use a CUDA-accelerated QR from CuSolver. -* On BandedMatrix and BlockBandedMatrix, it will use a banded QR. + - On dense matrices, this uses the current BLAS implementation of the user's computer + which by default is OpenBLAS but will use MKL if the user does `using MKL` in their + system. + - On sparse matrices, this will use SPQR from SparseArrays + - On CuMatrix, it will use a CUDA-accelerated QR from CuSolver. + - On BandedMatrix and BlockBandedMatrix, it will use a banded QR. 
""" struct QRFactorization{P} <: AbstractFactorization pivot::P @@ -168,20 +169,21 @@ function do_factorization(alg::QRFactorization, A, b, u) end function init_cacheval(alg::QRFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) ArrayInterface.qr_instance(convert(AbstractMatrix, A), alg.pivot) end const PREALLOCATED_QR_ColumnNorm = ArrayInterface.qr_instance(rand(1, 1), ColumnNorm()) function init_cacheval(alg::QRFactorization{ColumnNorm}, A::Matrix{Float64}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) return PREALLOCATED_QR_ColumnNorm end -function init_cacheval(alg::QRFactorization, A::Union{<:Adjoint, <:Transpose}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) +function init_cacheval( + alg::QRFactorization, A::Union{<:Adjoint, <:Transpose}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) A isa GPUArraysCore.AnyGPUArray && return qr(A) return qr(A, alg.pivot) end @@ -189,13 +191,13 @@ end const PREALLOCATED_QR_NoPivot = ArrayInterface.qr_instance(rand(1, 1)) function init_cacheval(alg::QRFactorization{NoPivot}, A::Matrix{Float64}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) return PREALLOCATED_QR_NoPivot end function init_cacheval(alg::QRFactorization, A::AbstractSciMLOperator, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) nothing end @@ -208,10 +210,10 @@ Julia's built in `cholesky`. Equivalent to calling `cholesky!(A)`. ## Keyword Arguments -* pivot: defaluts to NoPivot, can also be RowMaximum. -* tol: the tol argument in CHOLMOD. Only used for sparse matrices. -* shift: the shift argument in CHOLMOD. Only used for sparse matrices. -* perm: the perm argument in CHOLMOD. Only used for sparse matrices. + - pivot: defaluts to NoPivot, can also be RowMaximum. + - tol: the tol argument in CHOLMOD. Only used for sparse matrices. + - shift: the shift argument in CHOLMOD. Only used for sparse matrices. + - perm: the perm argument in CHOLMOD. Only used for sparse matrices. 
""" struct CholeskyFactorization{P, P2} <: AbstractFactorization pivot::P @@ -240,33 +242,33 @@ function do_factorization(alg::CholeskyFactorization, A, b, u) end function init_cacheval(alg::CholeskyFactorization, A::SMatrix{S1, S2}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) where {S1, S2} + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) where {S1, S2} cholesky(A) end function init_cacheval(alg::CholeskyFactorization, A::GPUArraysCore.AnyGPUArray, b, u, Pl, - Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) - cholesky(A; check=false) + Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + cholesky(A; check = false) end function init_cacheval(alg::CholeskyFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) ArrayInterface.cholesky_instance(convert(AbstractMatrix, A), alg.pivot) end const PREALLOCATED_CHOLESKY = ArrayInterface.cholesky_instance(rand(1, 1), NoPivot()) function init_cacheval(alg::CholeskyFactorization, A::Matrix{Float64}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) PREALLOCATED_CHOLESKY end function init_cacheval(alg::CholeskyFactorization, - A::Union{Diagonal, AbstractSciMLOperator}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + A::Union{Diagonal, AbstractSciMLOperator}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) nothing end @@ -292,14 +294,14 @@ function do_factorization(alg::LDLtFactorization, A, b, u) end function init_cacheval(alg::LDLtFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, + verbose::Bool, assumptions::OperatorAssumptions) nothing end function init_cacheval(alg::LDLtFactorization, A::SymTridiagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) ArrayInterface.ldlt_instance(convert(AbstractMatrix, A)) end @@ -310,9 +312,9 @@ end Julia's built in `svd`. Equivalent to `svd!(A)`. -* On dense matrices, this uses the current BLAS implementation of the user's computer -which by default is OpenBLAS but will use MKL if the user does `using MKL` in their -system. + - On dense matrices, this uses the current BLAS implementation of the user's computer + which by default is OpenBLAS but will use MKL if the user does `using MKL` in their + system. 
""" struct SVDFactorization{A} <: AbstractFactorization full::Bool @@ -332,22 +334,22 @@ function do_factorization(alg::SVDFactorization, A, b, u) end function init_cacheval(alg::SVDFactorization, A::Union{Matrix, SMatrix}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) ArrayInterface.svd_instance(convert(AbstractMatrix, A)) end const PREALLOCATED_SVD = ArrayInterface.svd_instance(rand(1, 1)) function init_cacheval(alg::SVDFactorization, A::Matrix{Float64}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) PREALLOCATED_SVD end function init_cacheval(alg::SVDFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) nothing end @@ -361,7 +363,7 @@ Only for Symmetric matrices. ## Keyword Arguments -* rook: whether to perform rook pivoting. Defaults to false. + - rook: whether to perform rook pivoting. Defaults to false. """ Base.@kwdef struct BunchKaufmanFactorization <: AbstractFactorization rook::Bool = false @@ -374,9 +376,9 @@ function do_factorization(alg::BunchKaufmanFactorization, A, b, u) end function init_cacheval(alg::BunchKaufmanFactorization, A::Symmetric{<:Number, <:Matrix}, b, - u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) ArrayInterface.bunchkaufman_instance(convert(AbstractMatrix, A)) end @@ -384,15 +386,15 @@ const PREALLOCATED_BUNCHKAUFMAN = ArrayInterface.bunchkaufman_instance(Symmetric 1))) function init_cacheval(alg::BunchKaufmanFactorization, - A::Symmetric{Float64, Matrix{Float64}}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + A::Symmetric{Float64, Matrix{Float64}}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) PREALLOCATED_BUNCHKAUFMAN end function init_cacheval(alg::BunchKaufmanFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) nothing end @@ -400,8 +402,8 @@ end """ `GenericFactorization(;fact_alg=LinearAlgebra.factorize)`: Constructs a linear solver from a generic - factorization algorithm `fact_alg` which complies with the Base.LinearAlgebra - factorization API. Quoting from Base: +factorization algorithm `fact_alg` which complies with the Base.LinearAlgebra +factorization API. Quoting from Base: * If `A` is upper or lower triangular (or diagonal), no factorization of `A` is required. The system is then solved with either forward or backward substitution. @@ -414,8 +416,8 @@ end ## Keyword Arguments -* fact_alg: the factorization algorithm to use. Defaults to `LinearAlgebra.factorize`, but can be - swapped to choices like `lu`, `qr` + - fact_alg: the factorization algorithm to use. 
Defaults to `LinearAlgebra.factorize`, but can be + swapped to choices like `lu`, `qr` """ struct GenericFactorization{F} <: AbstractFactorization fact_alg::F @@ -429,208 +431,225 @@ function do_factorization(alg::GenericFactorization, A, b, u) return fact end -function init_cacheval(alg::GenericFactorization{typeof(lu)}, A::AbstractMatrix, b, u, Pl, Pr, - maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) +function init_cacheval( + alg::GenericFactorization{typeof(lu)}, A::AbstractMatrix, b, u, Pl, Pr, + maxiters::Int, + abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(A) end -function init_cacheval(alg::GenericFactorization{typeof(lu!)}, A::AbstractMatrix, b, u, Pl, Pr, - maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) +function init_cacheval( + alg::GenericFactorization{typeof(lu!)}, A::AbstractMatrix, b, u, Pl, Pr, + maxiters::Int, + abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(lu)}, - A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr, - maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr, + maxiters::Int, + abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(lu!)}, - A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr, - maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr, + maxiters::Int, + abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(lu)}, A::Diagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) Diagonal(inv.(A.diag)) end function init_cacheval(alg::GenericFactorization{typeof(lu)}, A::Tridiagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) ArrayInterface.lu_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(lu!)}, A::Diagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) Diagonal(inv.(A.diag)) end -function init_cacheval(alg::GenericFactorization{typeof(lu!)}, A::Tridiagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) +function init_cacheval( + alg::GenericFactorization{typeof(lu!)}, A::Tridiagonal, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) ArrayInterface.lu_instance(A) end -function init_cacheval(alg::GenericFactorization{typeof(lu!)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) where {T, V} +function init_cacheval( + alg::GenericFactorization{typeof(lu!)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) where {T, V} LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A) end -function 
init_cacheval(alg::GenericFactorization{typeof(lu)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) where {T, V} +function init_cacheval( + alg::GenericFactorization{typeof(lu)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) where {T, V} LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A) end -function init_cacheval(alg::GenericFactorization{typeof(qr)}, A::AbstractMatrix, b, u, Pl, Pr, - maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) +function init_cacheval( + alg::GenericFactorization{typeof(qr)}, A::AbstractMatrix, b, u, Pl, Pr, + maxiters::Int, + abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) ArrayInterface.qr_instance(A) end -function init_cacheval(alg::GenericFactorization{typeof(qr!)}, A::AbstractMatrix, b, u, Pl, Pr, - maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) +function init_cacheval( + alg::GenericFactorization{typeof(qr!)}, A::AbstractMatrix, b, u, Pl, Pr, + maxiters::Int, + abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) ArrayInterface.qr_instance(A) end -function init_cacheval(alg::GenericFactorization{typeof(qr)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) where {T, V} +function init_cacheval( + alg::GenericFactorization{typeof(qr)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) where {T, V} LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A) end -function init_cacheval(alg::GenericFactorization{typeof(qr!)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) where {T, V} +function init_cacheval( + alg::GenericFactorization{typeof(qr!)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) where {T, V} LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A) end function init_cacheval(alg::GenericFactorization{typeof(qr)}, - A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr, - maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr, + maxiters::Int, + abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) ArrayInterface.qr_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(qr!)}, - A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr, - maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr, + maxiters::Int, + abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) ArrayInterface.qr_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(qr)}, A::Diagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) Diagonal(inv.(A.diag)) end function init_cacheval(alg::GenericFactorization{typeof(qr)}, A::Tridiagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) ArrayInterface.qr_instance(A) end function 
init_cacheval(alg::GenericFactorization{typeof(qr!)}, A::Diagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) Diagonal(inv.(A.diag)) end -function init_cacheval(alg::GenericFactorization{typeof(qr!)}, A::Tridiagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) +function init_cacheval( + alg::GenericFactorization{typeof(qr!)}, A::Tridiagonal, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) ArrayInterface.qr_instance(A) end -function init_cacheval(alg::GenericFactorization{typeof(svd)}, A::AbstractMatrix, b, u, Pl, Pr, - maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) +function init_cacheval( + alg::GenericFactorization{typeof(svd)}, A::AbstractMatrix, b, u, Pl, Pr, + maxiters::Int, + abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) ArrayInterface.svd_instance(A) end -function init_cacheval(alg::GenericFactorization{typeof(svd!)}, A::AbstractMatrix, b, u, Pl, Pr, - maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) +function init_cacheval( + alg::GenericFactorization{typeof(svd!)}, A::AbstractMatrix, b, u, Pl, Pr, + maxiters::Int, + abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) ArrayInterface.svd_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(svd)}, - A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr, - maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr, + maxiters::Int, + abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) ArrayInterface.svd_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(svd!)}, - A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr, - maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr, + maxiters::Int, + abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) ArrayInterface.svd_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(svd)}, A::Diagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) Diagonal(inv.(A.diag)) end -function init_cacheval(alg::GenericFactorization{typeof(svd)}, A::Tridiagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) +function init_cacheval( + alg::GenericFactorization{typeof(svd)}, A::Tridiagonal, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) ArrayInterface.svd_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(svd!)}, A::Diagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) Diagonal(inv.(A.diag)) end function init_cacheval(alg::GenericFactorization{typeof(svd!)}, A::Tridiagonal, b, u, Pl, - Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) ArrayInterface.svd_instance(A) end -function 
init_cacheval(alg::GenericFactorization{typeof(svd!)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) where {T, V} +function init_cacheval( + alg::GenericFactorization{typeof(svd!)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) where {T, V} LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A) end -function init_cacheval(alg::GenericFactorization{typeof(svd)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) where {T, V} +function init_cacheval( + alg::GenericFactorization{typeof(svd)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) where {T, V} LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A) end function init_cacheval(alg::GenericFactorization, A::Diagonal, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) Diagonal(inv.(A.diag)) end function init_cacheval(alg::GenericFactorization, A::Tridiagonal, b, u, Pl, Pr, - maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, + abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(A) end function init_cacheval(alg::GenericFactorization, A::SymTridiagonal{T, V}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) where {T, V} + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) where {T, V} LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A) end function init_cacheval(alg::GenericFactorization, A, b, u, Pl, Pr, - maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, + abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) init_cacheval(alg, convert(AbstractMatrix, A), b, u, Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) end function init_cacheval(alg::GenericFactorization, A::AbstractMatrix, b, u, Pl, Pr, - maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, + abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) do_factorization(alg, A, b, u) end -function init_cacheval(alg::Union{GenericFactorization{typeof(bunchkaufman!)}, - GenericFactorization{typeof(bunchkaufman)}}, - A::Union{Hermitian, Symmetric}, b, u, Pl, Pr, maxiters::Int, abstol, - reltol, verbose::Bool, assumptions::OperatorAssumptions) +function init_cacheval( + alg::Union{GenericFactorization{typeof(bunchkaufman!)}, + GenericFactorization{typeof(bunchkaufman)}}, + A::Union{Hermitian, Symmetric}, b, u, Pl, Pr, maxiters::Int, abstol, + reltol, verbose::Bool, assumptions::OperatorAssumptions) BunchKaufman(A.data, Array(1:size(A, 1)), A.uplo, true, false, 0) end -function init_cacheval(alg::Union{GenericFactorization{typeof(bunchkaufman!)}, - GenericFactorization{typeof(bunchkaufman)}}, - A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr, - maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) +function init_cacheval( + alg::Union{GenericFactorization{typeof(bunchkaufman!)}, + GenericFactorization{typeof(bunchkaufman)}}, + A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr, + maxiters::Int, + abstol, reltol, verbose::Bool, 
assumptions::OperatorAssumptions) if eltype(A) <: Complex return bunchkaufman!(Hermitian(A)) else @@ -642,53 +661,60 @@ end # Try to never use it. # Cholesky needs the posdef matrix, for GenericFactorization assume structure is needed -function init_cacheval(alg::GenericFactorization{typeof(cholesky)}, A::AbstractMatrix, b, u, Pl, Pr, +function init_cacheval( + alg::GenericFactorization{typeof(cholesky)}, A::AbstractMatrix, b, u, Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + assumptions::OperatorAssumptions) newA = copy(convert(AbstractMatrix, A)) do_factorization(alg, newA, b, u) end -function init_cacheval(alg::GenericFactorization{typeof(cholesky!)}, A::AbstractMatrix, b, u, Pl, Pr, +function init_cacheval( + alg::GenericFactorization{typeof(cholesky!)}, A::AbstractMatrix, b, u, Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + assumptions::OperatorAssumptions) newA = copy(convert(AbstractMatrix, A)) do_factorization(alg, newA, b, u) end -function init_cacheval(alg::GenericFactorization{typeof(cholesky!)}, A::Diagonal, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) +function init_cacheval(alg::GenericFactorization{typeof(cholesky!)}, + A::Diagonal, b, u, Pl, Pr, maxiters::Int, + abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) Diagonal(inv.(A.diag)) end -function init_cacheval(alg::GenericFactorization{typeof(cholesky!)}, A::Tridiagonal, b, u, Pl, Pr, - maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) +function init_cacheval( + alg::GenericFactorization{typeof(cholesky!)}, A::Tridiagonal, b, u, Pl, Pr, + maxiters::Int, + abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(A) end -function init_cacheval(alg::GenericFactorization{typeof(cholesky!)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) where {T, V} +function init_cacheval( + alg::GenericFactorization{typeof(cholesky!)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) where {T, V} LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A) end -function init_cacheval(alg::GenericFactorization{typeof(cholesky)}, A::Diagonal, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) +function init_cacheval(alg::GenericFactorization{typeof(cholesky)}, + A::Diagonal, b, u, Pl, Pr, maxiters::Int, + abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) Diagonal(inv.(A.diag)) end -function init_cacheval(alg::GenericFactorization{typeof(cholesky)}, A::Tridiagonal, b, u, Pl, Pr, - maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) +function init_cacheval( + alg::GenericFactorization{typeof(cholesky)}, A::Tridiagonal, b, u, Pl, Pr, + maxiters::Int, + abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(A) end -function init_cacheval(alg::GenericFactorization{typeof(cholesky)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) where {T, V} +function init_cacheval( + alg::GenericFactorization{typeof(cholesky)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) where {T, V} LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A) end - 
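The `GenericFactorization` dispatches above all feed `do_factorization`; a minimal sketch of swapping in `qr` for the default `fact_alg` (matrix values are illustrative):

```julia
using LinearSolve, LinearAlgebra

A = [4.0 1.0; 1.0 3.0]
b = [1.0, 2.0]

# Any function complying with the LinearAlgebra factorization API works as
# `fact_alg`; here `qr` replaces the default `LinearAlgebra.factorize`.
sol = solve(LinearProblem(A, b), GenericFactorization(; fact_alg = qr))
sol.u
```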
function init_cacheval(alg::GenericFactorization, - A::Union{Hermitian{T, <:SparseMatrixCSC}, - Symmetric{T, <:SparseMatrixCSC}}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) where {T} + A::Union{Hermitian{T, <:SparseMatrixCSC}, + Symmetric{T, <:SparseMatrixCSC}}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) where {T} newA = copy(convert(AbstractMatrix, A)) do_factorization(alg, newA, b, u) end @@ -719,23 +745,23 @@ const PREALLOCATED_UMFPACK = SparseArrays.UMFPACK.UmfpackLU(SparseMatrixCSC(0, 0 Int[], Float64[])) function init_cacheval(alg::UMFPACKFactorization, - A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + A, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, + verbose::Bool, assumptions::OperatorAssumptions) nothing end function init_cacheval(alg::UMFPACKFactorization, A::SparseMatrixCSC{Float64, Int}, b, u, - Pl, Pr, - maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + Pl, Pr, + maxiters::Int, abstol, reltol, + verbose::Bool, assumptions::OperatorAssumptions) PREALLOCATED_UMFPACK end function init_cacheval(alg::UMFPACKFactorization, A::AbstractSparseArray, b, u, Pl, Pr, - maxiters::Int, abstol, - reltol, - verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, + reltol, + verbose::Bool, assumptions::OperatorAssumptions) A = convert(AbstractMatrix, A) zerobased = SparseArrays.getcolptr(A)[1] == 0 return SparseArrays.UMFPACK.UmfpackLU(SparseMatrixCSC(size(A)..., getcolptr(A), @@ -792,23 +818,23 @@ const PREALLOCATED_KLU = KLU.KLUFactorization(SparseMatrixCSC(0, 0, [1], Int[], Float64[])) function init_cacheval(alg::KLUFactorization, - A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + A, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, + verbose::Bool, assumptions::OperatorAssumptions) nothing end function init_cacheval(alg::KLUFactorization, A::SparseMatrixCSC{Float64, Int}, b, u, Pl, - Pr, - maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + Pr, + maxiters::Int, abstol, reltol, + verbose::Bool, assumptions::OperatorAssumptions) PREALLOCATED_KLU end function init_cacheval(alg::KLUFactorization, A::AbstractSparseArray, b, u, Pl, Pr, - maxiters::Int, abstol, - reltol, - verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, + reltol, + verbose::Bool, assumptions::OperatorAssumptions) A = convert(AbstractMatrix, A) return KLU.KLUFactorization(SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), nonzeros(A))) @@ -865,8 +891,8 @@ Only supports sparse matrices. ## Keyword Arguments -* shift: the shift argument in CHOLMOD. -* perm: the perm argument in CHOLMOD + - shift: the shift argument in CHOLMOD. 
+ - perm: the perm argument in CHOLMOD """ Base.@kwdef struct CHOLMODFactorization{T} <: AbstractFactorization shift::Float64 = 0.0 @@ -876,17 +902,18 @@ end const PREALLOCATED_CHOLMOD = cholesky(SparseMatrixCSC(0, 0, [1], Int[], Float64[])) function init_cacheval(alg::CHOLMODFactorization, - A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + A, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, + verbose::Bool, assumptions::OperatorAssumptions) nothing end function init_cacheval(alg::CHOLMODFactorization, - A::Union{SparseMatrixCSC{T, Int}, Symmetric{T, SparseMatrixCSC{T, Int}}}, b, u, - Pl, Pr, - maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) where {T <: Union{Float32, Float64}} + A::Union{SparseMatrixCSC{T, Int}, Symmetric{T, SparseMatrixCSC{T, Int}}}, b, u, + Pl, Pr, + maxiters::Int, abstol, reltol, + verbose::Bool, assumptions::OperatorAssumptions) where {T <: + Union{Float32, Float64}} PREALLOCATED_CHOLMOD end @@ -928,34 +955,34 @@ function RFLUFactorization(; pivot = Val(true), thread = Val(true)) end function init_cacheval(alg::RFLUFactorization, A, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) ipiv = Vector{LinearAlgebra.BlasInt}(undef, min(size(A)...)) ArrayInterface.lu_instance(convert(AbstractMatrix, A)), ipiv end function init_cacheval(alg::RFLUFactorization, A::Matrix{Float64}, b, u, Pl, Pr, - maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, + abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) ipiv = Vector{LinearAlgebra.BlasInt}(undef, 0) PREALLOCATED_LU, ipiv end function init_cacheval(alg::RFLUFactorization, - A::Union{AbstractSparseArray, AbstractSciMLOperator}, b, u, Pl, Pr, - maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + A::Union{AbstractSparseArray, AbstractSciMLOperator}, b, u, Pl, Pr, + maxiters::Int, + abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) nothing, nothing end function init_cacheval(alg::RFLUFactorization, - A::Union{Diagonal, SymTridiagonal, Tridiagonal}, b, u, Pl, Pr, - maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + A::Union{Diagonal, SymTridiagonal, Tridiagonal}, b, u, Pl, Pr, + maxiters::Int, + abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) nothing, nothing end function SciMLBase.solve!(cache::LinearCache, alg::RFLUFactorization{P, T}; - kwargs...) where {P, T} + kwargs...) where {P, T} A = cache.A A = convert(AbstractMatrix, A) fact, ipiv = @get_cacheval(cache, :RFLUFactorization) @@ -963,7 +990,7 @@ function SciMLBase.solve!(cache::LinearCache, alg::RFLUFactorization{P, T}; if length(ipiv) != min(size(A)...) ipiv = Vector{LinearAlgebra.BlasInt}(undef, min(size(A)...)) end - fact = RecursiveFactorization.lu!(A, ipiv, Val(P), Val(T), check=false) + fact = RecursiveFactorization.lu!(A, ipiv, Val(P), Val(T), check = false) cache.cacheval = (fact, ipiv) cache.isfresh = false end @@ -982,7 +1009,7 @@ be applied to well-conditioned matrices. 
## Positional Arguments -* pivot: Defaults to RowMaximum(), but can be NoPivot() + - pivot: Defaults to RowMaximum(), but can be NoPivot() """ struct NormalCholeskyFactorization{P} <: AbstractFactorization pivot::P @@ -999,38 +1026,39 @@ default_alias_b(::NormalCholeskyFactorization, ::Any, ::Any) = true const PREALLOCATED_NORMALCHOLESKY = ArrayInterface.cholesky_instance(rand(1, 1), NoPivot()) function init_cacheval(alg::NormalCholeskyFactorization, - A::Union{AbstractSparseArray, GPUArraysCore.AnyGPUArray, - Symmetric{<:Number, <:AbstractSparseArray}}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + A::Union{AbstractSparseArray, GPUArraysCore.AnyGPUArray, + Symmetric{<:Number, <:AbstractSparseArray}}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) ArrayInterface.cholesky_instance(convert(AbstractMatrix, A)) end function init_cacheval(alg::NormalCholeskyFactorization, A::SMatrix, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) return cholesky(Symmetric((A)' * A)) end function init_cacheval(alg::NormalCholeskyFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) A_ = convert(AbstractMatrix, A) - return ArrayInterface.cholesky_instance(Symmetric(Matrix{eltype(A)}(undef,0,0)), alg.pivot) + return ArrayInterface.cholesky_instance( + Symmetric(Matrix{eltype(A)}(undef, 0, 0)), alg.pivot) end const PREALLOCATED_NORMALCHOLESKY_SYMMETRIC = ArrayInterface.cholesky_instance( Symmetric(rand(1, 1)), NoPivot()) function init_cacheval(alg::NormalCholeskyFactorization, A::Matrix{Float64}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) return PREALLOCATED_NORMALCHOLESKY_SYMMETRIC end function init_cacheval(alg::NormalCholeskyFactorization, - A::Union{Diagonal, AbstractSciMLOperator}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + A::Union{Diagonal, AbstractSciMLOperator}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) nothing end @@ -1069,7 +1097,7 @@ be applied to well-conditioned matrices. ## Positional Arguments -* rook: whether to perform rook pivoting. Defaults to false. + - rook: whether to perform rook pivoting. Defaults to false. """ struct NormalBunchKaufmanFactorization <: AbstractFactorization rook::Bool @@ -1083,13 +1111,13 @@ default_alias_A(::NormalBunchKaufmanFactorization, ::Any, ::Any) = true default_alias_b(::NormalBunchKaufmanFactorization, ::Any, ::Any) = true function init_cacheval(alg::NormalBunchKaufmanFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) ArrayInterface.bunchkaufman_instance(convert(AbstractMatrix, A)) end function SciMLBase.solve!(cache::LinearCache, alg::NormalBunchKaufmanFactorization; - kwargs...) + kwargs...) A = cache.A A = convert(AbstractMatrix, A) if cache.isfresh @@ -1111,12 +1139,12 @@ A special implementation only for solving `Diagonal` matrices fast. 
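A minimal sketch of the fast diagonal path (values illustrative):

```julia
using LinearSolve, LinearAlgebra

D = Diagonal([2.0, 4.0, 8.0])
b = [2.0, 8.0, 32.0]

# The solve reduces to elementwise division by the diagonal entries.
sol = solve(LinearProblem(D, b), DiagonalFactorization())
sol.u  # ≈ [1.0, 2.0, 4.0]
```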
struct DiagonalFactorization <: AbstractFactorization end function init_cacheval(alg::DiagonalFactorization, A, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) nothing end function SciMLBase.solve!(cache::LinearCache, alg::DiagonalFactorization; - kwargs...) + kwargs...) A = convert(AbstractMatrix, cache.A) if cache.u isa Vector && cache.b isa Vector @simd ivdep for i in eachindex(cache.u) @@ -1147,8 +1175,8 @@ this version does not allow for choice of pivoting method. struct FastLUFactorization <: AbstractFactorization end function init_cacheval(::FastLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) ws = LUWs(A) return WorkspaceAndFactors(ws, ArrayInterface.lu_instance(convert(AbstractMatrix, A))) end @@ -1184,30 +1212,30 @@ end FastQRFactorization() = FastQRFactorization(NoPivot(), 36) function init_cacheval(alg::FastQRFactorization{NoPivot}, A::AbstractMatrix, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) ws = QRWYWs(A; blocksize = alg.blocksize) return WorkspaceAndFactors(ws, ArrayInterface.qr_instance(convert(AbstractMatrix, A))) end function init_cacheval(::FastQRFactorization{ColumnNorm}, A::AbstractMatrix, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) ws = QRpWs(A) return WorkspaceAndFactors(ws, ArrayInterface.qr_instance(convert(AbstractMatrix, A))) end function init_cacheval(alg::FastQRFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) return init_cacheval(alg, convert(AbstractMatrix, A), b, u, Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) end function SciMLBase.solve!(cache::LinearCache, alg::FastQRFactorization{P}; - kwargs...) where {P} + kwargs...) where {P} A = cache.A A = convert(AbstractMatrix, A) ws_and_fact = @get_cacheval(cache, :FastQRFactorization) @@ -1215,10 +1243,12 @@ function SciMLBase.solve!(cache::LinearCache, alg::FastQRFactorization{P}; # we will fail here if A is a different *size* than in a previous version of the same cache. # it may instead be desirable to resize the workspace. if P === NoPivot - @set! ws_and_fact.factors = LinearAlgebra.QRCompactWY(LAPACK.geqrt!(ws_and_fact.workspace, + @set! ws_and_fact.factors = LinearAlgebra.QRCompactWY(LAPACK.geqrt!( + ws_and_fact.workspace, A)...) else - @set! ws_and_fact.factors = LinearAlgebra.QRPivoted(LAPACK.geqp3!(ws_and_fact.workspace, + @set! ws_and_fact.factors = LinearAlgebra.QRPivoted(LAPACK.geqp3!( + ws_and_fact.workspace, A)...) 
end cache.cacheval = ws_and_fact @@ -1254,25 +1284,26 @@ const PREALLOCATED_SPARSEPAK = sparspaklu(SparseMatrixCSC(0, 0, [1], Int[], Floa factorize = false) function init_cacheval(alg::SparspakFactorization, - A::Union{Matrix, Nothing, AbstractSciMLOperator}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + A::Union{Matrix, Nothing, AbstractSciMLOperator}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, + verbose::Bool, assumptions::OperatorAssumptions) nothing end function init_cacheval(::SparspakFactorization, A::SparseMatrixCSC{Float64, Int}, b, u, Pl, - Pr, maxiters::Int, abstol, - reltol, - verbose::Bool, assumptions::OperatorAssumptions) + Pr, maxiters::Int, abstol, + reltol, + verbose::Bool, assumptions::OperatorAssumptions) PREALLOCATED_SPARSEPAK end function init_cacheval(::SparspakFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, - reltol, - verbose::Bool, assumptions::OperatorAssumptions) + reltol, + verbose::Bool, assumptions::OperatorAssumptions) A = convert(AbstractMatrix, A) if A isa SparseArrays.AbstractSparseArray - return sparspaklu(SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), + return sparspaklu( + SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), nonzeros(A)), factorize = false) else @@ -1282,7 +1313,7 @@ function init_cacheval(::SparspakFactorization, A, b, u, Pl, Pr, maxiters::Int, end function init_cacheval(::SparspakFactorization, ::StaticArray, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) nothing end @@ -1306,8 +1337,8 @@ end for alg in InteractiveUtils.subtypes(AbstractFactorization) @eval function init_cacheval(alg::$alg, A::MatrixOperator, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) init_cacheval(alg, A.A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) diff --git a/src/factorization_sparse.jl b/src/factorization_sparse.jl index 1bc0ac261..7c2ddb156 100644 --- a/src/factorization_sparse.jl +++ b/src/factorization_sparse.jl @@ -1,22 +1,29 @@ # Specialize QR for the non-square case # Missing ldiv! 
definitions: https://github.com/JuliaSparse/SparseArrays.jl/issues/242 function _ldiv!(x::Vector, - A::Union{SparseArrays.QR, LinearAlgebra.QRCompactWY, - SparseArrays.SPQR.QRSparse, - SparseArrays.CHOLMOD.Factor}, b::Vector) + A::Union{SparseArrays.QR, LinearAlgebra.QRCompactWY, + SparseArrays.SPQR.QRSparse, + SparseArrays.CHOLMOD.Factor}, b::Vector) x .= A \ b end function _ldiv!(x::AbstractVector, - A::Union{SparseArrays.QR, LinearAlgebra.QRCompactWY, - SparseArrays.SPQR.QRSparse, - SparseArrays.CHOLMOD.Factor}, b::AbstractVector) + A::Union{SparseArrays.QR, LinearAlgebra.QRCompactWY, + SparseArrays.SPQR.QRSparse, + SparseArrays.CHOLMOD.Factor}, b::AbstractVector) x .= A \ b end # Ambiguity removal -_ldiv!(::SVector, - A::Union{SparseArrays.CHOLMOD.Factor, LinearAlgebra.QR, LinearAlgebra.QRCompactWY, SparseArrays.SPQR.QRSparse}, - b::AbstractVector) = (A \ b) -_ldiv!(::SVector, A::Union{SparseArrays.CHOLMOD.Factor, LinearAlgebra.QR, LinearAlgebra.QRCompactWY, SparseArrays.SPQR.QRSparse}, - b::SVector) = (A \ b) +function _ldiv!(::SVector, + A::Union{SparseArrays.CHOLMOD.Factor, LinearAlgebra.QR, + LinearAlgebra.QRCompactWY, SparseArrays.SPQR.QRSparse}, + b::AbstractVector) + (A \ b) +end +function _ldiv!(::SVector, + A::Union{SparseArrays.CHOLMOD.Factor, LinearAlgebra.QR, + LinearAlgebra.QRCompactWY, SparseArrays.SPQR.QRSparse}, + b::SVector) + (A \ b) +end diff --git a/src/iterative_wrappers.jl b/src/iterative_wrappers.jl index dbdef5b9b..3c03ea7d6 100644 --- a/src/iterative_wrappers.jl +++ b/src/iterative_wrappers.jl @@ -3,9 +3,9 @@ """ ```julia KrylovJL(args...; KrylovAlg = Krylov.gmres!, - Pl = nothing, Pr = nothing, - gmres_restart = 0, window = 0, - kwargs...) + Pl = nothing, Pr = nothing, + gmres_restart = 0, window = 0, + kwargs...) ``` A generic wrapper over the Krylov.jl krylov-subspace iterative solvers. @@ -19,8 +19,8 @@ struct KrylovJL{F, I, A, K} <: AbstractKrylovSubspaceMethod end function KrylovJL(args...; KrylovAlg = Krylov.gmres!, - gmres_restart = 0, window = 0, - kwargs...) + gmres_restart = 0, window = 0, + kwargs...) return KrylovJL(KrylovAlg, gmres_restart, window, args, kwargs) end @@ -30,7 +30,7 @@ default_alias_b(::KrylovJL, ::Any, ::Any) = true """ ```julia -KrylovJL_CG(args...; kwargs...) +KrylovJL_CG(args...; kwargs...) ``` A generic CG implementation for Hermitian and positive definite linear systems @@ -41,7 +41,7 @@ end """ ```julia -KrylovJL_MINRES(args...; kwargs...) +KrylovJL_MINRES(args...; kwargs...) ``` A generic MINRES implementation for Hermitian linear systems @@ -52,7 +52,7 @@ end """ ```julia -KrylovJL_GMRES(args...; gmres_restart = 0, window = 0, kwargs...) +KrylovJL_GMRES(args...; gmres_restart = 0, window = 0, kwargs...) ``` A generic GMRES implementation for square non-Hermitian linear systems @@ -63,7 +63,7 @@ end """ ```julia -KrylovJL_BICGSTAB(args...; kwargs...) +KrylovJL_BICGSTAB(args...; kwargs...) ``` A generic BICGSTAB implementation for square non-Hermitian linear systems @@ -74,7 +74,7 @@ end """ ```julia -KrylovJL_LSMR(args...; kwargs...) +KrylovJL_LSMR(args...; kwargs...) ``` A generic LSMR implementation for least-squares problems @@ -85,7 +85,7 @@ end """ ```julia -KrylovJL_CRAIGMR(args...; kwargs...) +KrylovJL_CRAIGMR(args...; kwargs...) 
``` A generic CRAIGMR implementation for least-norm problems @@ -170,7 +170,7 @@ end # zeroinit allows for init_cacheval to start by initing with A (0,0) function init_cacheval(alg::KrylovJL, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions; zeroinit = true) + verbose::Bool, assumptions::OperatorAssumptions; zeroinit = true) KS = get_KrylovJL_solver(alg.KrylovAlg) if zeroinit @@ -282,7 +282,8 @@ function SciMLBase.solve!(cache::LinearCache, alg::KrylovJL; kwargs...) end stats = @get_cacheval(cache, :KrylovJL_GMRES).stats - resid = !isempty(stats.residuals) ? last(stats.residuals) : zero(eltype(stats.residuals)) + resid = !isempty(stats.residuals) ? last(stats.residuals) : + zero(eltype(stats.residuals)) retcode = if !stats.solved if stats.status == "maximum number of iterations exceeded" diff --git a/src/mkl.jl b/src/mkl.jl index db3106c08..a00882e1d 100644 --- a/src/mkl.jl +++ b/src/mkl.jl @@ -9,9 +9,9 @@ to avoid allocations and does not require libblastrampoline. struct MKLLUFactorization <: AbstractFactorization end function getrf!(A::AbstractMatrix{<:ComplexF64}; - ipiv = similar(A, BlasInt, min(size(A, 1), size(A, 2))), - info = Ref{BlasInt}(), - check = false) + ipiv = similar(A, BlasInt, min(size(A, 1), size(A, 2))), + info = Ref{BlasInt}(), + check = false) require_one_based_indexing(A) check && chkfinite(A) chkstride1(A) @@ -29,9 +29,9 @@ function getrf!(A::AbstractMatrix{<:ComplexF64}; end function getrf!(A::AbstractMatrix{<:ComplexF32}; - ipiv = similar(A, BlasInt, min(size(A, 1), size(A, 2))), - info = Ref{BlasInt}(), - check = false) + ipiv = similar(A, BlasInt, min(size(A, 1), size(A, 2))), + info = Ref{BlasInt}(), + check = false) require_one_based_indexing(A) check && chkfinite(A) chkstride1(A) @@ -49,9 +49,9 @@ function getrf!(A::AbstractMatrix{<:ComplexF32}; end function getrf!(A::AbstractMatrix{<:Float64}; - ipiv = similar(A, BlasInt, min(size(A, 1), size(A, 2))), - info = Ref{BlasInt}(), - check = false) + ipiv = similar(A, BlasInt, min(size(A, 1), size(A, 2))), + info = Ref{BlasInt}(), + check = false) require_one_based_indexing(A) check && chkfinite(A) chkstride1(A) @@ -69,9 +69,9 @@ function getrf!(A::AbstractMatrix{<:Float64}; end function getrf!(A::AbstractMatrix{<:Float32}; - ipiv = similar(A, BlasInt, min(size(A, 1), size(A, 2))), - info = Ref{BlasInt}(), - check = false) + ipiv = similar(A, BlasInt, min(size(A, 1), size(A, 2))), + info = Ref{BlasInt}(), + check = false) require_one_based_indexing(A) check && chkfinite(A) chkstride1(A) @@ -89,10 +89,10 @@ function getrf!(A::AbstractMatrix{<:Float32}; end function getrs!(trans::AbstractChar, - A::AbstractMatrix{<:ComplexF64}, - ipiv::AbstractVector{BlasInt}, - B::AbstractVecOrMat{<:ComplexF64}; - info = Ref{BlasInt}()) + A::AbstractMatrix{<:ComplexF64}, + ipiv::AbstractVector{BlasInt}, + B::AbstractVecOrMat{<:ComplexF64}; + info = Ref{BlasInt}()) require_one_based_indexing(A, ipiv, B) LinearAlgebra.LAPACK.chktrans(trans) chkstride1(A, B, ipiv) @@ -114,10 +114,10 @@ function getrs!(trans::AbstractChar, end function getrs!(trans::AbstractChar, - A::AbstractMatrix{<:ComplexF32}, - ipiv::AbstractVector{BlasInt}, - B::AbstractVecOrMat{<:ComplexF32}; - info = Ref{BlasInt}()) + A::AbstractMatrix{<:ComplexF32}, + ipiv::AbstractVector{BlasInt}, + B::AbstractVecOrMat{<:ComplexF32}; + info = Ref{BlasInt}()) require_one_based_indexing(A, ipiv, B) LinearAlgebra.LAPACK.chktrans(trans) chkstride1(A, B, ipiv) @@ -139,10 +139,10 @@ function getrs!(trans::AbstractChar, end function 
getrs!(trans::AbstractChar, - A::AbstractMatrix{<:Float64}, - ipiv::AbstractVector{BlasInt}, - B::AbstractVecOrMat{<:Float64}; - info = Ref{BlasInt}()) + A::AbstractMatrix{<:Float64}, + ipiv::AbstractVector{BlasInt}, + B::AbstractVecOrMat{<:Float64}; + info = Ref{BlasInt}()) require_one_based_indexing(A, ipiv, B) LinearAlgebra.LAPACK.chktrans(trans) chkstride1(A, B, ipiv) @@ -164,10 +164,10 @@ function getrs!(trans::AbstractChar, end function getrs!(trans::AbstractChar, - A::AbstractMatrix{<:Float32}, - ipiv::AbstractVector{BlasInt}, - B::AbstractVecOrMat{<:Float32}; - info = Ref{BlasInt}()) + A::AbstractMatrix{<:Float32}, + ipiv::AbstractVector{BlasInt}, + B::AbstractVecOrMat{<:Float32}; + info = Ref{BlasInt}()) require_one_based_indexing(A, ipiv, B) LinearAlgebra.LAPACK.chktrans(trans) chkstride1(A, B, ipiv) @@ -197,20 +197,21 @@ const PREALLOCATED_MKL_LU = begin end function LinearSolve.init_cacheval(alg::MKLLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) PREALLOCATED_MKL_LU end -function LinearSolve.init_cacheval(alg::MKLLUFactorization, A::AbstractMatrix{<:Union{Float32,ComplexF32,ComplexF64}}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) +function LinearSolve.init_cacheval(alg::MKLLUFactorization, + A::AbstractMatrix{<:Union{Float32, ComplexF32, ComplexF64}}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) A = rand(eltype(A), 0, 0) ArrayInterface.lu_instance(A), Ref{BlasInt}() end function SciMLBase.solve!(cache::LinearCache, alg::MKLLUFactorization; - kwargs...) + kwargs...) A = cache.A A = convert(AbstractMatrix, A) if cache.isfresh @@ -239,4 +240,4 @@ function SciMLBase.solve!(cache::LinearCache, alg::MKLLUFactorization; SciMLBase.build_linear_solution(alg, cache.u, nothing, cache) =# -end \ No newline at end of file +end diff --git a/src/simplegmres.jl b/src/simplegmres.jl index 76682e547..c2596efc2 100644 --- a/src/simplegmres.jl +++ b/src/simplegmres.jl @@ -9,14 +9,16 @@ specialized dispatches. ## Arguments -* `restart::Bool`: If `true`, then the solver will restart after `memory` iterations. -* `memory::Int = 20`: The number of iterations before restarting. If restart is false, this - value is used to allocate memory and later expanded if more memory is required. -* `blocksize::Int = 0`: If blocksize is `> 0`, the solver assumes that the matrix has a - uniformly sized block diagonal structure with square blocks of size `blocksize`. Misusing - this option will lead to incorrect results. - * If this is set `≤ 0` and during runtime we get a Block Diagonal Matrix, then we will - check if the specialized dispatch can be used. + - `restart::Bool`: If `true`, then the solver will restart after `memory` iterations. + + - `memory::Int = 20`: The number of iterations before restarting. If restart is false, this + value is used to allocate memory and later expanded if more memory is required. + - `blocksize::Int = 0`: If blocksize is `> 0`, the solver assumes that the matrix has a + uniformly sized block diagonal structure with square blocks of size `blocksize`. Misusing + this option will lead to incorrect results. + + + If this is set `≤ 0` and during runtime we get a Block Diagonal Matrix, then we will + check if the specialized dispatch can be used. !!! 
warning @@ -38,13 +40,13 @@ struct SimpleGMRES{UBD} <: AbstractKrylovSubspaceMethod warm_start::Bool function SimpleGMRES{UBD}(; restart::Bool = true, blocksize::Int = 0, - warm_start::Bool = false, memory::Int = 20) where {UBD} + warm_start::Bool = false, memory::Int = 20) where {UBD} UBD && @assert blocksize > 0 return new{UBD}(restart, memory, blocksize, warm_start) end function SimpleGMRES(; restart::Bool = true, blocksize::Int = 0, - warm_start::Bool = false, memory::Int = 20) + warm_start::Bool = false, memory::Int = 20) return SimpleGMRES{blocksize > 0}(; restart, memory, blocksize, warm_start) end @@ -159,7 +161,7 @@ function init_cacheval(alg::SimpleGMRES{UDB}, args...; kwargs...) where {UDB} end function _init_cacheval(::Val{false}, alg::SimpleGMRES, A, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, ::Bool, ::OperatorAssumptions; zeroinit = true, kwargs...) + abstol, reltol, ::Bool, ::OperatorAssumptions; zeroinit = true, kwargs...) @unpack memory, restart, blocksize, warm_start = alg if zeroinit @@ -210,7 +212,8 @@ function _init_cacheval(::Val{false}, alg::SimpleGMRES, A, b, u, Pl, Pr, maxiter rNorm = β ε = abstol + reltol * rNorm - return SimpleGMRESCache{false}(memory, n, restart, maxiters, blocksize, ε, PlisI, PrisI, + return SimpleGMRESCache{false}( + memory, n, restart, maxiters, blocksize, ε, PlisI, PrisI, Pl, Pr, Δx, q, p, x, A, b, abstol, reltol, w, V, s, c, z, R, β, warm_start) end @@ -304,7 +307,8 @@ function SciMLBase.solve!(cache::SimpleGMRESCache{false}, lincache::LinearCache) # Compute and apply current Givens reflection Ωₖ. # [cₖ sₖ] [ r̄ₖ.ₖ ] = [rₖ.ₖ] # [s̄ₖ -cₖ] [hₖ₊₁.ₖ] [ 0 ] - (c[inner_iter], s[inner_iter], R[nr + inner_iter]) = _sym_givens(R[nr + inner_iter], + (c[inner_iter], s[inner_iter], R[nr + inner_iter]) = _sym_givens( + R[nr + inner_iter], Hbis) # Update zₖ = (Qₖ)ᴴβe₁ @@ -387,8 +391,8 @@ function SciMLBase.solve!(cache::SimpleGMRESCache{false}, lincache::LinearCache) end function _init_cacheval(::Val{true}, alg::SimpleGMRES, A, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, ::Bool, ::OperatorAssumptions; zeroinit = true, - blocksize = alg.blocksize) + abstol, reltol, ::Bool, ::OperatorAssumptions; zeroinit = true, + blocksize = alg.blocksize) @unpack memory, restart, warm_start = alg if zeroinit diff --git a/src/simplelu.jl b/src/simplelu.jl index 8049b243d..9c1ad0bf7 100644 --- a/src/simplelu.jl +++ b/src/simplelu.jl @@ -120,7 +120,7 @@ A simple LU-factorization implementation without BLAS. Fast for small matrices. ## Positional Arguments -* pivot::Bool: whether to perform pivoting. Defaults to `true` + - pivot::Bool: whether to perform pivoting. Defaults to `true` """ struct SimpleLUFactorization <: AbstractFactorization pivot::Bool @@ -143,6 +143,6 @@ function SciMLBase.solve!(cache::LinearCache, alg::SimpleLUFactorization; kwargs end function init_cacheval(alg::SimpleLUFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, - reltol, verbose::Bool, assumptions::OperatorAssumptions) + reltol, verbose::Bool, assumptions::OperatorAssumptions) LUSolver(convert(AbstractMatrix, A)) end diff --git a/src/solve_function.jl b/src/solve_function.jl index e26c7c621..5c74199cb 100644 --- a/src/solve_function.jl +++ b/src/solve_function.jl @@ -4,7 +4,7 @@ struct LinearSolveFunction{F} <: AbstractSolveFunction end function SciMLBase.solve!(cache::LinearCache, alg::LinearSolveFunction, - args...; kwargs...) + args...; kwargs...) 
@unpack A, b, u, p, isfresh, Pl, Pr, cacheval = cache @unpack solve_func = alg diff --git a/test/basictests.jl b/test/basictests.jl index c28113654..cb64a1246 100644 --- a/test/basictests.jl +++ b/test/basictests.jl @@ -218,13 +218,12 @@ end end end - test_algs = [ LUFactorization(), QRFactorization(), SVDFactorization(), RFLUFactorization(), - LinearSolve.defaultalg(prob1.A, prob1.b), + LinearSolve.defaultalg(prob1.A, prob1.b) ] if VERSION >= v"1.9" && LinearSolve.usemkl @@ -289,9 +288,7 @@ end for alg in (("Default", IterativeSolversJL(kwargs...)), ("CG", IterativeSolversJL_CG(kwargs...)), ("GMRES", IterativeSolversJL_GMRES(kwargs...)), - ("IDRS", IterativeSolversJL_IDRS(kwargs...)) - # ("BICGSTAB",IterativeSolversJL_BICGSTAB(kwargs...)), - # ("MINRES",IterativeSolversJL_MINRES(kwargs...)), + ("IDRS", IterativeSolversJL_IDRS(kwargs...)) # ("BICGSTAB",IterativeSolversJL_BICGSTAB(kwargs...)), # ("MINRES",IterativeSolversJL_MINRES(kwargs...)), ) @testset "$(alg[1])" begin test_interface(alg[2], prob1, prob2) @@ -424,7 +421,7 @@ end @testset "LinearSolveFunction" begin function sol_func(A, b, u, p, newA, Pl, Pr, solverdata; verbose = true, - kwargs...) + kwargs...) if verbose == true println("out-of-place solve") end @@ -432,7 +429,7 @@ end end function sol_func!(A, b, u, p, newA, Pl, Pr, solverdata; verbose = true, - kwargs...) + kwargs...) if verbose == true println("in-place solve") end diff --git a/test/default_algs.jl b/test/default_algs.jl index 2ffc03b65..d2aafba9d 100644 --- a/test/default_algs.jl +++ b/test/default_algs.jl @@ -5,24 +5,24 @@ prob = LinearProblem(rand(3, 3), rand(3)) solve(prob) if LinearSolve.appleaccelerate_isavailable() - @test LinearSolve.defaultalg(nothing, zeros(50)).alg === - LinearSolve.DefaultAlgorithmChoice.AppleAccelerateLUFactorization + @test LinearSolve.defaultalg(nothing, zeros(50)).alg === + LinearSolve.DefaultAlgorithmChoice.AppleAccelerateLUFactorization else - @test LinearSolve.defaultalg(nothing, zeros(50)).alg === - LinearSolve.DefaultAlgorithmChoice.RFLUFactorization + @test LinearSolve.defaultalg(nothing, zeros(50)).alg === + LinearSolve.DefaultAlgorithmChoice.RFLUFactorization end prob = LinearProblem(rand(50, 50), rand(50)) solve(prob) if LinearSolve.usemkl - @test LinearSolve.defaultalg(nothing, zeros(600)).alg === - LinearSolve.DefaultAlgorithmChoice.MKLLUFactorization + @test LinearSolve.defaultalg(nothing, zeros(600)).alg === + LinearSolve.DefaultAlgorithmChoice.MKLLUFactorization elseif LinearSolve.appleaccelerate_isavailable() - @test LinearSolve.defaultalg(nothing, zeros(600)).alg === - LinearSolve.DefaultAlgorithmChoice.AppleAccelerateLUFactorization + @test LinearSolve.defaultalg(nothing, zeros(600)).alg === + LinearSolve.DefaultAlgorithmChoice.AppleAccelerateLUFactorization else - @test LinearSolve.defaultalg(nothing, zeros(600)).alg === - LinearSolve.DefaultAlgorithmChoice.LUFactorization + @test LinearSolve.defaultalg(nothing, zeros(600)).alg === + LinearSolve.DefaultAlgorithmChoice.LUFactorization end prob = LinearProblem(rand(600, 600), rand(600)) diff --git a/test/enzyme.jl b/test/enzyme.jl index 89903a858..ac552a45a 100644 --- a/test/enzyme.jl +++ b/test/enzyme.jl @@ -22,8 +22,8 @@ f(A, b1) # Uses BLAS Enzyme.autodiff(Reverse, f, Duplicated(copy(A), dA), Duplicated(copy(b1), db1)) -dA2 = ForwardDiff.gradient(x->f(x,eltype(x).(b1)), copy(A)) -db12 = ForwardDiff.gradient(x->f(eltype(x).(A),x), copy(b1)) +dA2 = ForwardDiff.gradient(x -> f(x, eltype(x).(b1)), copy(A)) +db12 = ForwardDiff.gradient(x -> f(eltype(x).(A), x), copy(b1)) 
@test dA ≈ dA2 @test db1 ≈ db12 @@ -33,13 +33,20 @@ dA = zeros(n, n); b1 = rand(n); db1 = zeros(n); -_ff = (x,y) -> f(x,y; alg = LinearSolve.DefaultLinearSolver(LinearSolve.DefaultAlgorithmChoice.LUFactorization)) +_ff = (x, y) -> f(x, + y; + alg = LinearSolve.DefaultLinearSolver(LinearSolve.DefaultAlgorithmChoice.LUFactorization)) _ff(copy(A), copy(b1)) -Enzyme.autodiff(Reverse, (x,y) -> f(x,y; alg = LinearSolve.DefaultLinearSolver(LinearSolve.DefaultAlgorithmChoice.LUFactorization)), Duplicated(copy(A), dA), Duplicated(copy(b1), db1)) +Enzyme.autodiff(Reverse, + (x, y) -> f(x, + y; + alg = LinearSolve.DefaultLinearSolver(LinearSolve.DefaultAlgorithmChoice.LUFactorization)), + Duplicated(copy(A), dA), + Duplicated(copy(b1), db1)) -dA2 = ForwardDiff.gradient(x->f(x,eltype(x).(b1)), copy(A)) -db12 = ForwardDiff.gradient(x->f(eltype(x).(A),x), copy(b1)) +dA2 = ForwardDiff.gradient(x -> f(x, eltype(x).(b1)), copy(A)) +db12 = ForwardDiff.gradient(x -> f(eltype(x).(A), x), copy(b1)) @test dA ≈ dA2 @test db1 ≈ db12 @@ -51,7 +58,6 @@ b1 = rand(n); db1 = zeros(n); db12 = zeros(n); - # Batch test n = 4 A = rand(n, n); @@ -79,11 +85,12 @@ end y = [0.0] dy1 = [1.0] dy2 = [1.0] -Enzyme.autodiff(Reverse, fbatch, Duplicated(y, dy1), Duplicated(copy(A), dA), Duplicated(copy(b1), db1)) +Enzyme.autodiff( + Reverse, fbatch, Duplicated(y, dy1), Duplicated(copy(A), dA), Duplicated(copy(b1), db1)) -@test y[1] ≈ f(copy(A),b1) -dA_2 = ForwardDiff.gradient(x->f(x,eltype(x).(b1)), copy(A)) -db1_2 = ForwardDiff.gradient(x->f(eltype(x).(A),x), copy(b1)) +@test y[1] ≈ f(copy(A), b1) +dA_2 = ForwardDiff.gradient(x -> f(x, eltype(x).(b1)), copy(A)) +db1_2 = ForwardDiff.gradient(x -> f(eltype(x).(A), x), copy(b1)) @test dA ≈ dA_2 @test db1 ≈ db1_2 @@ -95,7 +102,8 @@ dA .= 0 dA2 .= 0 db1 .= 0 db12 .= 0 -Enzyme.autodiff(Reverse, fbatch, BatchDuplicated(y, (dy1, dy2)), BatchDuplicated(copy(A), (dA, dA2)), BatchDuplicated(copy(b1), (db1, db12))) +Enzyme.autodiff(Reverse, fbatch, BatchDuplicated(y, (dy1, dy2)), + BatchDuplicated(copy(A), (dA, dA2)), BatchDuplicated(copy(b1), (db1, db12))) @test dA ≈ dA_2 @test db1 ≈ db1_2 @@ -119,11 +127,12 @@ b2 = rand(n); db2 = zeros(n); f(A, b1, b2) -Enzyme.autodiff(Reverse, f, Duplicated(copy(A), dA), Duplicated(copy(b1), db1), Duplicated(copy(b2), db2)) +Enzyme.autodiff(Reverse, f, Duplicated(copy(A), dA), + Duplicated(copy(b1), db1), Duplicated(copy(b2), db2)) -dA2 = ForwardDiff.gradient(x->f(x,eltype(x).(b1),eltype(x).(b2)), copy(A)) -db12 = ForwardDiff.gradient(x->f(eltype(x).(A),x,eltype(x).(b2)), copy(b1)) -db22 = ForwardDiff.gradient(x->f(eltype(x).(A),eltype(x).(b1),x), copy(b2)) +dA2 = ForwardDiff.gradient(x -> f(x, eltype(x).(b1), eltype(x).(b2)), copy(A)) +db12 = ForwardDiff.gradient(x -> f(eltype(x).(A), x, eltype(x).(b2)), copy(b1)) +db22 = ForwardDiff.gradient(x -> f(eltype(x).(A), eltype(x).(b1), x), copy(b2)) @test dA ≈ dA2 @test db1 ≈ db12 @@ -142,7 +151,8 @@ f2(A, b1, b2) dA = zeros(n, n); db1 = zeros(n); db2 = zeros(n); -Enzyme.autodiff(Reverse, f2, Duplicated(copy(A), dA), Duplicated(copy(b1), db1), Duplicated(copy(b2), db2)) +Enzyme.autodiff(Reverse, f2, Duplicated(copy(A), dA), + Duplicated(copy(b1), db1), Duplicated(copy(b2), db2)) @test dA ≈ dA2 @test db1 ≈ db12 @@ -169,10 +179,9 @@ A = rand(n, n); dA = zeros(n, n); b1 = rand(n); for alg in ( - LUFactorization(), - RFLUFactorization(), - # KrylovJL_GMRES(), fails - ) + LUFactorization(), + RFLUFactorization() # KrylovJL_GMRES(), fails +) @show alg function fb(b) prob = LinearProblem(A, b) @@ -192,7 +201,7 @@ for 
alg in ( end |> collect @show en_jac - @test en_jac ≈ fd_jac rtol=1e-4 + @test en_jac≈fd_jac rtol=1e-4 function fA(A) prob = LinearProblem(A, b1) @@ -212,5 +221,5 @@ for alg in ( end |> collect @show en_jac - @test en_jac ≈ fd_jac rtol=1e-4 -end \ No newline at end of file + @test en_jac≈fd_jac rtol=1e-4 +end diff --git a/test/gpu/cuda.jl b/test/gpu/cuda.jl index 6ce8d2916..d8ea3106b 100644 --- a/test/gpu/cuda.jl +++ b/test/gpu/cuda.jl @@ -27,19 +27,19 @@ function test_interface(alg, prob1, prob2) x2 = prob2.u0 y = solve(prob1, alg; cache_kwargs...) - @test CUDA.@allowscalar(Array(A1 * y) ≈ Array(b1)) + @test CUDA.@allowscalar(Array(A1 * y)≈Array(b1)) cache = SciMLBase.init(prob1, alg; cache_kwargs...) # initialize cache solve!(cache) - @test CUDA.@allowscalar(Array(A1 * cache.u) ≈ Array(b1)) + @test CUDA.@allowscalar(Array(A1 * cache.u)≈Array(b1)) cache.A = copy(A2) solve!(cache) - @test CUDA.@allowscalar(Array(A2 * cache.u) ≈ Array(b1)) + @test CUDA.@allowscalar(Array(A2 * cache.u)≈Array(b1)) cache.b = copy(b2) solve!(cache) - @test CUDA.@allowscalar(Array(A2 * cache.u) ≈ Array(b2)) + @test CUDA.@allowscalar(Array(A2 * cache.u)≈Array(b2)) return end @@ -83,7 +83,7 @@ prob1 = LinearProblem(A', b) prob2 = LinearProblem(transpose(A), b) @testset "Adjoint/Transpose Type: $(alg)" for alg in (NormalCholeskyFactorization(), - CholeskyFactorization(), LUFactorization(), QRFactorization(), nothing) + CholeskyFactorization(), LUFactorization(), QRFactorization(), nothing) sol = solve(prob1, alg; alias_A = false) @test norm(A' * sol.u .- b) < 1e-5 diff --git a/test/hypretests.jl b/test/hypretests.jl index 3cceb6765..c41079c1c 100644 --- a/test/hypretests.jl +++ b/test/hypretests.jl @@ -1,7 +1,7 @@ using HYPRE using HYPRE.LibHYPRE: HYPRE_BigInt, - HYPRE_Complex, HYPRE_IJMatrixGetValues, - HYPRE_IJVectorGetValues, HYPRE_Int + HYPRE_Complex, HYPRE_IJMatrixGetValues, + HYPRE_IJVectorGetValues, HYPRE_Int using LinearAlgebra using LinearSolve using MPI diff --git a/test/pardiso/pardiso.jl b/test/pardiso/pardiso.jl index 36c24ece5..6753be21b 100644 --- a/test/pardiso/pardiso.jl +++ b/test/pardiso/pardiso.jl @@ -2,9 +2,9 @@ using LinearSolve, SparseArrays, Random import Pardiso A1 = sparse([1.0 0 -2 3 - 0 5 1 2 - -2 1 4 -7 - 3 2 -7 5]) + 0 5 1 2 + -2 1 4 -7 + 3 2 -7 5]) b1 = rand(4) prob1 = LinearProblem(A1, b1) @@ -119,7 +119,7 @@ iparm = [ (61, 0), (62, 0), (63, 0), - (64, 0), + (64, 0) ] for i in iparm diff --git a/test/resolve.jl b/test/resolve.jl index 9be8b663d..bd2922f54 100644 --- a/test/resolve.jl +++ b/test/resolve.jl @@ -3,14 +3,14 @@ using LinearSolve, LinearAlgebra, SparseArrays, InteractiveUtils, Test for alg in subtypes(LinearSolve.AbstractFactorization) @show alg if !(alg in [ - DiagonalFactorization, - CudaOffloadFactorization, - AppleAccelerateLUFactorization, - MetalLUFactorization, - ]) && + DiagonalFactorization, + CudaOffloadFactorization, + AppleAccelerateLUFactorization, + MetalLUFactorization + ]) && (!(alg == AppleAccelerateLUFactorization) || LinearSolve.appleaccelerate_isavailable()) && - (!(alg == MKLLUFactorization) || LinearSolve.usemkl) + (!(alg == MKLLUFactorization) || LinearSolve.usemkl) A = [1.0 2.0; 3.0 4.0] alg in [KLUFactorization, UMFPACKFactorization, SparspakFactorization] && (A = sparse(A)) @@ -50,19 +50,19 @@ linsolve.A = A @test solve!(linsolve).u ≈ [1.0, 0.5] A = Symmetric([1.0 2.0 - 2.0 1.0]) + 2.0 1.0]) b = [1.0, 2.0] prob = LinearProblem(A, b) linsolve = init(prob, BunchKaufmanFactorization(), alias_A = false, alias_b = false) @test solve!(linsolve).u ≈ 
[1.0, 0.0] @test solve!(linsolve).u ≈ [1.0, 0.0] A = Symmetric([1.0 2.0 - 2.0 1.0]) + 2.0 1.0]) linsolve.A = A @test solve!(linsolve).u ≈ [1.0, 0.0] A = [1.0 2.0 - 2.0 1.0] + 2.0 1.0] A = Symmetric(A * A') b = [1.0, 2.0] prob = LinearProblem(A, b) @@ -70,7 +70,7 @@ linsolve = init(prob, CholeskyFactorization(), alias_A = false, alias_b = false) @test solve!(linsolve).u ≈ [-1 / 3, 2 / 3] @test solve!(linsolve).u ≈ [-1 / 3, 2 / 3] A = [1.0 2.0 - 2.0 1.0] + 2.0 1.0] A = Symmetric(A * A') b = [1.0, 2.0] @test solve!(linsolve).u ≈ [-1 / 3, 2 / 3] diff --git a/test/sparse_vector.jl b/test/sparse_vector.jl index 7f80ae1a5..a7ace0202 100644 --- a/test/sparse_vector.jl +++ b/test/sparse_vector.jl @@ -12,7 +12,7 @@ function hess_sparse(x::Vector{T}) where {T} 1.0, 1.0, 12.0 * x[5]^2 + 1.0, - 1.0, + 1.0 ] end rowval = [1, 1, 2, 2, 3, 4, 5, 6] @@ -31,7 +31,7 @@ x0 = [ 1.023999999999997e-7, -1.0, 0.33141395338218227, - -1.0, + -1.0 ] n = length(x0) hess_mat = sparse(rowval, colval, hess_sparse(x0), n, n)
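A condensed sketch of the same COO-assembly-then-solve pattern this test exercises, with illustrative triplets; `KLUFactorization` is one of the sparse factorizations touched by this diff:

```julia
using LinearSolve, SparseArrays

# Assemble a sparse matrix from (row, col, value) triplets, as in test/sparse_vector.jl.
rowval = [1, 2, 3, 3]
colval = [1, 2, 2, 3]
vals = [2.0, 3.0, 1.0, 4.0]
A = sparse(rowval, colval, vals, 3, 3)
b = [2.0, 3.0, 5.0]

sol = solve(LinearProblem(A, b), KLUFactorization())
sol.u  # ≈ [1.0, 1.0, 1.0]
```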