Merge pull request #273 from j-fu/abstractsparse
Use AbstractSparseMatrixCSC for sparse factorizations
ChrisRackauckas authored Feb 20, 2023
2 parents a7b7d2b + 82da413 commit 5840ee7
Showing 6 changed files with 52 additions and 25 deletions.
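The recurring pattern in this PR: dispatch on `AbstractSparseMatrixCSC` and, wherever a downstream factorization library requires the concrete type, rebuild a `SparseMatrixCSC` over the same colptr/rowval/nzval buffers. A minimal sketch of that idea (the `as_csc` helper name is hypothetical and not part of the changeset):

```julia
using LinearAlgebra, SparseArrays
using SparseArrays: AbstractSparseMatrixCSC, getcolptr, rowvals, nonzeros

# Rebuild a concrete SparseMatrixCSC sharing the storage of any CSC-compatible
# matrix, so factorization routines that demand the concrete type can be called
# without copying the data.
as_csc(A::AbstractSparseMatrixCSC) =
    SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), nonzeros(A))

A = sprand(50, 50, 0.1) + 10I   # well-conditioned sparse test matrix
b = rand(50)
F = lu(as_csc(A))               # sparse LU on the rebuilt SparseMatrixCSC
@assert A * (F \ b) ≈ b
```

Because the five-argument `SparseMatrixCSC` constructor stores the passed vectors directly, the rebuild shares the buffers rather than copying them; only the wrapper struct is allocated.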
5 changes: 3 additions & 2 deletions lib/LinearSolvePardiso/Project.toml
@@ -8,12 +8,13 @@ LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
LinearSolve = "7ed4a6bd-45f5-4d41-b270-4a48e9bafcae"
Pardiso = "46dd5b70-b6fb-5a00-ae2d-e8fea33afaf2"
SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462"
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
UnPack = "3a884ed6-31ef-47d7-9d2a-63182c4928ed"

[compat]
SciMLBase = "1.25"
LinearSolve = "1.24"
Pardiso = "0.5"
Pardiso = "0.5"
SciMLBase = "1.25"
UnPack = "1"
julia = "1.6"

20 changes: 17 additions & 3 deletions lib/LinearSolvePardiso/src/LinearSolvePardiso.jl
@@ -1,6 +1,9 @@
module LinearSolvePardiso

using Pardiso, LinearSolve, SciMLBase
using SparseArrays
using SparseArrays: nonzeros, rowvals, getcolptr

using UnPack

Base.@kwdef struct PardisoJL <: LinearSolve.SciMLLinearSolveAlgorithm
@@ -17,8 +20,16 @@ LinearSolve.needs_concrete_A(alg::PardisoJL) = true

# TODO schur complement functionality

function LinearSolve.init_cacheval(alg::PardisoJL, A, b, u, Pl, Pr, maxiters::Int, abstol,
reltol, verbose::Bool,
function LinearSolve.init_cacheval(alg::PardisoJL,
A,
b,
u,
Pl,
Pr,
maxiters::Int,
abstol,
reltol,
verbose::Bool,
assumptions::LinearSolve.OperatorAssumptions)
@unpack nprocs, solver_type, matrix_type, iparm, dparm = alg
A = convert(AbstractMatrix, A)
@@ -90,7 +101,10 @@ function LinearSolve.init_cacheval(alg::PardisoJL, A, b, u, Pl, Pr, maxiters::In
Pardiso.set_iparm!(solver, 3, round(Int, abs(log10(reltol)), RoundDown) * 10 + 1)
end

Pardiso.pardiso(solver, u, A, b)
Pardiso.pardiso(solver,
u,
SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), nonzeros(A)),
b)

return solver
end
4 changes: 1 addition & 3 deletions lib/LinearSolvePardiso/test/runtests.jl
@@ -17,9 +17,7 @@ cache_kwargs = (; verbose = true, abstol = 1e-8, reltol = 1e-8, maxiter = 30)

prob2 = LinearProblem(A2, b2)

for alg in (PardisoJL(),
MKLPardisoFactorize(),
MKLPardisoIterate())
for alg in (PardisoJL(), MKLPardisoFactorize(), MKLPardisoIterate())
u = solve(prob1, alg; cache_kwargs...).u
@test A1 * u ≈ b1

1 change: 1 addition & 0 deletions src/LinearSolve.jl
@@ -9,6 +9,7 @@ using Base: cache_dependencies, Bool
using LinearAlgebra
using IterativeSolvers: Identity
using SparseArrays
using SparseArrays: AbstractSparseMatrixCSC, nonzeros, rowvals, getcolptr
using SciMLBase: AbstractLinearAlgorithm
using SciMLOperators
using SciMLOperators: AbstractSciMLOperator, IdentityOperator
4 changes: 2 additions & 2 deletions src/default.jl
@@ -50,13 +50,13 @@ function defaultalg(A::Diagonal, b, ::OperatorAssumptions{Nothing})
DiagonalFactorization()
end

function defaultalg(A::SparseMatrixCSC{Tv, Ti}, b,
function defaultalg(A::AbstractSparseMatrixCSC{Tv, Ti}, b,
::OperatorAssumptions{true}) where {Tv, Ti}
SparspakFactorization()
end

@static if INCLUDE_SPARSE
function defaultalg(A::SparseMatrixCSC{<:Union{Float64, ComplexF64}, Ti}, b,
function defaultalg(A::AbstractSparseMatrixCSC{<:Union{Float64, ComplexF64}, Ti}, b,
::OperatorAssumptions{true}) where {Ti}
if length(b) <= 10_000
KLUFactorization()
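With `defaultalg` (and the factorization wrappers below) now accepting `AbstractSparseMatrixCSC`, any CSC-compatible matrix type that exposes the column-pointer, row-value, and nonzero accessors can flow into the sparse defaults. A hypothetical skeleton of such a wrapper, for illustration only; a complete subtype would also need the rest of the `AbstractArray`/`AbstractSparseMatrixCSC` interface:

```julia
using SparseArrays
using SparseArrays: AbstractSparseMatrixCSC, getcolptr, rowvals, nonzeros

# Hypothetical wrapper: forwards the three accessors used by the
# SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), nonzeros(A)) rebuild.
struct WrappedCSC{Tv, Ti <: Integer} <: AbstractSparseMatrixCSC{Tv, Ti}
    data::SparseMatrixCSC{Tv, Ti}
end
Base.size(A::WrappedCSC) = size(A.data)
Base.getindex(A::WrappedCSC, i::Int, j::Int) = A.data[i, j]
SparseArrays.getcolptr(A::WrappedCSC) = getcolptr(A.data)
SparseArrays.rowvals(A::WrappedCSC) = rowvals(A.data)
SparseArrays.nonzeros(A::WrappedCSC) = nonzeros(A.data)
```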
43 changes: 28 additions & 15 deletions src/factorization.jl
@@ -52,8 +52,8 @@ end

function do_factorization(alg::LUFactorization, A, b, u)
A = convert(AbstractMatrix, A)
if A isa SparseMatrixCSC
return lu(A)
if A isa AbstractSparseMatrixCSC
return lu(SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), nonzeros(A)))
else
fact = lu!(A, alg.pivot)
end
@@ -277,7 +277,8 @@ function init_cacheval(alg::UMFPACKFactorization, A, b, u, Pl, Pr, maxiters::Int
copy(nonzeros(A)), 0)
finalizer(SuiteSparse.UMFPACK.umfpack_free_symbolic, res)
else
return SuiteSparse.UMFPACK.UmfpackLU(A)
return SuiteSparse.UMFPACK.UmfpackLU(SparseMatrixCSC(size(A)..., getcolptr(A),
rowvals(A), nonzeros(A)))
end
end

@@ -290,12 +291,15 @@ function SciMLBase.solve(cache::LinearCache, alg::UMFPACKFactorization; kwargs..
if alg.check_pattern && !(SuiteSparse.decrement(SparseArrays.getcolptr(A)) ==
cache.cacheval.colptr &&
SuiteSparse.decrement(SparseArrays.getrowval(A)) == cache.cacheval.rowval)
fact = lu(A)
fact = lu(SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A),
nonzeros(A)))
else
fact = lu!(cache.cacheval, A)
fact = lu!(cache.cacheval,
SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A),
nonzeros(A)))
end
else
fact = lu(A)
fact = lu(SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), nonzeros(A)))
end
cache = set_cacheval(cache, fact)
end
@@ -312,7 +316,9 @@ end
function init_cacheval(alg::KLUFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol,
reltol,
verbose::Bool, assumptions::OperatorAssumptions)
return KLU.KLUFactorization(convert(AbstractMatrix, A)) # this takes care of the copy internally.
A = convert(AbstractMatrix, A)
return KLU.KLUFactorization(SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A),
nonzeros(A)))
end

function SciMLBase.solve(cache::LinearCache, alg::KLUFactorization; kwargs...)
@@ -323,21 +329,25 @@ function SciMLBase.solve(cache::LinearCache, alg::KLUFactorization; kwargs...)
if alg.check_pattern && !(SuiteSparse.decrement(SparseArrays.getcolptr(A)) ==
cache.cacheval.colptr &&
SuiteSparse.decrement(SparseArrays.getrowval(A)) == cache.cacheval.rowval)
fact = KLU.klu(A)
fact = KLU.klu(SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A),
nonzeros(A)))
else
# If we have a cacheval already, run umfpack_symbolic to ensure the symbolic factorization exists
# This won't recompute if it does.
KLU.klu_analyze!(cache.cacheval)
copyto!(cache.cacheval.nzval, A.nzval)
copyto!(cache.cacheval.nzval, nonzeros(A))
if cache.cacheval._numeric === C_NULL # We MUST have a numeric factorization for reuse, unlike UMFPACK.
KLU.klu_factor!(cache.cacheval)
end
fact = KLU.klu!(cache.cacheval, A)
fact = KLU.klu!(cache.cacheval,
SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A),
nonzeros(A)))
end
else
# New fact each time since the sparsity pattern can change
# and thus it needs to reallocate
fact = KLU.klu(A)
fact = KLU.klu(SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A),
nonzeros(A)))
end
cache = set_cacheval(cache, fact)
end
@@ -511,17 +521,20 @@ function init_cacheval(::SparspakFactorization, A, b, u, Pl, Pr, maxiters::Int,
reltol,
verbose::Bool, assumptions::OperatorAssumptions)
A = convert(AbstractMatrix, A)
return sparspaklu(A, factorize = false)
return sparspaklu(SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), nonzeros(A)),
factorize = false)
end

function SciMLBase.solve(cache::LinearCache, alg::SparspakFactorization; kwargs...)
A = cache.A
A = convert(AbstractMatrix, A)
if cache.isfresh
if cache.cacheval !== nothing && alg.reuse_symbolic
fact = sparspaklu!(cache.cacheval, A)
fact = sparspaklu!(cache.cacheval,
SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A),
nonzeros(A)))
else
fact = sparspaklu(A)
fact = sparspaklu(SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A),
nonzeros(A)))
end
cache = set_cacheval(cache, fact)
end
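End-to-end, nothing changes for callers holding a plain `SparseMatrixCSC`; the usual `LinearProblem`/`solve` interface keeps working, now routed through the rebuilt concrete matrix inside the factorization wrappers. A usage sketch, assuming the post-PR LinearSolve and a working SuiteSparse/KLU build:

```julia
using LinearSolve, LinearAlgebra, SparseArrays

# Usage sketch: solve a sparse system with an explicitly chosen sparse
# factorization; the algorithm rebuilds a concrete SparseMatrixCSC internally.
A = sprand(200, 200, 0.02) + 10I
b = rand(200)
prob = LinearProblem(A, b)
sol = solve(prob, KLUFactorization())
@assert A * sol.u ≈ b
```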
