Commit
improve efficiency of standard rhs evaluation for tridiagonal matrices (#51)

* improve efficiency of standard rhs evaluation for tridiagonal matrices

* load SparseArrays

* format
ranocha authored Apr 5, 2024
1 parent fcce9dd commit b1f2e4e
Showing 4 changed files with 104 additions and 17 deletions.
2 changes: 1 addition & 1 deletion src/PositiveIntegrators.jl
@@ -1,7 +1,7 @@
module PositiveIntegrators

# 1. Load dependencies
-using LinearAlgebra: LinearAlgebra, I, diag, diagind, mul!
+using LinearAlgebra: LinearAlgebra, Tridiagonal, I, diag, diagind, mul!
using SparseArrays: SparseArrays, AbstractSparseMatrix
using StaticArrays: SVector, MVector, SMatrix, StaticArray, @SVector, @SMatrix

61 changes: 45 additions & 16 deletions src/proddest.jl
@@ -163,8 +163,8 @@ end
# New problem type ConservativePDSProblem
"""
    ConservativePDSProblem(P, u0, tspan, p = NullParameters();
-                           p_prototype = nothing,
-                           analytic=nothing)
+                           p_prototype = nothing,
+                           analytic = nothing)
A structure describing a conservative system of ordinary differential equation in form of a production-destruction system (PDS).
`P` denotes the production matrix.
@@ -257,7 +257,8 @@ function ConservativePDSFunction{iip}(P; kwargs...) where {iip}
end

# Most specific constructor for ConservativePDSFunction
-function ConservativePDSFunction{iip, FullSpecialize}(P; p_prototype = nothing,
+function ConservativePDSFunction{iip, FullSpecialize}(P;
+                                                       p_prototype = nothing,
                                                       analytic = nothing) where {iip}
    if p_prototype isa AbstractSparseMatrix
        tmp = zeros(eltype(p_prototype), (size(p_prototype, 1),))
@@ -305,21 +306,49 @@ end
# Evaluation of a ConservativePDSFunction (in-place)
function (PD::ConservativePDSFunction)(du, u, p, t)
    PD.p(PD.p_prototype, u, p, t)
+    sum_terms!(du, PD.tmp, PD.p_prototype)
+    return nothing
+end

-    if PD.p_prototype isa AbstractSparseMatrix
-        # Same result but more efficient - at least currently for SparseMatrixCSC
-        fill!(PD.tmp, one(eltype(PD.tmp)))
-        mul!(vec(du), PD.p_prototype, PD.tmp)
-        sum!(PD.tmp', PD.p_prototype)
-        vec(du) .-= PD.tmp
-    else
-        # This implementation does not need any auxiliary vectors
-        for i in 1:length(u)
-            du[i] = zero(eltype(du))
-            for j in 1:length(u)
-                du[i] += PD.p_prototype[i, j] - PD.p_prototype[j, i]
-            end
+# Generic fallback (for dense arrays)
+# This implementation does not need any auxiliary vectors
+@inline function sum_terms!(du, tmp, P)
+    for i in 1:length(du)
+        du[i] = zero(eltype(du))
+        for j in 1:length(du)
+            du[i] += P[i, j] - P[j, i]
        end
    end
    return nothing
end
+
+# Same result but more efficient - at least currently for SparseMatrixCSC
+@inline function sum_terms!(du, tmp, P::AbstractSparseMatrix)
+    fill!(tmp, one(eltype(tmp)))
+    mul!(vec(du), P, tmp)
+    sum!(tmp', P)
+    vec(du) .-= tmp
+    return nothing
+end
+
+@inline function sum_terms!(du, tmp, P::Tridiagonal)
+    Base.require_one_based_indexing(du, P.dl, P.du)
+    @assert length(du) == length(P.dl) + 1 == length(P.du) + 1
+
+    let i = 1
+        Pij = P.du[i]
+        Pji = P.dl[i]
+        du[i] = Pij - Pji
+    end
+    for i in 2:(length(du) - 1)
+        Pij = P.dl[i - 1] + P.du[i]
+        Pji = P.du[i - 1] + P.dl[i]
+        du[i] = Pij - Pji
+    end
+    let i = lastindex(du)
+        Pij = P.dl[i - 1]
+        Pji = P.du[i - 1]
+        du[i] = Pij - Pji
+    end
+    return nothing
+end
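
For readers skimming the diff: the new Tridiagonal method exploits that the conservative right-hand side du[i] = sum_j (P[i, j] - P[j, i]) reduces to sub- and superdiagonal contributions when P is tridiagonal (the diagonal cancels). Below is a minimal standalone sketch of that equivalence; the helper names rhs_generic! and rhs_tridiagonal! are illustrative only and not part of the package.

using LinearAlgebra: Tridiagonal

# Generic rule: du[i] = sum_j (P[i, j] - P[j, i]); visits all n^2 entries.
function rhs_generic!(du, P)
    for i in eachindex(du)
        du[i] = zero(eltype(du))
        for j in eachindex(du)
            du[i] += P[i, j] - P[j, i]
        end
    end
    return du
end

# Tridiagonal shortcut: only the sub- and superdiagonal entries contribute,
# mirroring the new sum_terms!(du, tmp, P::Tridiagonal) method above.
function rhs_tridiagonal!(du, P::Tridiagonal)
    n = length(du)
    du[1] = P.du[1] - P.dl[1]
    for i in 2:(n - 1)
        du[i] = (P.dl[i - 1] + P.du[i]) - (P.du[i - 1] + P.dl[i])
    end
    du[n] = P.dl[n - 1] - P.du[n - 1]
    return du
end

n = 5
P = Tridiagonal(rand(n - 1), rand(n), rand(n - 1))
rhs_generic!(zeros(n), Matrix(P)) ≈ rhs_tridiagonal!(zeros(n), P)  # true

The test set added in test/runtests.jl below performs the analogous consistency check through the public ConservativePDSProblem interface.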
2 changes: 2 additions & 0 deletions test/Project.toml
@@ -1,7 +1,9 @@
[deps]
Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595"
+LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
LinearSolve = "7ed4a6bd-45f5-4d41-b270-4a48e9bafcae"
OrdinaryDiffEq = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed"
+SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"

[compat]
56 changes: 56 additions & 0 deletions test/runtests.jl
@@ -1,4 +1,7 @@
using Test
+using LinearAlgebra
+using SparseArrays
+
using OrdinaryDiffEq
using PositiveIntegrators

@@ -14,6 +17,59 @@ using Aqua: Aqua
ambiguities = false,)
end

@testset "ConservativePDSFunction" begin
prod_1! = (P, u, p, t) -> begin
fill!(P, zero(eltype(P)))
for i in 1:(length(u) - 1)
P[i, i + 1] = i * u[i]
end
return nothing
end
prod_2! = (P, u, p, t) -> begin
fill!(P, zero(eltype(P)))
for i in 1:(length(u) - 1)
P[i + 1, i] = i * u[i + 1]
end
return nothing
end
prod_3! = (P, u, p, t) -> begin
fill!(P, zero(eltype(P)))
for i in 1:(length(u) - 1)
P[i, i + 1] = i * u[i]
P[i + 1, i] = i * u[i + 1]
end
return nothing
end

n = 10
P_tridiagonal = Tridiagonal(rand(n - 1), zeros(n), rand(n - 1))
P_dense = Matrix(P_tridiagonal)
P_sparse = sparse(P_tridiagonal)
u0 = rand(n)
tspan = (0.0, 1.0)

du_tridiagonal = similar(u0)
du_dense = similar(u0)
du_sparse = similar(u0)

for prod! in (prod_1!, prod_2!, prod_3!)
prob_tridiagonal = ConservativePDSProblem(prod!, u0, tspan;
p_prototype = P_tridiagonal)
prob_dense = ConservativePDSProblem(prod!, u0, tspan;
p_prototype = P_dense)
prob_sparse = ConservativePDSProblem(prod!, u0, tspan;
p_prototype = P_sparse)

prob_tridiagonal.f(du_tridiagonal, u0, nothing, 0.0)
prob_dense.f(du_dense, u0, nothing, 0.0)
prob_sparse.f(du_sparse, u0, nothing, 0.0)

@test du_tridiagonal du_dense
@test du_tridiagonal du_sparse
@test du_dense du_sparse
end
end

@testset "PDSProblem" begin
@testset "Linear model problem" begin
# This is an example of a conservative PDS

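To see the efficiency claim in the commit title for yourself, a hypothetical timing sketch (not part of this commit; it assumes BenchmarkTools.jl is installed alongside the test dependencies, and the problem size n = 1_000 is arbitrary) could compare one right-hand side evaluation for the three prototype types used in the tests above:

using BenchmarkTools
using LinearAlgebra, SparseArrays
using PositiveIntegrators

# Production terms on the first superdiagonal, as in prod_1! from the tests.
prod! = (P, u, p, t) -> begin
    fill!(P, zero(eltype(P)))
    for i in 1:(length(u) - 1)
        P[i, i + 1] = i * u[i]
    end
    return nothing
end

n = 1_000
u0 = rand(n)
du = similar(u0)
for p_prototype in (Tridiagonal(zeros(n - 1), zeros(n), zeros(n - 1)),
                    zeros(n, n),
                    spzeros(n, n))
    prob = ConservativePDSProblem(prod!, u0, (0.0, 1.0); p_prototype = p_prototype)
    @btime $(prob.f)($du, $u0, nothing, 0.0)
end

With the specialized method, the tridiagonal evaluation touches O(n) entries per call instead of the O(n^2) sweep of the generic dense fallback, which is the source of the expected speedup.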