Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

On-the-fly sparse interpolation #7

Open
wants to merge 35 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
35 commits
Select commit Hold shift + click to select a range
5369e80
print time steps
FHoltorf Feb 6, 2023
918e6f7
print out more informative log
FHoltorf Feb 6, 2023
df0f1b3
define sparse interpolation primitives
FHoltorf Feb 22, 2023
af0b6e2
use rel. tolerance for rank adaptation
FHoltorf Feb 22, 2023
8878e20
first steps toward allowing for continuous DEIM with the unconvention…
FHoltorf Feb 22, 2023
d6b0f4b
working status
FHoltorf Mar 12, 2023
5a924eb
ignore data file formats and figures
FHoltorf Mar 13, 2023
a83853f
update testing environment and add data-informed test
FHoltorf Mar 13, 2023
34f05c8
update test env
FHoltorf Mar 13, 2023
56b59cb
fix bug in greedy integrator for data driven problem
FHoltorf Mar 13, 2023
ac6afca
fix initialization change for data driven problems
FHoltorf Mar 13, 2023
c802813
fix bug in solution time range --> endpoint now correctly stored
FHoltorf Mar 13, 2023
bdf3c39
rm print statement
FHoltorf Mar 13, 2023
d6818d6
fix tests for data driven reduction and include tests for data inform…
FHoltorf Mar 13, 2023
d74b9a4
add sparse interpolation tests and clean up
FHoltorf Mar 13, 2023
8597b2f
rm reexport of LinearAlgebra => extension of rank works as intended now
FHoltorf Mar 13, 2023
d723125
clean up
FHoltorf Mar 13, 2023
a30bc7f
update to LowRankArithmetic 0.1.3 and Julia 1.8 compats
FHoltorf Mar 13, 2023
75e56e7
add test for sparse interpolation integrator
FHoltorf Mar 13, 2023
11a29ac
change CI to Julia 1.8
FHoltorf Mar 13, 2023
f4debd7
fix sparse interpolation lr integration test
FHoltorf Mar 13, 2023
0a0cb3a
debugging index selection errors
FHoltorf Mar 13, 2023
3026ba0
revert to normal tests
FHoltorf Mar 13, 2023
b03d526
fix progress printouts
FHoltorf Mar 13, 2023
fd1ab53
rm test env dependence for LRI
FHoltorf Mar 13, 2023
01b7a9c
rename UnconventionalDEIM_Cache to OnTheFlyInterpolation_Cache
FHoltorf Mar 14, 2023
c70a966
restructure filesystem => on the fly interpolation into a separate fi…
FHoltorf Mar 14, 2023
45b105e
on the fly sparse interpolation for projector splitting algorithms
FHoltorf Mar 14, 2023
eed9d38
fix bug in index selection and strengthen test
FHoltorf Mar 14, 2023
bf2005f
disable x86 CI
FHoltorf Mar 14, 2023
15aad94
rm docstring for functor
FHoltorf Mar 14, 2023
e0eba3d
type clean up
FHoltorf Apr 27, 2023
5410dc6
adjust initialization for correct fsal updates
FHoltorf Jan 30, 2024
242db61
add progressmeter
FHoltorf Jan 31, 2024
1cb2da0
flush progressmeter to stream
FHoltorf Jan 31, 2024
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions .github/workflows/CI.yml
Original file line number Diff line number Diff line change
Expand Up @@ -18,13 +18,13 @@ jobs:
fail-fast: false
matrix:
version:
- '1.6'
- '1.8'
#- 'nightly' avoid for now
os:
- ubuntu-latest
arch:
- x64
- x86
#- x86
steps:
- uses: actions/checkout@v2
- uses: julia-actions/setup-julia@v1
Expand Down
3 changes: 3 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -9,3 +9,6 @@ examples/.ipynb_checkpoints
*.mp4
products/
.DS_STORE
*.jld2
*.pdf

5 changes: 3 additions & 2 deletions Project.toml
Original file line number Diff line number Diff line change
Expand Up @@ -5,17 +5,18 @@ version = "0.1.0"

[deps]
DifferentialEquations = "0c46a032-eb83-5123-abaf-570d42b7fbaa"
DocStringExtensions = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
LowRankArithmetic = "8016d1d1-7a3b-4ad2-b79e-5984174ed314"
ProgressMeter = "92933f4c-e287-5a05-a399-4b506db050ca"
Reexport = "189a3867-3050-52da-a836-e630ba90ab69"
UnPack = "3a884ed6-31ef-47d7-9d2a-63182c4928ed"

[compat]
DifferentialEquations = "7"
LowRankArithmetic = "0.1.2"
Reexport = "1"
UnPack = "1"
julia = "1.6"
julia = "1.8"

[extras]
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
Expand Down
12 changes: 10 additions & 2 deletions src/LowRankIntegrators.jl
Original file line number Diff line number Diff line change
@@ -1,12 +1,20 @@
module LowRankIntegrators
using Reexport, UnPack
using Reexport, UnPack, DocStringExtensions, LinearAlgebra, ProgressMeter
import DifferentialEquations: step!, set_u!, init

@reexport using LinearAlgebra, DifferentialEquations, LowRankArithmetic
@reexport using DifferentialEquations, LowRankArithmetic

include("utils.jl")
export orthonormalize!, GradientDescent, QR, SVD, SecondMomentMatching, normal_component

include("sparse_interpolation.jl")
export SparseFunctionInterpolator, SparseMatrixInterpolator,
DEIMInterpolator, AdjointDEIMInterpolator,
DEIM, QDEIM, LDEIM,
ComponentFunction, SparseInterpolation,
update_interpolator!, eval!,
index_selection

include("primitives.jl")
export MatrixDEProblem, MatrixDataProblem, MatrixHybridProblem
DLRIntegrator, DLRSolution,
Expand Down
5 changes: 4 additions & 1 deletion src/integrators.jl
Original file line number Diff line number Diff line change
@@ -1,5 +1,8 @@
# basic catch all cases:
include("integrators/on_the_fly_interpolation.jl")
include("integrators/projector_splitting.jl")
include("integrators/unconventional.jl")
include("integrators/data_integrator.jl")
include("integrators/rank_adaptive_unconventional.jl")
include("integrators/greedy_integrator.jl")
include("integrators/greedy_integrator.jl")

20 changes: 4 additions & 16 deletions src/integrators/greedy_integrator.jl
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
# greedy approach to fit
struct GreedyIntegrator_Cache
struct GreedyIntegrator_Cache <: AbstractDLRAlgorithm_Cache
Y
X
XZ
Expand All @@ -21,15 +21,15 @@ function GreedyIntegrator(; Z_alg = Tsit5(), Z_kwargs = Dict{Symbol,Any}())
return GreedyIntegrator(params)
end

function alg_cache(prob::MatrixDataProblem, alg::GreedyIntegrator, u::TwoFactorRepresentation, dt)
# Build the greedy-integrator cache for a data problem with a two-factor iterate.
# Allocates a dense snapshot buffer `X` and a projection buffer `XZ`; the
# SVD-specific and sub-integrator slots are unused here and set to `nothing`.
function alg_cache(prob::MatrixDataProblem, alg::GreedyIntegrator, u::TwoFactorRepresentation, dt; t0 = prob.tspan[1])
    X = zeros(size(u))
    XZ = zeros(size(X, 1), rank(u))
    return GreedyIntegrator_Cache(prob.y, X, XZ, nothing, nothing, nothing)
end

function alg_cache(prob::MatrixDataProblem, alg::GreedyIntegrator, u::SVDLikeRepresentation, dt)
function alg_cache(prob::MatrixDataProblem, alg::GreedyIntegrator, u::SVDLikeRepresentation, dt; t0 = prob.tspan[1])
X = zeros(size(u))
r = rank(u)
n, m = size(X)
Expand All @@ -38,26 +38,14 @@ function alg_cache(prob::MatrixDataProblem, alg::GreedyIntegrator, u::SVDLikeRep
return GreedyIntegrator_Cache(prob.y, X, nothing, XV, XU, nothing)
end

function alg_cache(prob::MatrixHybridProblem, alg::GreedyIntegrator, u, dt)
# Build the greedy-integrator cache for a hybrid problem. In addition to the
# snapshot buffers, this sets up a sub-integrator that evolves the Z factor of
# `u` via `prob.f`, with the basis `u.U` passed as the ODE parameter.
function alg_cache(prob::MatrixHybridProblem, alg::GreedyIntegrator, u, dt; t0 = prob.tspan[1])
    X = zeros(size(u)...)
    XZ = zeros(size(u, 1), rank(u))
    Z_problem = ODEProblem(prob.f, u.Z, prob.tspan, u.U)
    Z_integrator = init(Z_problem, alg.alg_params.Z_alg; save_everystep = false, alg.alg_params.Z_kwargs...)
    return GreedyIntegrator_Cache(prob.y, X, XZ, nothing, nothing, Z_integrator)
end

# Initialize a DLRIntegrator for a data-driven problem: copies the initial
# condition, preallocates the solution container over [t0, tf], and builds the
# algorithm cache. Only forward-in-time integration is supported.
function init(prob::MatrixDataProblem, alg::GreedyIntegrator, dt)
t0, tf = prob.tspan
@assert tf > t0 "Integration in reverse time direction is not supported"
u = deepcopy(prob.u0) # working copy so prob.u0 itself is never mutated
# initialize solution
sol = init_sol(dt, t0, tf, prob.u0)
# initialize cache
cache = alg_cache(prob, alg, u, dt)
sol.Y[1] = deepcopy(prob.u0) # first stored snapshot is the initial condition
return DLRIntegrator(u, t0, dt, sol, alg, cache, typeof(prob), 0)
end

function init(prob::MatrixHybridProblem, alg::GreedyIntegrator, dt)
t0, tf = prob.tspan
@assert tf > t0 "Integration in reverse time direction is not supported"
Expand Down
171 changes: 171 additions & 0 deletions src/integrators/on_the_fly_interpolation.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,171 @@
# Fallback for caches that carry no sparse interpolation: report DEIM rank -1.
function rank_DEIM(::AbstractDLRAlgorithm_Cache)
    return -1
end
# Fallback for caches without sparse interpolation: no row/column indices.
function interpolation_indices(::AbstractDLRAlgorithm_Cache)
    return (Int[], Int[])
end

# State needed to refresh sparse (DEIM-style) interpolators on the fly during
# low-rank integration; see `update_interpolation!` for how the pieces interact.
struct OnTheFlyInterpolation_Cache
params::SparseInterpolation # settings: update_scheme, selection_alg (read in sparse_selection)
u_prev::SVDLikeRepresentation # previous iterate; used by the :avg_flow update scheme
Π::SparseFunctionInterpolator # full interpolator; supplies cache.cols/rows via eval_cols!/eval_rows!
Π_K::SparseFunctionInterpolator # column-side interpolator (cache.cols read in sparse_selection)
Π_L::SparseFunctionInterpolator # row-side interpolator (cache.rows read in sparse_selection)
Π_S::SparseFunctionInterpolator # interpolator updated with the projected range/corange pair
end

# Select interpolation indices from sampled rows and columns of the function
# matrix: the SVD of the sampled columns spans the range, the SVD of the
# sampled rows spans the corange. Returns (range span, corange span,
# row indices, column indices).
function select_idcs(F_rows::AbstractMatrix, F_cols::AbstractMatrix, selection_alg::IndexSelectionAlgorithm)
    col_svd = svd(F_cols)
    row_svd = svd(F_rows)
    # combine both singular-value estimates elementwise
    combined_svals = max.(col_svd.S, row_svd.S)
    row_idcs = index_selection(col_svd.U, combined_svals, selection_alg)
    col_idcs = index_selection(Matrix(row_svd.V), combined_svals, selection_alg)
    return col_svd.U, row_svd.V, row_idcs, col_idcs
end

# Select interpolation indices directly from a low-rank (SVD-like) matrix `F`.
# The spans are truncated to as many vectors as indices were selected.
function select_idcs(F::SVDLikeRepresentation, selection_alg::IndexSelectionAlgorithm)
    decomposition = svd(F)
    svals = diag(decomposition.S)
    row_idcs = index_selection(decomposition.U, svals, selection_alg)
    col_idcs = index_selection(decomposition.V, svals, selection_alg)
    UF = decomposition.U[:, 1:length(row_idcs)]
    VF = decomposition.V[:, 1:length(col_idcs)]
    return UF, VF, row_idcs, col_idcs
end

# Dispatch index selection according to the configured update scheme:
# :avg_flow selects from the step increment u - u_prev, while :last_iterate
# selects from rows/columns of the function evaluated at the new time point.
function sparse_selection(interpolation_cache, u, t, dt)
    @unpack params = interpolation_cache
    @unpack selection_alg = params
    if params.update_scheme == :avg_flow
        @unpack u_prev = interpolation_cache
        return select_idcs(u - u_prev, selection_alg)
    elseif params.update_scheme == :last_iterate
        @unpack Π_K, Π_L = interpolation_cache
        t_next = t + dt
        eval!(Π_L, u, (), t_next) # fills Π_L.cache.rows
        eval!(Π_K, u, (), t_next) # fills Π_K.cache.cols
        return select_idcs(Π_L.cache.rows, Π_K.cache.cols, selection_alg)
    else
        error("Update scheme $(params.update_scheme) not supported")
    end
end

# Compute DEIM weight matrices for the given range/corange spans and index
# sets. If a span's width does not match the number of selected indices, a
# fresh basis is obtained by evaluating the function at the selected
# columns/rows and taking its SVD.
function compute_weights(range, corange, row_idcs, col_idcs, u, t, dt, interpolation_cache)
    @unpack params, Π = interpolation_cache
    t_next = t + dt

    # range (row-space) weights
    if size(range, 2) == length(row_idcs)
        range_weights = range / view(range, row_idcs, :)
    else
        col_buffer = Matrix{eltype(Π.cache.cols)}(undef, Π.F.n_rows, length(col_idcs))
        eval_cols!(col_buffer, Π, u, (), t_next, col_idcs)
        col_basis = svd(col_buffer).U
        range_weights = col_basis / view(col_basis, row_idcs, :)
    end

    # corange (column-space) weights
    if size(corange, 2) == length(col_idcs)
        corange_weights = corange / view(corange, col_idcs, :)
    else
        # NOTE(review): buffer eltype is taken from Π.cache.cols (as in the
        # original); presumably rows share that eltype — confirm.
        row_buffer = Matrix{eltype(Π.cache.cols)}(undef, length(row_idcs), Π.F.n_cols)
        eval_rows!(row_buffer, Π, u, (), t_next, row_idcs)
        row_basis = svd(row_buffer).V
        corange_weights = row_basis / view(row_basis, col_idcs, :)
    end

    return range_weights, corange_weights
end

# Refresh all four sparse interpolators in the cache: pick interpolation
# indices, compute the associated weights, and rebuild the DEIM interpolators
# in both ambient and projected (U'/V') coordinates.
function update_interpolation!(interpolation_cache, u, t, dt)
    @unpack Π, Π_L, Π_K, Π_S, params = interpolation_cache
    @unpack selection_alg = params

    # spans, indices, and interpolation weights
    UF, VF, row_idcs, col_idcs = sparse_selection(interpolation_cache, u, t, dt)
    range_weights, corange_weights =
        compute_weights(UF, VF, row_idcs, col_idcs, u, t, dt, interpolation_cache)

    # assemble the new interpolators
    range = DEIMInterpolator(row_idcs, range_weights)
    projected_range = DEIMInterpolator(row_idcs, u.U' * range_weights)
    corange = DEIMInterpolator(col_idcs, corange_weights)
    projected_corange = DEIMInterpolator(col_idcs, u.V' * corange_weights)

    # push them into the cached function interpolators
    update_interpolator!(Π_L, projected_range)
    update_interpolator!(Π_K, projected_corange')
    update_interpolator!(Π_S, SparseMatrixInterpolator(projected_range, projected_corange))
    return update_interpolator!(Π, SparseMatrixInterpolator(range, corange))
end

# without rank adaptation
#=
function update_interpolation!(interpolation_cache, u, t)
@unpack params, Π, Π_K, Π_L, Π_S = interpolation_cache
@unpack selection_alg, tol, rmin, rmax = params

eval!(Π_L, u, (), t) # rows
eval!(Π_K, u, (), t) # columns
VF = truncated_svd(Π_L.cache.rows, tol=tol, rmin=rmin, rmax=rmax).V # corange from rows
UF = truncated_svd(Π_K.cache.cols, tol=tol, rmin=rmin, rmax=rmax).U # range from cols

# find interpolation indices
row_idcs = index_selection(range_svd.U, selection_alg)
col_idcs = index_selection(corange_svd.V, selection_alg)

# find interpolation weights
@views range_weights = UF/UF[row_idcs,:]
@views corange_weights = VF/VF[col_idcs,:]

# new interpolators
range = DEIMInterpolator(row_idcs, range_weights)
projected_range = DEIMInterpolator(row_idcs, u.U'*range_weights)
corange = DEIMInterpolator(col_idcs, corange_weights)
projected_corange = DEIMInterpolator(col_idcs, u.V'*corange_weights)

# update function interpolators
update_interpolator!(Π_L, projected_range)
update_interpolator!(Π_K, projected_corange')
update_interpolator!(Π_S, SparseMatrixInterpolator(projected_range, projected_corange))
update_interpolator!(Π, SparseMatrixInterpolator(range, corange))
end

# alternative but max rank limited by 2*rank(u)
# ... not great
function update_interpolation!(interpolation_cache, u_old, u, dt)
@unpack params, Π, Π_K, Π_L, Π_S = interpolation_cache
@unpack selection_alg, tol, rmin, rmax = params

dF = u - u_old
dF_svd = svd(dF)
#println(typeof(dF_svd.S))
cutoff = findfirst(s -> s < tol/dt, diag(dF_svd.S) )
if isnothing(cutoff)
# compute the full function and take derivative
cutoff= size(dF_svd.S,1)
else
println(dF_svd.S[cutoff])
cutoff = max(rmin, min(cutoff, rmax))
end

if cutoff != rank(Π_K)
println("new DEIM rank: $cutoff")
end

# find interpolation indices
@views row_idcs = index_selection(dF_svd.U[:,1:cutoff], selection_alg)
@views col_idcs = index_selection(dF_svd.V[:,1:cutoff], selection_alg)

# find interpolation weights
@views range_weights = dF_svd.U/dF_svd.U[row_idcs,:]
@views corange_weights = dF_svd.V/dF_svd.V[col_idcs,:]

# new interpolators
range = DEIMInterpolator(row_idcs, range_weights)
projected_range = DEIMInterpolator(row_idcs, u.U'*range_weights)
corange = DEIMInterpolator(col_idcs, corange_weights)
projected_corange = DEIMInterpolator(col_idcs, u.V'*corange_weights)

# update function interpolators
update_interpolator!(Π_L, projected_range)
update_interpolator!(Π_K, projected_corange')
update_interpolator!(Π_S, SparseMatrixInterpolator(projected_range, projected_corange))
update_interpolator!(Π, SparseMatrixInterpolator(range, corange))
end
=#
Loading
Loading