Switch RK methods to use an inf norm for eigen_est #2204

Merged: 4 commits, May 28, 2024
4 changes: 2 additions & 2 deletions src/composite_algs.jl
@@ -15,7 +15,7 @@ function is_stiff(integrator, alg, ntol, stol, is_stiffalg)
stiffness = abs(eigen_est * dt / alg_stability_size(alg)) # `abs` here is just for safety
tol = is_stiffalg ? stol : ntol
os = oneunit(stiffness)
-bool = stiffness > os * tol
+bool = !(stiffness <= os * tol)
Review comment (Member): why this change?

Reply (Contributor, author): In case `eigen_est` is NaN. That's a pretty good sign that things are going wrong, but since it's only an estimate, if the estimate is NaN we really shouldn't be using an explicit solver.
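A standalone sketch of the NaN behavior in question (values made up): every ordered comparison with NaN is false, so only the negated form routes a NaN estimate to the stiff branch.

```julia
stiffness = NaN
stiffness > 1.0        # false: the old test would keep the explicit solver
!(stiffness <= 1.0)    # true:  the new test switches to the implicit solver
```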


if !bool
integrator.alg.choice_function.successive_switches += 1
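Paraphrasing the test above as a standalone sketch (all numbers, including the stability-region size, are hypothetical): the eigenvalue estimate is scaled by the step size, divided by the method's stability-region size, and the result is checked against the switching tolerance.

```julia
eigen_est, dt, stability_size, tol = 120.0, 0.05, 3.3, 1.0
stiffness = abs(eigen_est * dt / stability_size)  # ≈ 1.82
!(stiffness <= tol)                               # true: hand off to a stiff solver
```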
@@ -111,7 +111,7 @@ current_nonstiff(current) = ifelse(current <= NUM_NONSTIFF, current, current - NUM_NONSTIFF)
function DefaultODEAlgorithm(; lazy = true, stiffalgfirst = false, kwargs...)
nonstiff = (Tsit5(), Vern7(lazy = lazy))
stiff = (Rosenbrock23(; kwargs...), Rodas5P(; kwargs...), FBDF(; kwargs...),
-FBDF(; linsolve = LinearSolve.KrylovJL_GMRES()))
+FBDF(; linsolve = LinearSolve.KrylovJL_GMRES(), kwargs...))
AutoAlgSwitch(nonstiff, stiff; stiffalgfirst)
end
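For context, a minimal usage sketch (the ODE is made up, and it assumes `autodiff` is among the keywords forwarded to the stiff solvers): before this fix, `kwargs` never reached the Krylov-based FBDF entry.

```julia
using OrdinaryDiffEq
f(u, p, t) = -1000.0 * u + 1000.0 * sin(t)   # hypothetical mildly stiff scalar ODE
prob = ODEProblem(f, 0.0, (0.0, 1.0))
sol = solve(prob, DefaultODEAlgorithm(autodiff = false))  # kwargs now reach all four stiff algs
```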

59 changes: 29 additions & 30 deletions src/derivative_utils.jl
@@ -140,7 +140,6 @@
if !(p isa DiffEqBase.NullParameters)
uf.p = p
end
-
jacobian!(J, uf, uprev, du1, integrator, jac_config)
end
end
@@ -294,7 +293,7 @@
Base.eltype(W::WOperator) = eltype(W.J)

# In WOperator update_coefficients!, accept both missing u/p/t and missing dtgamma/transform and don't update them in that case.
# This helps support partial updating logic used with Newton solvers.
function SciMLOperators.update_coefficients!(W::WOperator,
u = nothing,
p = nothing,
@@ -483,16 +482,16 @@
end

function jacobian2W!(
-    W::AbstractMatrix, mass_matrix::MT, dtgamma::Number, J::AbstractMatrix,
-    W_transform::Bool)::Nothing where {MT}
+    W::AbstractMatrix, mass_matrix, dtgamma::Number, J::AbstractMatrix,
+    W_transform::Bool)::Nothing
# check size and dimension
iijj = axes(W)
@boundscheck (iijj == axes(J) && length(iijj) == 2) || _throwWJerror(W, J)
mass_matrix isa UniformScaling ||
@boundscheck axes(mass_matrix) == axes(W) || _throwWMerror(W, mass_matrix)
@inbounds if W_transform
invdtgamma = inv(dtgamma)
-if MT <: UniformScaling
+if mass_matrix isa UniformScaling
copyto!(W, J)
idxs = diagind(W)
λ = -mass_matrix.λ
@@ -508,28 +507,9 @@
@.. broadcast=false W=muladd(-mass_matrix, invdtgamma, J)
end
else
-if MT <: UniformScaling
+if mass_matrix isa UniformScaling
λ = -mass_matrix.λ
-if W isa AbstractSparseMatrix && !(W isa SparseMatrixCSC)
-    # This is specifically to catch the GPU sparse matrix cases
-    # Which do not support diagonal indexing
-    # https://github.com/JuliaGPU/CUDA.jl/issues/1395
-
-    Wn = nonzeros(W)
-    Jn = nonzeros(J)
-
-    # I would hope to check this generically, but `CuSparseMatrixCSC` has `colPtr`
-    # and `rowVal` while SparseMatrixCSC is colptr and rowval, and there is no
-    # standard for checking sparsity patterns in general. So for now, write it for
-    # the convention of CUDA.jl and handle the case of some other convention when
-    # it comes up.
-
-    @assert J.colPtr == W.colPtr
-    @assert J.rowVal == W.rowVal
-
-    @.. broadcast=false Wn=dtgamma * Jn
-    W .= W + λ * I
-elseif W isa SparseMatrixCSC
+if W isa SparseMatrixCSC
#=
using LinearAlgebra, SparseArrays, FastBroadcast
J = sparse(Diagonal(ones(4)))
@@ -553,6 +533,25 @@
@.. broadcast=false W.nzval=dtgamma * J.nzval
idxs = diagind(W)
@.. broadcast=false @view(W[idxs])=@view(W[idxs]) + λ
+elseif W isa AbstractSparseMatrix

+    # This is specifically to catch the GPU sparse matrix cases
+    # Which do not support diagonal indexing
+    # https://github.com/JuliaGPU/CUDA.jl/issues/1395
+
+    Wn = nonzeros(W)
+    Jn = nonzeros(J)


+    # I would hope to check this generically, but `CuSparseMatrixCSC` has `colPtr`
+    # and `rowVal` while SparseMatrixCSC is colptr and rowval, and there is no
+    # standard for checking sparsity patterns in general. So for now, write it for
+    # the convention of CUDA.jl and handle the case of some other convention when
+    # it comes up.
+
+    @assert J.colPtr == W.colPtr
+    @assert J.rowVal == W.rowVal


+    @.. broadcast=false Wn=dtgamma * Jn
+    W .= W + λ * I

else # Anything not a sparse matrix
@.. broadcast=false W=dtgamma * J
idxs = diagind(W)
@@ -565,16 +564,16 @@
return nothing
end
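For intuition, dense out-of-place versions of the two conventions this function implements (a sketch, not the optimized in-place code): with `W_transform = true` it builds W = J - M/(dtγ), otherwise W = dtγ·J - M, so the two differ only by the factor dtγ.

```julia
using LinearAlgebra

W_transformed(M, J, dtgamma) = J - M / dtgamma  # W_transform = true
W_plain(M, J, dtgamma) = dtgamma * J - M        # W_transform = false

# e.g. with an identity mass matrix and a small Jacobian:
J = [-2.0 1.0; 1.0 -3.0]
W_transformed(I(2), J, 0.1) ≈ W_plain(I(2), J, 0.1) / 0.1  # true
```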

-function jacobian2W!(W::Matrix, mass_matrix::MT, dtgamma::Number, J::Matrix,
-    W_transform::Bool)::Nothing where {MT}
+function jacobian2W!(W::Matrix, mass_matrix, dtgamma::Number, J::Matrix,
+    W_transform::Bool)::Nothing
# check size and dimension
iijj = axes(W)
@boundscheck (iijj == axes(J) && length(iijj) == 2) || _throwWJerror(W, J)
mass_matrix isa UniformScaling ||
@boundscheck axes(mass_matrix) == axes(W) || _throwWMerror(W, mass_matrix)
@inbounds if W_transform
invdtgamma = inv(dtgamma)
-if MT <: UniformScaling
+if mass_matrix isa UniformScaling
copyto!(W, J)
idxs = diagind(W)
λ = -mass_matrix.λ
@@ -587,7 +586,7 @@
end
end
else
-if MT <: UniformScaling
+if mass_matrix isa UniformScaling
idxs = diagind(W)
@inbounds @simd ivdep for i in eachindex(W)
W[i] = dtgamma * J[i]
16 changes: 6 additions & 10 deletions src/perform_step/explicit_rk_perform_step.jl
@@ -48,10 +48,9 @@
u = uprev + dt * accum

if integrator.alg isa CompositeAlgorithm
-# Hairer II, page 22
-ϱu = integrator.opts.internalnorm(kk[end] - kk[end - 1], t)
-ϱd = integrator.opts.internalnorm(u - u_beforefinal, t)
-integrator.eigen_est = ϱu / ϱd
+# Hairer II, page 22 modified to use Inf norm
+n = maximum(abs.((kk[end] .- kk[end - 1]) / (u .- u_beforefinal)))
+integrator.eigen_est = integrator.opts.internalnorm(n, t)

end
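The old estimate divided two internalnorm evaluations (by default an RMS-style norm); the new one takes the largest componentwise quotient, an Inf-norm-style bound. A standalone sketch with made-up stage differences; since the componentwise maximum is never smaller than the norm ratio, the new estimate errs toward detecting stiffness.

```julia
kdiff = [1.0, -3.0]   # stands in for kk[end] - kk[end - 1]
gdiff = [0.1, 0.2]    # stands in for u - u_beforefinal
rho_old = sqrt(sum(abs2, kdiff) / 2) / sqrt(sum(abs2, gdiff) / 2)  # ≈ 14.14
rho_new = maximum(abs.(kdiff ./ gdiff))                            # 15.0
```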

if integrator.opts.adaptive
@@ -221,12 +220,9 @@
end

if integrator.alg isa CompositeAlgorithm
-# Hairer II, page 22
-@.. broadcast=false utilde=kk[end] - kk[end - 1]
-ϱu = integrator.opts.internalnorm(utilde, t)
-@.. broadcast=false utilde=u - tmp
-ϱd = integrator.opts.internalnorm(utilde, t)
-integrator.eigen_est = ϱu / ϱd
+# Hairer II, page 22 modified to use Inf norm
+@.. broadcast=false utilde=abs((kk[end] - kk[end - 1]) / (u - tmp))
+integrator.eigen_est = integrator.opts.internalnorm(maximum(utilde), t)

end

if integrator.opts.adaptive
93 changes: 10 additions & 83 deletions src/perform_step/low_order_rk_perform_step.jl
@@ -761,9 +761,8 @@
integrator.stats.nf += 6
if integrator.alg isa CompositeAlgorithm
g7 = u
-# Hairer II, page 22
-integrator.eigen_est = integrator.opts.internalnorm(k7 - k6, t) /
-                       integrator.opts.internalnorm(g7 - g6, t)
+# Hairer II, page 22 modified to use the Inf norm
+integrator.eigen_est = integrator.opts.internalnorm(maximum(abs.(k7 .- k6) ./ (g7 .- g6)), t)

end
if integrator.opts.adaptive
utilde = dt *
@@ -836,12 +835,9 @@
if integrator.alg isa CompositeAlgorithm
g7 = u
g6 = tmp
-# Hairer II, page 22
-@.. broadcast=false thread=thread utilde=k7 - k6
-ϱu = integrator.opts.internalnorm(utilde, t)
-@.. broadcast=false thread=thread utilde=g7 - g6
-ϱd = integrator.opts.internalnorm(utilde, t)
-integrator.eigen_est = ϱu / ϱd
+# Hairer II, page 22 modified to use Inf norm
+@.. broadcast=false thread=thread utilde=abs((k7 - k6) / (g7 - g6))
+integrator.eigen_est = integrator.opts.internalnorm(maximum(utilde), t)
end
if integrator.opts.adaptive
@.. broadcast=false thread=thread utilde=dt * (btilde1 * k1 + btilde2 * k2 +
@@ -889,9 +885,8 @@
integrator.stats.nf += 6
if integrator.alg isa CompositeAlgorithm
g7 = u
-# Hairer II, page 22
-integrator.eigen_est = integrator.opts.internalnorm(k7 - k6, t) /
-                       integrator.opts.internalnorm(g7 - g6, t)
+# Hairer II, page 22 modified to use the Inf norm
+integrator.eigen_est = integrator.opts.internalnorm(maximum(abs.(k7 .- k6) ./ (g7 .- g6)), t)

end
if integrator.opts.adaptive
utilde = dt *
@@ -957,12 +952,9 @@
if integrator.alg isa CompositeAlgorithm
g6 = tmp
g7 = u
-# Hairer II, page 22
-@.. broadcast=false thread=thread g6=g7 - g6
-ϱd = integrator.opts.internalnorm(g6, t)
-@.. broadcast=false thread=thread tmp=k7 - k6
-ϱu = integrator.opts.internalnorm(tmp, t)
-integrator.eigen_est = (ϱu / ϱd) * oneunit(t)
+# Hairer II, page 22 modified to use Inf norm
+@.. broadcast=false thread=thread utilde=abs((k7 - k6) / (g7 - g6))
+integrator.eigen_est = integrator.opts.internalnorm(maximum(utilde) * oneunit(t), t)

end
if integrator.opts.adaptive
@.. broadcast=false thread=thread utilde=dt * (btilde1 * k1 + btilde3 * k3 +
@@ -987,71 +979,6 @@
return nothing
end

-#=
-@muladd function perform_step!(integrator, cache::DP5Cache, repeat_step=false)
-  @unpack t,dt,uprev,u,f,p = integrator
-  uidx = eachindex(integrator.uprev)
-  @unpack a21,a31,a32,a41,a42,a43,a51,a52,a53,a54,a61,a62,a63,a64,a65,a71,a73,a74,a75,a76,btilde1,btilde3,btilde4,btilde5,btilde6,btilde7,c1,c2,c3,c4,c5,c6 = cache.tab
-  @unpack k1,k2,k3,k4,k5,k6,k7,dense_tmp3,dense_tmp4,update,bspl,utilde,tmp,atmp = cache
-  @unpack d1,d3,d4,d5,d6,d7 = cache.tab
-  a = dt*a21
-  @tight_loop_macros for i in uidx
-    @inbounds tmp[i] = uprev[i]+a*k1[i]
-  end
-  f(k2, tmp, p, t+c1*dt)
-  @tight_loop_macros for i in uidx
-    @inbounds tmp[i] = uprev[i]+dt*(a31*k1[i]+a32*k2[i])
-  end
-  f(k3, tmp, p, t+c2*dt)
-  @tight_loop_macros for i in uidx
-    @inbounds tmp[i] = uprev[i]+dt*(a41*k1[i]+a42*k2[i]+a43*k3[i])
-  end
-  f(k4, tmp, p, t+c3*dt)
-  @tight_loop_macros for i in uidx
-    @inbounds tmp[i] = uprev[i]+dt*(a51*k1[i]+a52*k2[i]+a53*k3[i]+a54*k4[i])
-  end
-  f(k5, tmp, p, t+c4*dt)
-  @tight_loop_macros for i in uidx
-    @inbounds tmp[i] = uprev[i]+dt*(a61*k1[i]+a62*k2[i]+a63*k3[i]+a64*k4[i]+a65*k5[i])
-  end
-  f(k6, tmp, p, t+dt)
-  @tight_loop_macros for i in uidx
-    @inbounds update[i] = a71*k1[i]+a73*k3[i]+a74*k4[i]+a75*k5[i]+a76*k6[i]
-    @inbounds u[i] = uprev[i]+dt*update[i]
-  end
-  f(k7, u, p, t+dt)
-  integrator.stats.nf += 6
-  if integrator.alg isa CompositeAlgorithm
-    g6 = tmp
-    g7 = u
-    # Hairer II, page 22
-    ϱu, ϱd = zero(eltype(k7))^2, zero(eltype(g7))^2
-    @inbounds for i in eachindex(k7)
-      ϱu += (k7[i] - k6[i])^2
-      ϱd += (g7[i] - g6[i])^2
-    end
-    integrator.eigen_est = sqrt(ϱu/ϱd)*oneunit(t)
-  end
-  if integrator.opts.adaptive
-    @tight_loop_macros for i in uidx
-      @inbounds utilde[i] = dt*(btilde1*k1[i] + btilde3*k3[i] + btilde4*k4[i] + btilde5*k5[i] + btilde6*k6[i] + btilde7*k7[i])
-    end
-    calculate_residuals!(atmp, utilde, uprev, u, integrator.opts.abstol, integrator.opts.reltol,integrator.opts.internalnorm,t)
-    integrator.EEst = integrator.opts.internalnorm(atmp,t)
-  end
-  if integrator.opts.calck
-    @tight_loop_macros for i in uidx
-      #integrator.k[4] == k5
-      @inbounds integrator.k[4][i] = d1*k1[i]+d3*k3[i]+d4*k4[i]+d5*k5[i]+d6*k6[i]+d7*k7[i]
-      #bspl == k3
-      @inbounds bspl[i] = k1[i] - update[i]
-      # k6 === integrator.k[3] === k2
-      @inbounds integrator.k[3][i] = update[i] - k7[i] - bspl[i]
-    end
-  end
-end
-=#

function initialize!(integrator, cache::KYK2014DGSSPRK_3S2_ConstantCache)
integrator.fsalfirst = integrator.f(integrator.uprev, integrator.p, integrator.t) # Pre-start fsal
integrator.stats.nf += 1
44 changes: 12 additions & 32 deletions src/perform_step/verner_rk_perform_step.jl
@@ -40,9 +40,7 @@
integrator.stats.nf += 8
if integrator.alg isa CompositeAlgorithm
g9 = u
-ϱu = integrator.opts.internalnorm(k9 - k8, t)
-ϱd = integrator.opts.internalnorm(g9 - g8, t)
-integrator.eigen_est = ϱu / ϱd
+integrator.eigen_est = integrator.opts.internalnorm(maximum(abs.((k9 .- k8) ./ (g9 .- g8))), t)

end
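The reasoning behind this estimate, sketched (this derivation is our gloss, not part of the PR):

```latex
% For the linear model problem u' = J u, the last two stage derivatives satisfy
%   k_s - k_{s-1} = J (g_s - g_{s-1}).
% When g_s - g_{s-1} is dominated by the eigenvector of the largest eigenvalue,
\[
  \rho = \max_i \left| \frac{(k_s - k_{s-1})_i}{(g_s - g_{s-1})_i} \right|
       \approx |\lambda_{\max}(J)|,
\]
% which is_stiff then scales by dt and compares to the stability-region size.
```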
if integrator.opts.adaptive
utilde = dt *
@@ -163,11 +161,8 @@
if integrator.alg isa CompositeAlgorithm
g9 = u
g8 = tmp
-@.. broadcast=false thread=thread rtmp=k9 - k8
-ϱu = integrator.opts.internalnorm(rtmp, t)
-@.. broadcast=false thread=thread utilde=g9 - g8
-ϱd = integrator.opts.internalnorm(utilde, t)
-integrator.eigen_est = ϱu / ϱd
+@.. broadcast=false thread=thread rtmp=abs((k9 - k8) / (g9 - g8))
+integrator.eigen_est = integrator.opts.internalnorm(maximum(rtmp), t)

end
if integrator.opts.adaptive
@.. broadcast=false thread=thread utilde=dt * (btilde1 * k1 + btilde4 * k4 +
@@ -252,9 +247,7 @@
integrator.stats.nf += 10
u = uprev + dt * (b1 * k1 + b4 * k4 + b5 * k5 + b6 * k6 + b7 * k7 + b8 * k8 + b9 * k9)
if integrator.alg isa CompositeAlgorithm
-ϱu = integrator.opts.internalnorm(k10 - k9, t)
-ϱd = integrator.opts.internalnorm(g10 - g9, t)
-integrator.eigen_est = ϱu / ϱd
+integrator.eigen_est = integrator.opts.internalnorm(maximum(abs.((k10 .- k9) ./ (g10 .- g9))), t)

end
if integrator.opts.adaptive
utilde = dt *
@@ -415,11 +408,8 @@
if integrator.alg isa CompositeAlgorithm
g10 = u
g9 = tmp
-@.. broadcast=false thread=thread rtmp=k10 - k9
-ϱu = integrator.opts.internalnorm(rtmp, t)
-@.. broadcast=false thread=thread utilde=g10 - g9
-ϱd = integrator.opts.internalnorm(utilde, t)
-integrator.eigen_est = ϱu / ϱd
+@.. broadcast=false thread=thread rtmp=abs((k10 - k9) / (g10 - g9))
+integrator.eigen_est = integrator.opts.internalnorm(maximum(rtmp), t)

end
if integrator.opts.adaptive
@.. broadcast=false thread=thread utilde=dt * (btilde1 * k1 + btilde4 * k4 +
@@ -547,9 +537,7 @@
dt * (b1 * k1 + b6 * k6 + b7 * k7 + b8 * k8 + b9 * k9 + b10 * k10 + b11 * k11 +
b12 * k12)
if integrator.alg isa CompositeAlgorithm
-ϱu = integrator.opts.internalnorm(k13 - k12, t)
-ϱd = integrator.opts.internalnorm(g13 - g12, t)
-integrator.eigen_est = ϱu / ϱd
+integrator.eigen_est = integrator.opts.internalnorm(maximum(abs.((k13 .- k12) ./ (g13 .- g12))), t)

end
if integrator.opts.adaptive
utilde = dt *
@@ -739,11 +727,8 @@
if integrator.alg isa CompositeAlgorithm
g13 = u
g12 = tmp
-@.. broadcast=false thread=thread rtmp=k13 - k12
-ϱu = integrator.opts.internalnorm(rtmp, t)
-@.. broadcast=false thread=thread utilde=g13 - g12
-ϱd = integrator.opts.internalnorm(utilde, t)
-integrator.eigen_est = ϱu / ϱd
+@.. broadcast=false thread=thread rtmp = abs((k13 - k12) / (g13 - g12))
+integrator.eigen_est = integrator.opts.internalnorm(maximum(rtmp), t)

end
@.. broadcast=false thread=thread u=uprev +
dt *
@@ -918,9 +903,7 @@
dt * (b1 * k1 + b8 * k8 + b9 * k9 + b10 * k10 + b11 * k11 + b12 * k12 + b13 * k13 +
b14 * k14 + b15 * k15)
if integrator.alg isa CompositeAlgorithm
-ϱu = integrator.opts.internalnorm(k16 - k15, t)
-ϱd = integrator.opts.internalnorm(g16 - g15, t)
-integrator.eigen_est = ϱu / ϱd
+integrator.eigen_est = integrator.opts.internalnorm(maximum(abs.((k16 .- k15) ./ (g16 .- g15))), t)

end
if integrator.opts.adaptive
utilde = dt * (btilde1 * k1 + btilde8 * k8 + btilde9 * k9 + btilde10 * k10 +
@@ -1148,11 +1131,8 @@
if integrator.alg isa CompositeAlgorithm
g16 = u
g15 = tmp
-@.. broadcast=false thread=thread rtmp=k16 - k15
-ϱu = integrator.opts.internalnorm(rtmp, t)
-@.. broadcast=false thread=thread utilde=g16 - g15
-ϱd = integrator.opts.internalnorm(utilde, t)
-integrator.eigen_est = ϱu / ϱd
+@.. broadcast=false thread=thread rtmp=abs((k16 - k15) / (g16 - g15))
+integrator.eigen_est = integrator.opts.internalnorm(maximum(rtmp), t)

end
@.. broadcast=false thread=thread u=uprev +
dt *