Add subcell limiting support for StructuredMesh #1946

Merged · 24 commits merged on May 24, 2024
Commits (24)
b1cdce7
Add structured mesh support
bennibolm May 14, 2024
c1af151
Fix non-periodic computation of bounds
bennibolm May 15, 2024
2ddd78a
Use local limiting and nonperiodic domain in source terms elixir
bennibolm May 15, 2024
98c2144
Use local limiting in free stream elixir
bennibolm May 15, 2024
ccb833e
Remove not needed lines
bennibolm May 15, 2024
18dc2e1
Merge branch 'main' into bb/subcell-limiting-structuredmesh
bennibolm May 15, 2024
003d43c
Remove P4estMesh
bennibolm May 15, 2024
e918af0
Add non-periodic tests with local bounds
bennibolm May 15, 2024
5a7b8ce
fmt
bennibolm May 15, 2024
632a934
Fix test
bennibolm May 16, 2024
b26a8f2
Use `get_inverse_jacobian` instead of dispatching all routines
bennibolm May 16, 2024
8c5047a
Simplify `perform_idp_correction!`
bennibolm May 16, 2024
80dd9bf
Revert stuff
bennibolm May 16, 2024
7102fbd
Remove free stream elixir
bennibolm May 16, 2024
51c9fe5
Merge branch 'main' into bb/subcell-limiting-structuredmesh
bennibolm May 16, 2024
0279dd0
Merge branch 'main' into bb/subcell-limiting-structuredmesh
bennibolm May 17, 2024
dc6d554
Use sedov blast instead of source term setup; add news
bennibolm May 22, 2024
ee86e27
Update dispatching for mesh types
bennibolm May 22, 2024
793ebc2
Merge branch 'main' into bb/subcell-limiting-structuredmesh
bennibolm May 22, 2024
db135df
Merge branch 'main' into bb/subcell-limiting-structuredmesh
bennibolm May 23, 2024
7f8f58d
Move new tests within test file
bennibolm May 23, 2024
40f475c
Adapt dispatching
bennibolm May 23, 2024
12400ee
Fix typo
bennibolm May 23, 2024
83a8cef
Remove not-needed parameters
bennibolm May 23, 2024
1 change: 1 addition & 0 deletions NEWS.md
@@ -14,6 +14,7 @@ for human readability.
- Subcell local one-sided limiting support for nonlinear variables in 2D for `TreeMesh` ([#1792]).
- New time integrator `PairedExplicitRK2`, implementing the second-order paired explicit Runge-Kutta
method with [Convex.jl](https://github.com/jump-dev/Convex.jl) and [ECOS.jl](https://github.com/jump-dev/ECOS.jl) ([#1908])
- Add subcell limiting support for `StructuredMesh` ([#1946]).

## Changes when updating to v0.7 from v0.6.x

@@ -0,0 +1,114 @@
using OrdinaryDiffEq
using Trixi

###############################################################################
# semidiscretization of the compressible Euler equations
gamma = 1.4
equations = CompressibleEulerEquations2D(gamma)

"""
initial_condition_sedov_blast_wave(x, t, equations::CompressibleEulerEquations2D)

The Sedov blast wave setup based on Flash
- https://flash.rochester.edu/site/flashcode/user_support/flash_ug_devel/node187.html#SECTION010114000000000000000
"""
function initial_condition_sedov_blast_wave(x, t, equations::CompressibleEulerEquations2D)
# Set up polar coordinates
inicenter = SVector(0.0, 0.0)
x_norm = x[1] - inicenter[1]
y_norm = x[2] - inicenter[2]
r = sqrt(x_norm^2 + y_norm^2)

# Setup based on https://flash.rochester.edu/site/flashcode/user_support/flash_ug_devel/node187.html#SECTION010114000000000000000
r0 = 0.21875 # = 3.5 * smallest dx (for domain length=4 and max-ref=6)
# r0 = 0.5 # = more reasonable setup
E = 1.0
p0_inner = 3 * (equations.gamma - 1) * E / (3 * pi * r0^2)
p0_outer = 1.0e-5 # = true Sedov setup
# p0_outer = 1.0e-3 # = more reasonable setup

# Calculate primitive variables
rho = 1.0
v1 = 0.0
v2 = 0.0
p = r > r0 ? p0_outer : p0_inner

return prim2cons(SVector(rho, v1, v2, p), equations)
end
initial_condition = initial_condition_sedov_blast_wave

boundary_condition = BoundaryConditionDirichlet(initial_condition)
boundary_conditions = (x_neg = boundary_condition,
x_pos = boundary_condition,
y_neg = boundary_condition,
y_pos = boundary_condition)

surface_flux = flux_lax_friedrichs
volume_flux = flux_ranocha
polydeg = 3
basis = LobattoLegendreBasis(polydeg)
limiter_idp = SubcellLimiterIDP(equations, basis;
local_twosided_variables_cons = ["rho"],
local_onesided_variables_nonlinear = [(Trixi.entropy_guermond_etal,
min)],
max_iterations_newton = 40, # Default value of 10 iterations is too low to fulfill bounds.
positivity_variables_cons = [],
positivity_variables_nonlinear = [])
# Variables for global limiting (`positivity_variables_cons` and
# `positivity_variables_nonlinear`) are overwritten and used in the tests.

volume_integral = VolumeIntegralSubcellLimiting(limiter_idp;
volume_flux_dg = volume_flux,
volume_flux_fv = surface_flux)
solver = DGSEM(basis, surface_flux, volume_integral)

# Get the curved quad mesh from a mapping function
# Mapping as described in https://arxiv.org/abs/2012.12040
function mapping(xi, eta)
y = eta + 0.125 * (cos(1.5 * pi * xi) * cos(0.5 * pi * eta))

x = xi + 0.125 * (cos(0.5 * pi * xi) * cos(2 * pi * y))

return SVector(x, y)
end

cells_per_dimension = (16, 16)
mesh = StructuredMesh(cells_per_dimension, mapping, periodicity = false)

semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver,
boundary_conditions = boundary_conditions)

###############################################################################
# ODE solvers, callbacks etc.

tspan = (0.0, 3.0)
ode = semidiscretize(semi, tspan)

summary_callback = SummaryCallback()

analysis_interval = 100
analysis_callback = AnalysisCallback(semi, interval = analysis_interval)

alive_callback = AliveCallback(analysis_interval = analysis_interval)

save_solution = SaveSolutionCallback(interval = 100,
save_initial_solution = true,
save_final_solution = true,
solution_variables = cons2prim)

stepsize_callback = StepsizeCallback(cfl = 0.7)

callbacks = CallbackSet(summary_callback,
analysis_callback, alive_callback,
save_solution,
stepsize_callback)

###############################################################################
# run the simulation

stage_callbacks = (SubcellLimiterIDPCorrection(), BoundsCheckCallback())

sol = Trixi.solve(ode, Trixi.SimpleSSPRK33(stage_callbacks = stage_callbacks);
dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
save_everystep = false, callback = callbacks);
summary_callback() # print the timer summary
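As a usage note (not part of the diff): an elixir like the one above is usually run through `trixi_include`. The snippet below is a minimal sketch; the directory and file name are placeholders for wherever the new elixir lands in the examples folder.

using Trixi

# Hypothetical path, for illustration only; adjust to the actual location of the elixir.
trixi_include(joinpath(examples_dir(), "structured_2d_dgsem",
                       "elixir_euler_sedov_blast_wave_sc_subcell.jl"))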
12 changes: 6 additions & 6 deletions src/callbacks_stage/subcell_bounds_check.jl
@@ -38,7 +38,7 @@ function (callback::BoundsCheckCallback)(u_ode, integrator, stage)
(; t, iter, alg) = integrator
u = wrap_array(u_ode, mesh, equations, solver, cache)

@trixi_timeit timer() "check_bounds" check_bounds(u, mesh, equations, solver, cache,
@trixi_timeit timer() "check_bounds" check_bounds(u, equations, solver, cache,
solver.volume_integral)

save_errors = callback.save_errors && (callback.interval > 0) &&
@@ -48,20 +48,20 @@ function (callback::BoundsCheckCallback)(u_ode, integrator, stage)
(iter + 1) >= integrator.opts.maxiters) # Maximum iterations reached
if save_errors
@trixi_timeit timer() "save_errors" save_bounds_check_errors(callback.output_directory,
u, t, iter + 1,
t, iter + 1,
equations,
solver.volume_integral)
end
end

@inline function check_bounds(u, mesh, equations, solver, cache,
@inline function check_bounds(u, equations, solver, cache,
volume_integral::VolumeIntegralSubcellLimiting)
check_bounds(u, mesh, equations, solver, cache, volume_integral.limiter)
check_bounds(u, equations, solver, cache, volume_integral.limiter)
end

@inline function save_bounds_check_errors(output_directory, u, t, iter, equations,
@inline function save_bounds_check_errors(output_directory, t, iter, equations,
volume_integral::VolumeIntegralSubcellLimiting)
save_bounds_check_errors(output_directory, u, t, iter, equations,
save_bounds_check_errors(output_directory, t, iter, equations,
volume_integral.limiter)
end

4 changes: 2 additions & 2 deletions src/callbacks_stage/subcell_bounds_check_2d.jl
@@ -5,7 +5,7 @@
@muladd begin
#! format: noindent

@inline function check_bounds(u, mesh::AbstractMesh{2}, equations, solver, cache,
@inline function check_bounds(u, equations, solver, cache,
limiter::SubcellLimiterIDP)
(; local_twosided, positivity, local_onesided) = solver.volume_integral.limiter
(; variable_bounds) = limiter.cache.subcell_limiter_coefficients
@@ -103,7 +103,7 @@
return nothing
end

@inline function save_bounds_check_errors(output_directory, u, time, iter, equations,
@inline function save_bounds_check_errors(output_directory, time, iter, equations,
limiter::SubcellLimiterIDP)
(; local_twosided, positivity, local_onesided) = limiter
(; idp_bounds_delta_local) = limiter.cache
27 changes: 15 additions & 12 deletions src/callbacks_stage/subcell_limiter_idp_correction_2d.jl
@@ -5,29 +5,32 @@
@muladd begin
#! format: noindent

function perform_idp_correction!(u, dt, mesh::TreeMesh2D, equations, dg, cache)
function perform_idp_correction!(u, dt,
mesh::Union{TreeMesh{2}, StructuredMesh{2}},
equations, dg, cache)
@unpack inverse_weights = dg.basis
@unpack antidiffusive_flux1_L, antidiffusive_flux2_L, antidiffusive_flux1_R, antidiffusive_flux2_R = cache.antidiffusive_fluxes
@unpack alpha1, alpha2 = dg.volume_integral.limiter.cache.subcell_limiter_coefficients

@threaded for element in eachelement(dg, cache)
# Sign switch as in apply_jacobian!
inverse_jacobian = -cache.elements.inverse_jacobian[element]

for j in eachnode(dg), i in eachnode(dg)
# Sign switch as in apply_jacobian!
inverse_jacobian = -get_inverse_jacobian(cache.elements.inverse_jacobian,
mesh, i, j, element)

# Note: antidiffusive_flux1[v, i, xi, element] = antidiffusive_flux2[v, xi, i, element] = 0 for all i in 1:nnodes and xi in {1, nnodes+1}
alpha_flux1 = (1 - alpha1[i, j, element]) *
get_node_vars(antidiffusive_flux1_R, equations, dg, i, j,
element)
get_node_vars(antidiffusive_flux1_R, equations, dg,
i, j, element)
alpha_flux1_ip1 = (1 - alpha1[i + 1, j, element]) *
get_node_vars(antidiffusive_flux1_L, equations, dg, i + 1,
j, element)
get_node_vars(antidiffusive_flux1_L, equations, dg,
i + 1, j, element)
alpha_flux2 = (1 - alpha2[i, j, element]) *
get_node_vars(antidiffusive_flux2_R, equations, dg, i, j,
element)
get_node_vars(antidiffusive_flux2_R, equations, dg,
i, j, element)
alpha_flux2_jp1 = (1 - alpha2[i, j + 1, element]) *
get_node_vars(antidiffusive_flux2_L, equations, dg, i,
j + 1, element)
get_node_vars(antidiffusive_flux2_L, equations, dg,
i, j + 1, element)

for v in eachvariable(equations)
u[v, i, j, element] += dt * inverse_jacobian *
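Side note on the change above (not part of the diff): the per-node call to `get_inverse_jacobian` lets a single `perform_idp_correction!` serve both mesh types, because on a Cartesian `TreeMesh` the inverse Jacobian is constant per element while on a curvilinear `StructuredMesh` it varies per node. A minimal sketch of such a mesh-dispatched lookup, with an assumed name and signature rather than the exact definitions from the PR:

using Trixi: TreeMesh, StructuredMesh

# Sketch only: TreeMesh stores one inverse Jacobian per element, so only the
# trailing `element` index is needed.
@inline get_inverse_jacobian_sketch(inverse_jacobian, mesh::TreeMesh, indices...) =
    inverse_jacobian[last(indices)]

# Sketch only: StructuredMesh stores one inverse Jacobian per node, so the full
# (i, j, element) index tuple is used.
@inline get_inverse_jacobian_sketch(inverse_jacobian, mesh::StructuredMesh, indices...) =
    inverse_jacobian[indices...]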
3 changes: 3 additions & 0 deletions src/solvers/dgsem_structured/dg.jl
@@ -93,6 +93,9 @@ include("indicators_1d.jl")
include("indicators_2d.jl")
include("indicators_3d.jl")

include("subcell_limiters_2d.jl")
include("dg_2d_subcell_limiters.jl")

# Specialized implementations used to improve performance
include("dg_2d_compressible_euler.jl")
include("dg_3d_compressible_euler.jl")
111 changes: 111 additions & 0 deletions src/solvers/dgsem_structured/dg_2d_subcell_limiters.jl
@@ -0,0 +1,111 @@
# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
# Since these FMAs can increase the performance of many numerical algorithms,
# we need to opt-in explicitly.
# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
@muladd begin
#! format: noindent

# Calculate the DG staggered volume fluxes `fhat` in subcell FV-form inside the element
# (**without non-conservative terms**).
#
# See also `flux_differencing_kernel!`.
@inline function calcflux_fhat!(fhat1_L, fhat1_R, fhat2_L, fhat2_R, u,
mesh::StructuredMesh{2},
nonconservative_terms::False, equations,
volume_flux, dg::DGSEM, element, cache)
(; contravariant_vectors) = cache.elements
(; weights, derivative_split) = dg.basis
(; flux_temp_threaded) = cache

flux_temp = flux_temp_threaded[Threads.threadid()]

# The FV-form fluxes are calculated in a recursive manner, i.e.:
# fhat_(0,1) = w_0 * FVol_0,
# fhat_(j,j+1) = fhat_(j-1,j) + w_j * FVol_j, for j=1,...,N-1,
# with the split form volume fluxes FVol_j = -2 * sum_i=0^N D_ji f*_(j,i).

# To use the symmetry of the `volume_flux`, the split form volume flux is precalculated
# like in `calc_volume_integral!` for the `VolumeIntegralFluxDifferencing`
# and saved in `flux_temp`.

# Split form volume flux in orientation 1: x direction
flux_temp .= zero(eltype(flux_temp))

for j in eachnode(dg), i in eachnode(dg)
u_node = get_node_vars(u, equations, dg, i, j, element)

# pull the contravariant vectors in each coordinate direction
Ja1_node = get_contravariant_vector(1, contravariant_vectors, i, j, element) # x direction

# All diagonal entries of `derivative_split` are zero. Thus, we can skip
# the computation of the diagonal terms. In addition, we use the symmetry
# of the `volume_flux` to save half of the possible two-point flux
# computations.

# x direction
for ii in (i + 1):nnodes(dg)
u_node_ii = get_node_vars(u, equations, dg, ii, j, element)
# pull the contravariant vectors and compute the average
Ja1_node_ii = get_contravariant_vector(1, contravariant_vectors, ii, j,
element)
Ja1_avg = 0.5 * (Ja1_node + Ja1_node_ii)

# compute the contravariant sharp flux in the direction of the averaged contravariant vector
fluxtilde1 = volume_flux(u_node, u_node_ii, Ja1_avg, equations)
multiply_add_to_node_vars!(flux_temp, derivative_split[i, ii], fluxtilde1,
equations, dg, i, j)
multiply_add_to_node_vars!(flux_temp, derivative_split[ii, i], fluxtilde1,
equations, dg, ii, j)
end
end

# FV-form flux `fhat` in x direction
fhat1_L[:, 1, :] .= zero(eltype(fhat1_L))
fhat1_L[:, nnodes(dg) + 1, :] .= zero(eltype(fhat1_L))
fhat1_R[:, 1, :] .= zero(eltype(fhat1_R))
fhat1_R[:, nnodes(dg) + 1, :] .= zero(eltype(fhat1_R))

for j in eachnode(dg), i in 1:(nnodes(dg) - 1), v in eachvariable(equations)
fhat1_L[v, i + 1, j] = fhat1_L[v, i, j] + weights[i] * flux_temp[v, i, j]
fhat1_R[v, i + 1, j] = fhat1_L[v, i + 1, j]
end

# Split form volume flux in orientation 2: y direction
flux_temp .= zero(eltype(flux_temp))

for j in eachnode(dg), i in eachnode(dg)
u_node = get_node_vars(u, equations, dg, i, j, element)

# pull the contravariant vectors in each coordinate direction
Ja2_node = get_contravariant_vector(2, contravariant_vectors, i, j, element)

# y direction
for jj in (j + 1):nnodes(dg)
u_node_jj = get_node_vars(u, equations, dg, i, jj, element)
# pull the contravariant vectors and compute the average
Ja2_node_jj = get_contravariant_vector(2, contravariant_vectors, i, jj,
element)
Ja2_avg = 0.5 * (Ja2_node + Ja2_node_jj)
# compute the contravariant sharp flux in the direction of the averaged contravariant vector
fluxtilde2 = volume_flux(u_node, u_node_jj, Ja2_avg, equations)
multiply_add_to_node_vars!(flux_temp, derivative_split[j, jj], fluxtilde2,
equations, dg, i, j)
multiply_add_to_node_vars!(flux_temp, derivative_split[jj, j], fluxtilde2,
equations, dg, i, jj)
end
end

# FV-form flux `fhat` in y direction
fhat2_L[:, :, 1] .= zero(eltype(fhat2_L))
fhat2_L[:, :, nnodes(dg) + 1] .= zero(eltype(fhat2_L))
fhat2_R[:, :, 1] .= zero(eltype(fhat2_R))
fhat2_R[:, :, nnodes(dg) + 1] .= zero(eltype(fhat2_R))

for j in 1:(nnodes(dg) - 1), i in eachnode(dg), v in eachvariable(equations)
fhat2_L[v, i, j + 1] = fhat2_L[v, i, j] + weights[j] * flux_temp[v, i, j]
fhat2_R[v, i, j + 1] = fhat2_L[v, i, j + 1]
end

return nothing
end
end # @muladd
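Side note (not part of the diff): the recursion described in the comments of `calcflux_fhat!` can be illustrated on a single line of nodes. This is a standalone toy sketch; the real routine additionally handles both coordinate directions, the contravariant vectors, and the separate `_L`/`_R` storage.

# Toy sketch of the recursive FV-form accumulation
#   fhat_(0,1) = w_0 * FVol_0,   fhat_(j,j+1) = fhat_(j-1,j) + w_j * FVol_j,
# where `flux_temp[v, i]` is assumed to hold the summed split-form contribution FVol_i.
function accumulate_fhat_1d(flux_temp::AbstractMatrix, weights::AbstractVector)
    nvars, n_nodes = size(flux_temp)
    fhat = zeros(eltype(flux_temp), nvars, n_nodes + 1)  # both element boundaries stay zero
    for i in 1:(n_nodes - 1), v in 1:nvars
        fhat[v, i + 1] = fhat[v, i] + weights[i] * flux_temp[v, i]
    end
    return fhat
end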