From add2542c0076dc6526d969f78cec2f732430bc15 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 14 Aug 2023 19:05:34 +0200
Subject: [PATCH 01/36] Bump crate-ci/typos from 1.16.2 to 1.16.5 (#1606)
Bumps [crate-ci/typos](https://github.com/crate-ci/typos) from 1.16.2 to 1.16.5.
- [Release notes](https://github.com/crate-ci/typos/releases)
- [Changelog](https://github.com/crate-ci/typos/blob/master/CHANGELOG.md)
- [Commits](https://github.com/crate-ci/typos/compare/v1.16.2...v1.16.5)
---
updated-dependencies:
- dependency-name: crate-ci/typos
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
.github/workflows/SpellCheck.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/SpellCheck.yml b/.github/workflows/SpellCheck.yml
index a1a429cad97..6ebb288ea30 100644
--- a/.github/workflows/SpellCheck.yml
+++ b/.github/workflows/SpellCheck.yml
@@ -10,4 +10,4 @@ jobs:
- name: Checkout Actions Repository
uses: actions/checkout@v3
- name: Check spelling
- uses: crate-ci/typos@v1.16.2
+ uses: crate-ci/typos@v1.16.5
From 7f83a1a938eecd9b841efe215a6e482e67cfdcc1 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Tue, 15 Aug 2023 11:58:32 +0200
Subject: [PATCH 02/36] Enable MPI coverage with Linux and reduce heap size
hint (#1603)
* Enable MPI coverage with Linux and reduce heap size hint
* Update runtests.jl
* no MPI coverage CI on macOS
* Update runtests.jl
* Update runtests.jl
---
test/runtests.jl | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/test/runtests.jl b/test/runtests.jl
index 1b0c745dbfd..f1adbaaf1df 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -28,10 +28,10 @@ const TRIXI_NTHREADS = clamp(Sys.CPU_THREADS, 2, 3)
cmd = string(Base.julia_cmd())
coverage = occursin("--code-coverage", cmd) &&
!occursin("--code-coverage=none", cmd)
- if !(coverage && Sys.iswindows()) && !(coverage && Sys.islinux())
+ if !(coverage && Sys.iswindows()) && !(coverage && Sys.isapple())
# We provide a `--heap-size-hint` to avoid/reduce out-of-memory errors during CI testing
mpiexec() do cmd
- run(`$cmd -n $TRIXI_MPI_NPROCS $(Base.julia_cmd()) --threads=1 --check-bounds=yes --heap-size-hint=1G $(abspath("test_mpi.jl"))`)
+ run(`$cmd -n $TRIXI_MPI_NPROCS $(Base.julia_cmd()) --threads=1 --check-bounds=yes --heap-size-hint=0.5G $(abspath("test_mpi.jl"))`)
end
end
end
From a4283e1e8253f7ddf2cabf22e2c7b39ce29a644f Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Tue, 15 Aug 2023 17:20:52 +0200
Subject: [PATCH 03/36] Update dependabot.yml (#1608)
---
.github/dependabot.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 700707ced32..d60f0707fc2 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -4,4 +4,4 @@ updates:
- package-ecosystem: "github-actions"
directory: "/" # Location of package manifests
schedule:
- interval: "weekly"
+ interval: "monthly"
From 4da5c53776c1d617a2b9bb656da02640f1d6a211 Mon Sep 17 00:00:00 2001
From: Benjamin Bolm <74359358+bennibolm@users.noreply.github.com>
Date: Fri, 18 Aug 2023 12:37:06 +0200
Subject: [PATCH 04/36] Subcell positivity IDP limiting for conservative
variables (#1476)
* Add IDP positivity limiting for conservative variables
* Add elixir with modified blast wave
* Add documentation
* Fix parameter type
* Adjust output of summary callback
* Merge changes from `subcell-limiting` and `main`
* Fix test with right time stepping
* Implement first suggestions
* Implement suggestions
* Fix elixir
* Relocate `perform_idp_correction!`
* Rename variable in `snake_case`
* Implement other suggestions
* Rename container variables using `snake_case`
* Delete timer
* Merge `subcell-limiting` (Adapt docstrings)
* Merge `subcell-limiting`
* Merge `subcell-limiting` (Renaming and dispatch)
* Fix documentation
* Implement positivity limiter with numbers of cons vars
* Merge suggestions already implemented in `subcell-limiting`
* Fix elixir
* Update docstring and output
* Restructure parameter for positivity limiting
* Add test for "show" routine
* Rename Limiters and Containers
* Rename antidiffusive stage callback
* Relocate subcell limiter code
* Move create_cache routine to specific file
* Implement suggestions
* Implement suggestions
---------
Co-authored-by: Hendrik Ranocha
Co-authored-by: Michael Schlottke-Lakemper
---
NEWS.md | 1 +
.../elixir_euler_shockcapturing_subcell.jl | 92 +++++++
...ubble_shockcapturing_subcell_positivity.jl | 140 ++++++++++
src/Trixi.jl | 5 +-
src/callbacks_stage/callbacks_stage.jl | 1 +
.../subcell_limiter_idp_correction.jl | 69 +++++
.../subcell_limiter_idp_correction_2d.jl | 44 ++++
src/solvers/dg.jl | 40 +++
src/solvers/dgsem_tree/containers_2d.jl | 136 +++++++++-
src/solvers/dgsem_tree/dg.jl | 5 +
.../dgsem_tree/dg_2d_subcell_limiters.jl | 193 ++++++++++++++
src/solvers/dgsem_tree/subcell_limiters.jl | 103 ++++++++
src/solvers/dgsem_tree/subcell_limiters_2d.jl | 114 +++++++++
src/time_integration/methods_SSP.jl | 241 ++++++++++++++++++
src/time_integration/time_integration.jl | 1 +
test/test_tree_2d_euler.jl | 6 +
test/test_tree_2d_eulermulti.jl | 8 +
test/test_unit.jl | 39 +--
18 files changed, 1218 insertions(+), 20 deletions(-)
create mode 100644 examples/tree_2d_dgsem/elixir_euler_shockcapturing_subcell.jl
create mode 100644 examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_positivity.jl
create mode 100644 src/callbacks_stage/subcell_limiter_idp_correction.jl
create mode 100644 src/callbacks_stage/subcell_limiter_idp_correction_2d.jl
create mode 100644 src/solvers/dgsem_tree/dg_2d_subcell_limiters.jl
create mode 100644 src/solvers/dgsem_tree/subcell_limiters.jl
create mode 100644 src/solvers/dgsem_tree/subcell_limiters_2d.jl
create mode 100644 src/time_integration/methods_SSP.jl
diff --git a/NEWS.md b/NEWS.md
index 10125c40d17..4b96e1e2834 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -12,6 +12,7 @@ for human readability.
- Non-uniform `TreeMesh` available for hyperbolic-parabolic equations.
- Capability to set truly discontinuous initial conditions in 1D.
- Wetting and drying feature and examples for 1D and 2D shallow water equations
+- Subcell positivity limiting support for conservative variables in 2D for `TreeMesh`
#### Changed
diff --git a/examples/tree_2d_dgsem/elixir_euler_shockcapturing_subcell.jl b/examples/tree_2d_dgsem/elixir_euler_shockcapturing_subcell.jl
new file mode 100644
index 00000000000..6b69e4db563
--- /dev/null
+++ b/examples/tree_2d_dgsem/elixir_euler_shockcapturing_subcell.jl
@@ -0,0 +1,92 @@
+
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# semidiscretization of the compressible Euler equations
+
+equations = CompressibleEulerEquations2D(1.4)
+
+"""
+ initial_condition_blast_wave(x, t, equations::CompressibleEulerEquations2D)
+
+A medium blast wave (modified to lower density and higher pressure) taken from
+- Sebastian Hennemann, Gregor J. Gassner (2020)
+ A provably entropy stable subcell shock capturing approach for high order split form DG
+ [arXiv: 2008.12044](https://arxiv.org/abs/2008.12044)
+"""
+function initial_condition_blast_wave(x, t, equations::CompressibleEulerEquations2D)
+    # Modified from the Hennemann & Gassner JCP paper 2020 (Sec. 6.3): lower density, higher pressure
+ # Set up polar coordinates
+ inicenter = SVector(0.0, 0.0)
+ x_norm = x[1] - inicenter[1]
+ y_norm = x[2] - inicenter[2]
+ r = sqrt(x_norm^2 + y_norm^2)
+ phi = atan(y_norm, x_norm)
+ sin_phi, cos_phi = sincos(phi)
+
+ # Calculate primitive variables "normal" medium blast wave
+ rho = r > 0.5 ? 0.1 : 0.2691 # rho = r > 0.5 ? 1 : 1.1691
+ v1 = r > 0.5 ? 0.0 : 0.1882 * cos_phi
+ v2 = r > 0.5 ? 0.0 : 0.1882 * sin_phi
+ p = r > 0.5 ? 1.0E-1 : 1.245 # p = r > 0.5 ? 1.0E-3 : 1.245
+
+ return prim2cons(SVector(rho, v1, v2, p), equations)
+end
+initial_condition = initial_condition_blast_wave
+
+surface_flux = flux_lax_friedrichs
+volume_flux = flux_ranocha
+basis = LobattoLegendreBasis(3)
+limiter_idp = SubcellLimiterIDP(equations, basis;
+ positivity_variables_cons=[1],
+ positivity_correction_factor=0.5)
+volume_integral = VolumeIntegralSubcellLimiting(limiter_idp;
+ volume_flux_dg=volume_flux,
+ volume_flux_fv=surface_flux)
+solver = DGSEM(basis, surface_flux, volume_integral)
+
+coordinates_min = (-2.0, -2.0)
+coordinates_max = ( 2.0, 2.0)
+mesh = TreeMesh(coordinates_min, coordinates_max,
+ initial_refinement_level=5,
+ n_cells_max=100_000)
+
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver)
+
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+tspan = (0.0, 1.0)
+ode = semidiscretize(semi, tspan)
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 100
+analysis_callback = AnalysisCallback(semi, interval=analysis_interval)
+
+alive_callback = AliveCallback(analysis_interval=analysis_interval)
+
+save_solution = SaveSolutionCallback(interval=100,
+ save_initial_solution=true,
+ save_final_solution=true,
+ solution_variables=cons2prim)
+
+stepsize_callback = StepsizeCallback(cfl=0.6)
+
+callbacks = CallbackSet(summary_callback,
+ analysis_callback, alive_callback,
+ save_solution,
+ stepsize_callback)
+
+
+###############################################################################
+# run the simulation
+
+stage_callbacks = (SubcellLimiterIDPCorrection(),)
+
+sol = Trixi.solve(ode, Trixi.SimpleSSPRK33(stage_callbacks=stage_callbacks);
+ dt=1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
+ save_everystep=false, callback=callbacks);
+summary_callback() # print the timer summary
diff --git a/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_positivity.jl b/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_positivity.jl
new file mode 100644
index 00000000000..a67eaeb5b2b
--- /dev/null
+++ b/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_positivity.jl
@@ -0,0 +1,140 @@
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# semidiscretization of the compressible Euler multicomponent equations
+
+# 1) Dry Air 2) Helium + 28% Air
+equations = CompressibleEulerMulticomponentEquations2D(gammas = (1.4, 1.648),
+ gas_constants = (0.287, 1.578))
+
+"""
+ initial_condition_shock_bubble(x, t, equations::CompressibleEulerMulticomponentEquations2D{5, 2})
+
+A shock-bubble testcase for multicomponent Euler equations
+- Ayoub Gouasmi, Karthik Duraisamy, Scott Murman
+ Formulation of Entropy-Stable schemes for the multicomponent compressible Euler equations
+ [arXiv: 1904.00972](https://arxiv.org/abs/1904.00972)
+"""
+function initial_condition_shock_bubble(x, t, equations::CompressibleEulerMulticomponentEquations2D{5, 2})
+ # bubble test case, see Gouasmi et al. https://arxiv.org/pdf/1904.00972
+ # other reference: https://www.researchgate.net/profile/Pep_Mulet/publication/222675930_A_flux-split_algorithm_applied_to_conservative_models_for_multicomponent_compressible_flows/links/568da54508aeaa1481ae7af0.pdf
+ # typical domain is rectangular, we change it to a square, as Trixi can only do squares
+ @unpack gas_constants = equations
+
+ # Positivity Preserving Parameter, can be set to zero if scheme is positivity preserving
+ delta = 0.03
+
+ # Region I
+ rho1_1 = delta
+ rho2_1 = 1.225 * gas_constants[1]/gas_constants[2] - delta
+ v1_1 = zero(delta)
+ v2_1 = zero(delta)
+ p_1 = 101325
+
+ # Region II
+ rho1_2 = 1.225-delta
+ rho2_2 = delta
+ v1_2 = zero(delta)
+ v2_2 = zero(delta)
+ p_2 = 101325
+
+ # Region III
+ rho1_3 = 1.6861 - delta
+ rho2_3 = delta
+ v1_3 = -113.5243
+ v2_3 = zero(delta)
+ p_3 = 159060
+
+ # Set up Region I & II:
+ inicenter = SVector(zero(delta), zero(delta))
+ x_norm = x[1] - inicenter[1]
+ y_norm = x[2] - inicenter[2]
+ r = sqrt(x_norm^2 + y_norm^2)
+
+ if (x[1] > 0.50)
+ # Set up Region III
+ rho1 = rho1_3
+ rho2 = rho2_3
+ v1 = v1_3
+ v2 = v2_3
+ p = p_3
+ elseif (r < 0.25)
+ # Set up Region I
+ rho1 = rho1_1
+ rho2 = rho2_1
+ v1 = v1_1
+ v2 = v2_1
+ p = p_1
+ else
+ # Set up Region II
+ rho1 = rho1_2
+ rho2 = rho2_2
+ v1 = v1_2
+ v2 = v2_2
+ p = p_2
+ end
+
+ return prim2cons(SVector(v1, v2, p, rho1, rho2), equations)
+end
+initial_condition = initial_condition_shock_bubble
+
+surface_flux = flux_lax_friedrichs
+volume_flux = flux_ranocha
+basis = LobattoLegendreBasis(3)
+
+limiter_idp = SubcellLimiterIDP(equations, basis;
+ positivity_variables_cons=[(i+3 for i in eachcomponent(equations))...])
+
+volume_integral = VolumeIntegralSubcellLimiting(limiter_idp;
+ volume_flux_dg=volume_flux,
+ volume_flux_fv=surface_flux)
+
+solver = DGSEM(basis, surface_flux, volume_integral)
+
+coordinates_min = (-2.25, -2.225)
+coordinates_max = ( 2.20, 2.225)
+mesh = TreeMesh(coordinates_min, coordinates_max,
+ initial_refinement_level=3,
+ n_cells_max=1_000_000)
+
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver)
+
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+tspan = (0.0, 0.01)
+ode = semidiscretize(semi, tspan)
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 300
+analysis_callback = AnalysisCallback(semi, interval=analysis_interval,
+ extra_analysis_integrals=(Trixi.density,))
+
+alive_callback = AliveCallback(analysis_interval=analysis_interval)
+
+save_solution = SaveSolutionCallback(interval=300,
+ save_initial_solution=true,
+ save_final_solution=true,
+ solution_variables=cons2prim)
+
+stepsize_callback = StepsizeCallback(cfl=0.9)
+
+callbacks = CallbackSet(summary_callback,
+ analysis_callback,
+ alive_callback,
+ save_solution,
+ stepsize_callback)
+
+
+###############################################################################
+# run the simulation
+
+stage_callbacks = (SubcellLimiterIDPCorrection(),)
+
+sol = Trixi.solve(ode, Trixi.SimpleSSPRK33(stage_callbacks=stage_callbacks);
+ dt=1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
+ save_everystep=false, callback=callbacks);
+summary_callback() # print the timer summary
\ No newline at end of file
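For a quick interactive run of this new elixir at the reduced resolution used in CI, something along the following lines should work. This is an illustrative sketch: it assumes the `examples_dir()` helper and keyword overrides via `trixi_include`, mirroring the `@test_trixi_include` call added to the tests below.

using Trixi

# Hypothetical interactive run of the new elixir with the reduced CI settings from the
# test added below; `examples_dir()` and the keyword overrides are assumptions here.
trixi_include(joinpath(examples_dir(), "tree_2d_dgsem",
                       "elixir_eulermulti_shock_bubble_shockcapturing_subcell_positivity.jl"),
              initial_refinement_level = 3, tspan = (0.0, 0.001))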
diff --git a/src/Trixi.jl b/src/Trixi.jl
index 78ddaa3ca7f..ec4d20558e5 100644
--- a/src/Trixi.jl
+++ b/src/Trixi.jl
@@ -121,10 +121,10 @@ include("semidiscretization/semidiscretization_hyperbolic.jl")
include("semidiscretization/semidiscretization_hyperbolic_parabolic.jl")
include("semidiscretization/semidiscretization_euler_acoustics.jl")
include("semidiscretization/semidiscretization_coupled.jl")
+include("time_integration/time_integration.jl")
include("callbacks_step/callbacks_step.jl")
include("callbacks_stage/callbacks_stage.jl")
include("semidiscretization/semidiscretization_euler_gravity.jl")
-include("time_integration/time_integration.jl")
# `trixi_include` and special elixirs such as `convergence_test`
include("auxiliary/special_elixirs.jl")
@@ -229,6 +229,9 @@ export DG,
SurfaceIntegralUpwind,
MortarL2
+export VolumeIntegralSubcellLimiting,
+ SubcellLimiterIDP, SubcellLimiterIDPCorrection
+
export nelements, nnodes, nvariables,
eachelement, eachnode, eachvariable
diff --git a/src/callbacks_stage/callbacks_stage.jl b/src/callbacks_stage/callbacks_stage.jl
index ab0f34efb78..976af327e6f 100644
--- a/src/callbacks_stage/callbacks_stage.jl
+++ b/src/callbacks_stage/callbacks_stage.jl
@@ -6,6 +6,7 @@
#! format: noindent
include("positivity_zhang_shu.jl")
+include("subcell_limiter_idp_correction.jl")
# TODO: TrixiShallowWater: move specific limiter file
include("positivity_shallow_water.jl")
end # @muladd
diff --git a/src/callbacks_stage/subcell_limiter_idp_correction.jl b/src/callbacks_stage/subcell_limiter_idp_correction.jl
new file mode 100644
index 00000000000..69125ebecd9
--- /dev/null
+++ b/src/callbacks_stage/subcell_limiter_idp_correction.jl
@@ -0,0 +1,69 @@
+# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
+# Since these FMAs can increase the performance of many numerical algorithms,
+# we need to opt-in explicitly.
+# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
+@muladd begin
+#! format: noindent
+
+"""
+ SubcellLimiterIDPCorrection()
+
+Perform antidiffusive correction stage for the a posteriori IDP limiter [`SubcellLimiterIDP`](@ref)
+called with [`VolumeIntegralSubcellLimiting`](@ref).
+
+!!! note
+    This callback and the actual limiter [`SubcellLimiterIDP`](@ref) must be used together.
+    The callback is not a replacement for the limiter but a necessary addition to it.
+
+## References
+
+- Rueda-Ramírez, Pazner, Gassner (2022)
+ Subcell Limiting Strategies for Discontinuous Galerkin Spectral Element Methods
+ [DOI: 10.1016/j.compfluid.2022.105627](https://doi.org/10.1016/j.compfluid.2022.105627)
+- Pazner (2020)
+ Sparse invariant domain preserving discontinuous Galerkin methods with subcell convex limiting
+ [DOI: 10.1016/j.cma.2021.113876](https://doi.org/10.1016/j.cma.2021.113876)
+
+!!! warning "Experimental implementation"
+ This is an experimental feature and may change in future releases.
+"""
+struct SubcellLimiterIDPCorrection end
+
+function (limiter!::SubcellLimiterIDPCorrection)(u_ode,
+ integrator::Trixi.SimpleIntegratorSSP,
+ stage)
+ semi = integrator.p
+ limiter!(u_ode, semi, integrator.t, integrator.dt,
+ semi.solver.volume_integral)
+end
+
+function (limiter!::SubcellLimiterIDPCorrection)(u_ode, semi, t, dt,
+ volume_integral::VolumeIntegralSubcellLimiting)
+ @trixi_timeit timer() "a posteriori limiter" limiter!(u_ode, semi, t, dt,
+ volume_integral.limiter)
+end
+
+function (limiter!::SubcellLimiterIDPCorrection)(u_ode, semi, t, dt,
+ limiter::SubcellLimiterIDP)
+ mesh, equations, solver, cache = mesh_equations_solver_cache(semi)
+
+ u = wrap_array(u_ode, mesh, equations, solver, cache)
+
+ # Calculate blending factor alpha in [0,1]
+ # f_ij = alpha_ij * f^(FV)_ij + (1 - alpha_ij) * f^(DG)_ij
+ # = f^(FV)_ij + (1 - alpha_ij) * f^(antidiffusive)_ij
+ @trixi_timeit timer() "blending factors" solver.volume_integral.limiter(u, semi,
+ solver, t,
+ dt)
+
+ perform_idp_correction!(u, dt, mesh, equations, solver, cache)
+
+ return nothing
+end
+
+init_callback(limiter!::SubcellLimiterIDPCorrection, semi) = nothing
+
+finalize_callback(limiter!::SubcellLimiterIDPCorrection, semi) = nothing
+
+include("subcell_limiter_idp_correction_2d.jl")
+end # @muladd
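The blending comment above can be checked with a minimal scalar sketch (placeholder names and values, not Trixi API): the additive form applied by the correction, `f^(FV) + (1 - alpha) * f^(antidiffusive)`, reproduces the convex blend of FV and DG fluxes.

# Scalar illustration of the flux blending identity used by the correction stage;
# all names and numbers here are placeholders.
f_dg, f_fv, alpha = 1.7, 1.2, 0.3
f_antidiffusive = f_dg - f_fv
blended_convex   = alpha * f_fv + (1 - alpha) * f_dg
blended_additive = f_fv + (1 - alpha) * f_antidiffusive
@assert blended_convex ≈ blended_additive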
diff --git a/src/callbacks_stage/subcell_limiter_idp_correction_2d.jl b/src/callbacks_stage/subcell_limiter_idp_correction_2d.jl
new file mode 100644
index 00000000000..f6b91444578
--- /dev/null
+++ b/src/callbacks_stage/subcell_limiter_idp_correction_2d.jl
@@ -0,0 +1,44 @@
+# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
+# Since these FMAs can increase the performance of many numerical algorithms,
+# we need to opt-in explicitly.
+# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
+@muladd begin
+#! format: noindent
+
+function perform_idp_correction!(u, dt, mesh::TreeMesh2D, equations, dg, cache)
+ @unpack inverse_weights = dg.basis
+ @unpack antidiffusive_flux1, antidiffusive_flux2 = cache.antidiffusive_fluxes
+ @unpack alpha1, alpha2 = dg.volume_integral.limiter.cache.subcell_limiter_coefficients
+
+ @threaded for element in eachelement(dg, cache)
+ # Sign switch as in apply_jacobian!
+ inverse_jacobian = -cache.elements.inverse_jacobian[element]
+
+ for j in eachnode(dg), i in eachnode(dg)
+ # Note: antidiffusive_flux1[v, i, xi, element] = antidiffusive_flux2[v, xi, i, element] = 0 for all i in 1:nnodes and xi in {1, nnodes+1}
+ alpha_flux1 = (1 - alpha1[i, j, element]) *
+ get_node_vars(antidiffusive_flux1, equations, dg, i, j,
+ element)
+ alpha_flux1_ip1 = (1 - alpha1[i + 1, j, element]) *
+ get_node_vars(antidiffusive_flux1, equations, dg, i + 1,
+ j, element)
+ alpha_flux2 = (1 - alpha2[i, j, element]) *
+ get_node_vars(antidiffusive_flux2, equations, dg, i, j,
+ element)
+ alpha_flux2_jp1 = (1 - alpha2[i, j + 1, element]) *
+ get_node_vars(antidiffusive_flux2, equations, dg, i,
+ j + 1, element)
+
+ for v in eachvariable(equations)
+ u[v, i, j, element] += dt * inverse_jacobian *
+ (inverse_weights[i] *
+ (alpha_flux1_ip1[v] - alpha_flux1[v]) +
+ inverse_weights[j] *
+ (alpha_flux2_jp1[v] - alpha_flux2[v]))
+ end
+ end
+ end
+
+ return nothing
+end
+end # @muladd
diff --git a/src/solvers/dg.jl b/src/solvers/dg.jl
index 495e0ffc4a4..36bbc6de361 100644
--- a/src/solvers/dg.jl
+++ b/src/solvers/dg.jl
@@ -174,6 +174,46 @@ function Base.show(io::IO, ::MIME"text/plain",
end
end
+"""
+ VolumeIntegralSubcellLimiting(limiter;
+ volume_flux_dg, volume_flux_fv)
+
+A subcell limiting volume integral type for DG methods based on subcell blending approaches
+with a low-order FV method. Used with limiter [`SubcellLimiterIDP`](@ref).
+
+!!! warning "Experimental implementation"
+ This is an experimental feature and may change in future releases.
+"""
+struct VolumeIntegralSubcellLimiting{VolumeFluxDG, VolumeFluxFV, Limiter} <:
+ AbstractVolumeIntegral
+ volume_flux_dg::VolumeFluxDG
+ volume_flux_fv::VolumeFluxFV
+ limiter::Limiter
+end
+
+function VolumeIntegralSubcellLimiting(limiter; volume_flux_dg,
+ volume_flux_fv)
+ VolumeIntegralSubcellLimiting{typeof(volume_flux_dg), typeof(volume_flux_fv),
+ typeof(limiter)}(volume_flux_dg, volume_flux_fv,
+ limiter)
+end
+
+function Base.show(io::IO, mime::MIME"text/plain",
+ integral::VolumeIntegralSubcellLimiting)
+ @nospecialize integral # reduce precompilation time
+
+ if get(io, :compact, false)
+ show(io, integral)
+ else
+ summary_header(io, "VolumeIntegralSubcellLimiting")
+ summary_line(io, "volume flux DG", integral.volume_flux_dg)
+ summary_line(io, "volume flux FV", integral.volume_flux_fv)
+ summary_line(io, "limiter", integral.limiter |> typeof |> nameof)
+ show(increment_indent(io), mime, integral.limiter)
+ summary_footer(io)
+ end
+end
+
# TODO: FD. Should this definition live in a different file because it is
# not strictly a DG method?
"""
diff --git a/src/solvers/dgsem_tree/containers_2d.jl b/src/solvers/dgsem_tree/containers_2d.jl
index d80522d42fd..9148b936312 100644
--- a/src/solvers/dgsem_tree/containers_2d.jl
+++ b/src/solvers/dgsem_tree/containers_2d.jl
@@ -77,7 +77,7 @@ end
eachelement(elements::ElementContainer2D)
Return an iterator over the indices that specify the location in relevant data structures
-for the elements in `elements`.
+for the elements in `elements`.
In particular, not the elements themselves are returned.
"""
@inline eachelement(elements::ElementContainer2D) = Base.OneTo(nelements(elements))
@@ -1254,4 +1254,138 @@ function init_mpi_mortars!(mpi_mortars, elements, mesh::TreeMesh2D)
return nothing
end
+
+# Container data structure (structure-of-arrays style) for FCT-type antidiffusive fluxes
+# (i, j+1)
+# |
+# flux2(i, j+1)
+# |
+# (i-1, j) ---flux1(i, j)--- (i, j) ---flux1(i+1, j)--- (i+1, j)
+# |
+# flux2(i, j)
+# |
+# (i, j-1)
+mutable struct ContainerAntidiffusiveFlux2D{uEltype <: Real}
+ antidiffusive_flux1::Array{uEltype, 4} # [variables, i, j, elements]
+ antidiffusive_flux2::Array{uEltype, 4} # [variables, i, j, elements]
+ # internal `resize!`able storage
+ _antidiffusive_flux1::Vector{uEltype}
+ _antidiffusive_flux2::Vector{uEltype}
+end
+
+function ContainerAntidiffusiveFlux2D{uEltype}(capacity::Integer, n_variables,
+ n_nodes) where {uEltype <: Real}
+ nan_uEltype = convert(uEltype, NaN)
+
+ # Initialize fields with defaults
+ _antidiffusive_flux1 = fill(nan_uEltype,
+ n_variables * (n_nodes + 1) * n_nodes * capacity)
+ antidiffusive_flux1 = unsafe_wrap(Array, pointer(_antidiffusive_flux1),
+ (n_variables, n_nodes + 1, n_nodes, capacity))
+
+ _antidiffusive_flux2 = fill(nan_uEltype,
+ n_variables * n_nodes * (n_nodes + 1) * capacity)
+ antidiffusive_flux2 = unsafe_wrap(Array, pointer(_antidiffusive_flux2),
+ (n_variables, n_nodes, n_nodes + 1, capacity))
+
+ return ContainerAntidiffusiveFlux2D{uEltype}(antidiffusive_flux1,
+ antidiffusive_flux2,
+ _antidiffusive_flux1,
+ _antidiffusive_flux2)
+end
+
+nvariables(fluxes::ContainerAntidiffusiveFlux2D) = size(fluxes.antidiffusive_flux1, 1)
+nnodes(fluxes::ContainerAntidiffusiveFlux2D) = size(fluxes.antidiffusive_flux1, 3)
+
+# Only one-dimensional `Array`s are `resize!`able in Julia.
+# Hence, we use `Vector`s as internal storage and `resize!`
+# them whenever needed. Then, we reuse the same memory by
+# `unsafe_wrap`ping multi-dimensional `Array`s around the
+# internal storage.
+function Base.resize!(fluxes::ContainerAntidiffusiveFlux2D, capacity)
+ n_nodes = nnodes(fluxes)
+ n_variables = nvariables(fluxes)
+
+ @unpack _antidiffusive_flux1, _antidiffusive_flux2 = fluxes
+
+ resize!(_antidiffusive_flux1, n_variables * (n_nodes + 1) * n_nodes * capacity)
+ fluxes.antidiffusive_flux1 = unsafe_wrap(Array, pointer(_antidiffusive_flux1),
+ (n_variables, n_nodes + 1, n_nodes,
+ capacity))
+ resize!(_antidiffusive_flux2, n_variables * n_nodes * (n_nodes + 1) * capacity)
+ fluxes.antidiffusive_flux2 = unsafe_wrap(Array, pointer(_antidiffusive_flux2),
+ (n_variables, n_nodes, n_nodes + 1,
+ capacity))
+
+ return nothing
+end
+
+# Container data structure (structure-of-arrays style) for variables used for IDP limiting
+mutable struct ContainerSubcellLimiterIDP2D{uEltype <: Real}
+ alpha::Array{uEltype, 3} # [i, j, element]
+ alpha1::Array{uEltype, 3}
+ alpha2::Array{uEltype, 3}
+ variable_bounds::Vector{Array{uEltype, 3}}
+ # internal `resize!`able storage
+ _alpha::Vector{uEltype}
+ _alpha1::Vector{uEltype}
+ _alpha2::Vector{uEltype}
+ _variable_bounds::Vector{Vector{uEltype}}
+end
+
+function ContainerSubcellLimiterIDP2D{uEltype}(capacity::Integer, n_nodes,
+ length) where {uEltype <: Real}
+ nan_uEltype = convert(uEltype, NaN)
+
+ # Initialize fields with defaults
+ _alpha = fill(nan_uEltype, n_nodes * n_nodes * capacity)
+ alpha = unsafe_wrap(Array, pointer(_alpha), (n_nodes, n_nodes, capacity))
+ _alpha1 = fill(nan_uEltype, (n_nodes + 1) * n_nodes * capacity)
+ alpha1 = unsafe_wrap(Array, pointer(_alpha1), (n_nodes + 1, n_nodes, capacity))
+ _alpha2 = fill(nan_uEltype, n_nodes * (n_nodes + 1) * capacity)
+ alpha2 = unsafe_wrap(Array, pointer(_alpha2), (n_nodes, n_nodes + 1, capacity))
+
+ _variable_bounds = Vector{Vector{uEltype}}(undef, length)
+ variable_bounds = Vector{Array{uEltype, 3}}(undef, length)
+ for i in 1:length
+ _variable_bounds[i] = fill(nan_uEltype, n_nodes * n_nodes * capacity)
+ variable_bounds[i] = unsafe_wrap(Array, pointer(_variable_bounds[i]),
+ (n_nodes, n_nodes, capacity))
+ end
+
+ return ContainerSubcellLimiterIDP2D{uEltype}(alpha, alpha1, alpha2,
+ variable_bounds,
+ _alpha, _alpha1, _alpha2,
+ _variable_bounds)
+end
+
+nnodes(container::ContainerSubcellLimiterIDP2D) = size(container.alpha, 1)
+
+# Only one-dimensional `Array`s are `resize!`able in Julia.
+# Hence, we use `Vector`s as internal storage and `resize!`
+# them whenever needed. Then, we reuse the same memory by
+# `unsafe_wrap`ping multi-dimensional `Array`s around the
+# internal storage.
+function Base.resize!(container::ContainerSubcellLimiterIDP2D, capacity)
+ n_nodes = nnodes(container)
+
+ @unpack _alpha, _alpha1, _alpha2 = container
+ resize!(_alpha, n_nodes * n_nodes * capacity)
+ container.alpha = unsafe_wrap(Array, pointer(_alpha), (n_nodes, n_nodes, capacity))
+ resize!(_alpha1, (n_nodes + 1) * n_nodes * capacity)
+ container.alpha1 = unsafe_wrap(Array, pointer(_alpha1),
+ (n_nodes + 1, n_nodes, capacity))
+ resize!(_alpha2, n_nodes * (n_nodes + 1) * capacity)
+ container.alpha2 = unsafe_wrap(Array, pointer(_alpha2),
+ (n_nodes, n_nodes + 1, capacity))
+
+ @unpack _variable_bounds = container
+ for i in 1:length(_variable_bounds)
+ resize!(_variable_bounds[i], n_nodes * n_nodes * capacity)
+ container.variable_bounds[i] = unsafe_wrap(Array, pointer(_variable_bounds[i]),
+ (n_nodes, n_nodes, capacity))
+ end
+
+ return nothing
+end
end # @muladd
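The `resize!`/`unsafe_wrap` idiom shared by both containers can be illustrated with a minimal standalone sketch (plain Julia, illustrative names only):

# Keep a flat Vector as the owning, resizable storage and re-wrap it into a
# multi-dimensional view after every resize, since only Vectors are `resize!`able.
storage = fill(NaN, 3 * 4)                               # flat backing store
matrix  = unsafe_wrap(Array, pointer(storage), (3, 4))   # multi-dimensional view

resize!(storage, 3 * 6)                                  # grow the backing store ...
matrix = unsafe_wrap(Array, pointer(storage), (3, 6))    # ... and wrap it again
# The old wrap must not be used after `resize!` because the memory may have moved.
# Note also that the wrapped Array does not keep `storage` alive, which is why the
# containers above store both the views and the flat vectors in the same struct.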
diff --git a/src/solvers/dgsem_tree/dg.jl b/src/solvers/dgsem_tree/dg.jl
index cb28dad968c..6e02bc1d94a 100644
--- a/src/solvers/dgsem_tree/dg.jl
+++ b/src/solvers/dgsem_tree/dg.jl
@@ -71,4 +71,9 @@ include("dg_3d_parabolic.jl")
# as well as specialized implementations used to improve performance
include("dg_2d_compressible_euler.jl")
include("dg_3d_compressible_euler.jl")
+
+# Subcell limiters
+include("subcell_limiters.jl")
+include("subcell_limiters_2d.jl")
+include("dg_2d_subcell_limiters.jl")
end # @muladd
diff --git a/src/solvers/dgsem_tree/dg_2d_subcell_limiters.jl b/src/solvers/dgsem_tree/dg_2d_subcell_limiters.jl
new file mode 100644
index 00000000000..70ff346740d
--- /dev/null
+++ b/src/solvers/dgsem_tree/dg_2d_subcell_limiters.jl
@@ -0,0 +1,193 @@
+# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
+# Since these FMAs can increase the performance of many numerical algorithms,
+# we need to opt-in explicitly.
+# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
+@muladd begin
+#! format: noindent
+
+function create_cache(mesh::TreeMesh{2}, equations,
+ volume_integral::VolumeIntegralSubcellLimiting, dg::DG, uEltype)
+ cache = create_cache(mesh, equations,
+ VolumeIntegralPureLGLFiniteVolume(volume_integral.volume_flux_fv),
+ dg, uEltype)
+
+ A3dp1_x = Array{uEltype, 3}
+ A3dp1_y = Array{uEltype, 3}
+ A3d = Array{uEltype, 3}
+
+ fhat1_threaded = A3dp1_x[A3dp1_x(undef, nvariables(equations), nnodes(dg) + 1,
+ nnodes(dg)) for _ in 1:Threads.nthreads()]
+ fhat2_threaded = A3dp1_y[A3dp1_y(undef, nvariables(equations), nnodes(dg),
+ nnodes(dg) + 1) for _ in 1:Threads.nthreads()]
+ flux_temp_threaded = A3d[A3d(undef, nvariables(equations), nnodes(dg), nnodes(dg))
+ for _ in 1:Threads.nthreads()]
+
+ antidiffusive_fluxes = Trixi.ContainerAntidiffusiveFlux2D{uEltype}(0,
+ nvariables(equations),
+ nnodes(dg))
+
+ return (; cache..., antidiffusive_fluxes, fhat1_threaded, fhat2_threaded,
+ flux_temp_threaded)
+end
+
+function calc_volume_integral!(du, u,
+ mesh::TreeMesh{2},
+ nonconservative_terms, equations,
+ volume_integral::VolumeIntegralSubcellLimiting,
+ dg::DGSEM, cache)
+ @unpack limiter = volume_integral
+
+ @threaded for element in eachelement(dg, cache)
+ subcell_limiting_kernel!(du, u, element, mesh,
+ nonconservative_terms, equations,
+ volume_integral, limiter,
+ dg, cache)
+ end
+end
+
+@inline function subcell_limiting_kernel!(du, u,
+ element, mesh::TreeMesh{2},
+ nonconservative_terms::False, equations,
+ volume_integral, limiter::SubcellLimiterIDP,
+ dg::DGSEM, cache)
+ @unpack inverse_weights = dg.basis
+ @unpack volume_flux_dg, volume_flux_fv = volume_integral
+
+ # high-order DG fluxes
+ @unpack fhat1_threaded, fhat2_threaded = cache
+
+ fhat1 = fhat1_threaded[Threads.threadid()]
+ fhat2 = fhat2_threaded[Threads.threadid()]
+ calcflux_fhat!(fhat1, fhat2, u, mesh,
+ nonconservative_terms, equations, volume_flux_dg, dg, element, cache)
+
+ # low-order FV fluxes
+ @unpack fstar1_L_threaded, fstar1_R_threaded, fstar2_L_threaded, fstar2_R_threaded = cache
+
+ fstar1_L = fstar1_L_threaded[Threads.threadid()]
+ fstar2_L = fstar2_L_threaded[Threads.threadid()]
+ fstar1_R = fstar1_R_threaded[Threads.threadid()]
+ fstar2_R = fstar2_R_threaded[Threads.threadid()]
+ calcflux_fv!(fstar1_L, fstar1_R, fstar2_L, fstar2_R, u, mesh,
+ nonconservative_terms, equations, volume_flux_fv, dg, element, cache)
+
+ # antidiffusive flux
+ calcflux_antidiffusive!(fhat1, fhat2, fstar1_L, fstar2_L, u, mesh,
+ nonconservative_terms, equations, limiter, dg, element,
+ cache)
+
+ # Calculate volume integral contribution of low-order FV flux
+ for j in eachnode(dg), i in eachnode(dg)
+ for v in eachvariable(equations)
+ du[v, i, j, element] += inverse_weights[i] *
+ (fstar1_L[v, i + 1, j] - fstar1_R[v, i, j]) +
+ inverse_weights[j] *
+ (fstar2_L[v, i, j + 1] - fstar2_R[v, i, j])
+ end
+ end
+
+ return nothing
+end
+
+# Calculate the DG staggered volume fluxes `fhat` in subcell FV-form inside the element
+# (**without non-conservative terms**).
+#
+# See also `flux_differencing_kernel!`.
+@inline function calcflux_fhat!(fhat1, fhat2, u,
+ mesh::TreeMesh{2}, nonconservative_terms::False,
+ equations,
+ volume_flux, dg::DGSEM, element, cache)
+ @unpack weights, derivative_split = dg.basis
+ @unpack flux_temp_threaded = cache
+
+ flux_temp = flux_temp_threaded[Threads.threadid()]
+
+ # The FV-form fluxes are calculated in a recursive manner, i.e.:
+ # fhat_(0,1) = w_0 * FVol_0,
+ # fhat_(j,j+1) = fhat_(j-1,j) + w_j * FVol_j, for j=1,...,N-1,
+ # with the split form volume fluxes FVol_j = -2 * sum_i=0^N D_ji f*_(j,i).
+
+ # To use the symmetry of the `volume_flux`, the split form volume flux is precalculated
+ # like in `calc_volume_integral!` for the `VolumeIntegralFluxDifferencing`
+    # and saved in `flux_temp`.
+
+ # Split form volume flux in orientation 1: x direction
+ flux_temp .= zero(eltype(flux_temp))
+
+ for j in eachnode(dg), i in eachnode(dg)
+ u_node = get_node_vars(u, equations, dg, i, j, element)
+
+ # All diagonal entries of `derivative_split` are zero. Thus, we can skip
+ # the computation of the diagonal terms. In addition, we use the symmetry
+ # of the `volume_flux` to save half of the possible two-point flux
+ # computations.
+ for ii in (i + 1):nnodes(dg)
+ u_node_ii = get_node_vars(u, equations, dg, ii, j, element)
+ flux1 = volume_flux(u_node, u_node_ii, 1, equations)
+ multiply_add_to_node_vars!(flux_temp, derivative_split[i, ii], flux1,
+ equations, dg, i, j)
+ multiply_add_to_node_vars!(flux_temp, derivative_split[ii, i], flux1,
+ equations, dg, ii, j)
+ end
+ end
+
+ # FV-form flux `fhat` in x direction
+ fhat1[:, 1, :] .= zero(eltype(fhat1))
+ fhat1[:, nnodes(dg) + 1, :] .= zero(eltype(fhat1))
+
+ for j in eachnode(dg), i in 1:(nnodes(dg) - 1), v in eachvariable(equations)
+ fhat1[v, i + 1, j] = fhat1[v, i, j] + weights[i] * flux_temp[v, i, j]
+ end
+
+ # Split form volume flux in orientation 2: y direction
+ flux_temp .= zero(eltype(flux_temp))
+
+ for j in eachnode(dg), i in eachnode(dg)
+ u_node = get_node_vars(u, equations, dg, i, j, element)
+ for jj in (j + 1):nnodes(dg)
+ u_node_jj = get_node_vars(u, equations, dg, i, jj, element)
+ flux2 = volume_flux(u_node, u_node_jj, 2, equations)
+ multiply_add_to_node_vars!(flux_temp, derivative_split[j, jj], flux2,
+ equations, dg, i, j)
+ multiply_add_to_node_vars!(flux_temp, derivative_split[jj, j], flux2,
+ equations, dg, i, jj)
+ end
+ end
+
+ # FV-form flux `fhat` in y direction
+ fhat2[:, :, 1] .= zero(eltype(fhat2))
+ fhat2[:, :, nnodes(dg) + 1] .= zero(eltype(fhat2))
+
+ for j in 1:(nnodes(dg) - 1), i in eachnode(dg), v in eachvariable(equations)
+ fhat2[v, i, j + 1] = fhat2[v, i, j] + weights[j] * flux_temp[v, i, j]
+ end
+
+ return nothing
+end
+
+# Calculate the antidiffusive flux `antidiffusive_flux` as the subtraction between `fhat` and `fstar`.
+@inline function calcflux_antidiffusive!(fhat1, fhat2, fstar1, fstar2, u, mesh,
+ nonconservative_terms, equations,
+ limiter::SubcellLimiterIDP, dg, element, cache)
+ @unpack antidiffusive_flux1, antidiffusive_flux2 = cache.antidiffusive_fluxes
+
+ for j in eachnode(dg), i in 2:nnodes(dg)
+ for v in eachvariable(equations)
+ antidiffusive_flux1[v, i, j, element] = fhat1[v, i, j] - fstar1[v, i, j]
+ end
+ end
+ for j in 2:nnodes(dg), i in eachnode(dg)
+ for v in eachvariable(equations)
+ antidiffusive_flux2[v, i, j, element] = fhat2[v, i, j] - fstar2[v, i, j]
+ end
+ end
+
+ antidiffusive_flux1[:, 1, :, element] .= zero(eltype(antidiffusive_flux1))
+ antidiffusive_flux1[:, nnodes(dg) + 1, :, element] .= zero(eltype(antidiffusive_flux1))
+
+ antidiffusive_flux2[:, :, 1, element] .= zero(eltype(antidiffusive_flux2))
+ antidiffusive_flux2[:, :, nnodes(dg) + 1, element] .= zero(eltype(antidiffusive_flux2))
+
+ return nothing
+end
+end # @muladd
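The recursion for the FV-form fluxes `fhat` can be illustrated in one dimension for a single variable (placeholder numbers, not Trixi API): interface values are running sums of the weighted split-form volume contributions, with zero values at the element boundaries.

weights   = [0.5, 0.5, 0.5, 0.5]    # placeholder quadrature weights w_j
flux_temp = [0.1, -0.3, 0.2, 0.0]   # placeholder split-form volume contributions FVol_j
fhat = zeros(length(weights) + 1)   # one value per subcell interface
for j in 1:(length(weights) - 1)
    fhat[j + 1] = fhat[j] + weights[j] * flux_temp[j]
end
# fhat[1] and fhat[end] stay zero, matching the boundary handling above.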
diff --git a/src/solvers/dgsem_tree/subcell_limiters.jl b/src/solvers/dgsem_tree/subcell_limiters.jl
new file mode 100644
index 00000000000..3a707de3bc7
--- /dev/null
+++ b/src/solvers/dgsem_tree/subcell_limiters.jl
@@ -0,0 +1,103 @@
+# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
+# Since these FMAs can increase the performance of many numerical algorithms,
+# we need to opt-in explicitly.
+# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
+@muladd begin
+#! format: noindent
+
+abstract type AbstractSubcellLimiter end
+
+function create_cache(typ::Type{LimiterType},
+ semi) where {LimiterType <: AbstractSubcellLimiter}
+ create_cache(typ, mesh_equations_solver_cache(semi)...)
+end
+
+"""
+ SubcellLimiterIDP(equations::AbstractEquations, basis;
+ positivity_variables_cons = [],
+ positivity_correction_factor = 0.1)
+
+Subcell invariant domain preserving (IDP) limiting used with [`VolumeIntegralSubcellLimiting`](@ref)
+including:
+- positivity limiting for conservative variables (`positivity_variables_cons`)
+
+The bounds are calculated using the low-order FV solution. The positivity limiter uses
+`positivity_correction_factor` such that `u^new >= positivity_correction_factor * u^FV`.
+
+!!! note
+ This limiter and the correction callback [`SubcellLimiterIDPCorrection`](@ref) only work together.
+ Without the callback, no limiting takes place, leading to a standard flux-differencing DGSEM scheme.
+
+## References
+
+- Rueda-Ramírez, Pazner, Gassner (2022)
+ Subcell Limiting Strategies for Discontinuous Galerkin Spectral Element Methods
+ [DOI: 10.1016/j.compfluid.2022.105627](https://doi.org/10.1016/j.compfluid.2022.105627)
+- Pazner (2020)
+ Sparse invariant domain preserving discontinuous Galerkin methods with subcell convex limiting
+ [DOI: 10.1016/j.cma.2021.113876](https://doi.org/10.1016/j.cma.2021.113876)
+
+!!! warning "Experimental implementation"
+ This is an experimental feature and may change in future releases.
+"""
+struct SubcellLimiterIDP{RealT <: Real, Cache} <: AbstractSubcellLimiter
+ positivity::Bool
+ positivity_variables_cons::Vector{Int} # Positivity for conservative variables
+ positivity_correction_factor::RealT
+ cache::Cache
+end
+
+# this method is used when the indicator is constructed as for shock-capturing volume integrals
+function SubcellLimiterIDP(equations::AbstractEquations, basis;
+ positivity_variables_cons = [],
+ positivity_correction_factor = 0.1)
+ positivity = (length(positivity_variables_cons) > 0)
+ number_bounds = length(positivity_variables_cons)
+
+ cache = create_cache(SubcellLimiterIDP, equations, basis, number_bounds)
+
+ SubcellLimiterIDP{typeof(positivity_correction_factor), typeof(cache)}(positivity,
+ positivity_variables_cons,
+ positivity_correction_factor,
+ cache)
+end
+
+function Base.show(io::IO, limiter::SubcellLimiterIDP)
+ @nospecialize limiter # reduce precompilation time
+ @unpack positivity = limiter
+
+ print(io, "SubcellLimiterIDP(")
+ if !(positivity)
+ print(io, "No limiter selected => pure DG method")
+ else
+ print(io, "limiter=(")
+ positivity && print(io, "positivity")
+ print(io, "), ")
+ end
+ print(io, ")")
+end
+
+function Base.show(io::IO, ::MIME"text/plain", limiter::SubcellLimiterIDP)
+ @nospecialize limiter # reduce precompilation time
+ @unpack positivity = limiter
+
+ if get(io, :compact, false)
+ show(io, limiter)
+ else
+ if !(positivity)
+ setup = ["limiter" => "No limiter selected => pure DG method"]
+ else
+ setup = ["limiter" => ""]
+ if positivity
+ string = "positivity with conservative variables $(limiter.positivity_variables_cons)"
+ setup = [setup..., "" => string]
+ setup = [
+ setup...,
+ "" => " positivity correction factor = $(limiter.positivity_correction_factor)",
+ ]
+ end
+ end
+ summary_box(io, "SubcellLimiterIDP", setup)
+ end
+end
+end # @muladd
diff --git a/src/solvers/dgsem_tree/subcell_limiters_2d.jl b/src/solvers/dgsem_tree/subcell_limiters_2d.jl
new file mode 100644
index 00000000000..09ab84ed11a
--- /dev/null
+++ b/src/solvers/dgsem_tree/subcell_limiters_2d.jl
@@ -0,0 +1,114 @@
+# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
+# Since these FMAs can increase the performance of many numerical algorithms,
+# we need to opt-in explicitly.
+# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
+@muladd begin
+#! format: noindent
+
+# this method is used when the limiter is constructed as for shock-capturing volume integrals
+function create_cache(indicator::Type{SubcellLimiterIDP},
+ equations::AbstractEquations{2},
+ basis::LobattoLegendreBasis, number_bounds)
+ subcell_limiter_coefficients = Trixi.ContainerSubcellLimiterIDP2D{real(basis)
+ }(0,
+ nnodes(basis),
+ number_bounds)
+
+ cache = (; subcell_limiter_coefficients)
+
+ return cache
+end
+
+function (limiter::SubcellLimiterIDP)(u::AbstractArray{<:Any, 4}, semi, dg::DGSEM, t,
+ dt;
+ kwargs...)
+ @unpack alpha = limiter.cache.subcell_limiter_coefficients
+ alpha .= zero(eltype(alpha))
+
+ if limiter.positivity
+ @trixi_timeit timer() "positivity" idp_positivity!(alpha, limiter, u, dt,
+ semi)
+ end
+
+ # Calculate alpha1 and alpha2
+ @unpack alpha1, alpha2 = limiter.cache.subcell_limiter_coefficients
+ @threaded for element in eachelement(dg, semi.cache)
+ for j in eachnode(dg), i in 2:nnodes(dg)
+ alpha1[i, j, element] = max(alpha[i - 1, j, element], alpha[i, j, element])
+ end
+ for j in 2:nnodes(dg), i in eachnode(dg)
+ alpha2[i, j, element] = max(alpha[i, j - 1, element], alpha[i, j, element])
+ end
+ alpha1[1, :, element] .= zero(eltype(alpha1))
+ alpha1[nnodes(dg) + 1, :, element] .= zero(eltype(alpha1))
+ alpha2[:, 1, element] .= zero(eltype(alpha2))
+ alpha2[:, nnodes(dg) + 1, element] .= zero(eltype(alpha2))
+ end
+
+ return nothing
+end
+
+@inline function idp_positivity!(alpha, limiter, u, dt, semi)
+ # Conservative variables
+ for (index, variable) in enumerate(limiter.positivity_variables_cons)
+ idp_positivity!(alpha, limiter, u, dt, semi, variable, index)
+ end
+
+ return nothing
+end
+
+@inline function idp_positivity!(alpha, limiter, u, dt, semi, variable, index)
+ mesh, equations, dg, cache = mesh_equations_solver_cache(semi)
+ @unpack antidiffusive_flux1, antidiffusive_flux2 = cache.antidiffusive_fluxes
+ @unpack inverse_weights = dg.basis
+ @unpack positivity_correction_factor = limiter
+
+ @unpack variable_bounds = limiter.cache.subcell_limiter_coefficients
+
+ var_min = variable_bounds[index]
+
+ @threaded for element in eachelement(dg, semi.cache)
+ inverse_jacobian = cache.elements.inverse_jacobian[element]
+ for j in eachnode(dg), i in eachnode(dg)
+ var = u[variable, i, j, element]
+ if var < 0
+ error("Safe $variable is not safe. element=$element, node: $i $j, value=$var")
+ end
+
+ # Compute bound
+ var_min[i, j, element] = positivity_correction_factor * var
+
+ # Real one-sided Zalesak-type limiter
+ # * Zalesak (1979). "Fully multidimensional flux-corrected transport algorithms for fluids"
+ # * Kuzmin et al. (2010). "Failsafe flux limiting and constrained data projections for equations of gas dynamics"
+ # Note: The Zalesak limiter has to be computed, even if the state is valid, because the correction is
+ # for each interface, not each node
+ Qm = min(0, (var_min[i, j, element] - var) / dt)
+
+ # Calculate Pm
+ # Note: Boundaries of antidiffusive_flux1/2 are constant 0, so they make no difference here.
+ val_flux1_local = inverse_weights[i] *
+ antidiffusive_flux1[variable, i, j, element]
+ val_flux1_local_ip1 = -inverse_weights[i] *
+ antidiffusive_flux1[variable, i + 1, j, element]
+ val_flux2_local = inverse_weights[j] *
+ antidiffusive_flux2[variable, i, j, element]
+ val_flux2_local_jp1 = -inverse_weights[j] *
+ antidiffusive_flux2[variable, i, j + 1, element]
+
+ Pm = min(0, val_flux1_local) + min(0, val_flux1_local_ip1) +
+ min(0, val_flux2_local) + min(0, val_flux2_local_jp1)
+ Pm = inverse_jacobian * Pm
+
+ # Compute blending coefficient avoiding division by zero
+ # (as in paper of [Guermond, Nazarov, Popov, Thomas] (4.8))
+ Qm = abs(Qm) / (abs(Pm) + eps(typeof(Qm)) * 100)
+
+ # Calculate alpha
+ alpha[i, j, element] = max(alpha[i, j, element], 1 - Qm)
+ end
+ end
+
+ return nothing
+end
+end # @muladd
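A minimal scalar sketch of the one-sided Zalesak-type blending coefficient computed above (placeholder numbers, not Trixi API): `Qm` is the admissible decrease per time step, `Pm` collects the negative antidiffusive contributions, and `alpha` approaches 1, i.e. pure FV, when `Pm` would violate the bound.

var, var_min, dt = 1.0, 0.1, 0.01     # placeholder node value, bound, and time step
Pm = -150.0                           # placeholder sum of negative antidiffusive fluxes
Qm = min(0, (var_min - var) / dt)     # maximal admissible (negative) rate of change
ratio = abs(Qm) / (abs(Pm) + eps(typeof(Qm)) * 100)
alpha = max(0.0, 1 - ratio)           # 0 => no limiting (pure DG), 1 => full limiting (FV)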
diff --git a/src/time_integration/methods_SSP.jl b/src/time_integration/methods_SSP.jl
new file mode 100644
index 00000000000..8ecad69748b
--- /dev/null
+++ b/src/time_integration/methods_SSP.jl
@@ -0,0 +1,241 @@
+# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
+# Since these FMAs can increase the performance of many numerical algorithms,
+# we need to opt-in explicitly.
+# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
+@muladd begin
+#! format: noindent
+
+# Abstract base type for time integration schemes of explicit strong stability-preserving (SSP)
+# Runge-Kutta (RK) methods. They are high-order time discretizations that guarantee the TVD property.
+abstract type SimpleAlgorithmSSP end
+
+"""
+ SimpleSSPRK33(; stage_callbacks=())
+
+The third-order SSP Runge-Kutta method of Shu and Osher.
+
+## References
+
+- Shu, Osher (1988)
+ "Efficient Implementation of Essentially Non-oscillatory Shock-Capturing Schemes" (Eq. 2.18)
+ [DOI: 10.1016/0021-9991(88)90177-5](https://doi.org/10.1016/0021-9991(88)90177-5)
+
+!!! warning "Experimental implementation"
+ This is an experimental feature and may change in future releases.
+"""
+struct SimpleSSPRK33{StageCallbacks} <: SimpleAlgorithmSSP
+ a::SVector{3, Float64}
+ b::SVector{3, Float64}
+ c::SVector{3, Float64}
+ stage_callbacks::StageCallbacks
+
+ function SimpleSSPRK33(; stage_callbacks = ())
+ a = SVector(0.0, 3 / 4, 1 / 3)
+ b = SVector(1.0, 1 / 4, 2 / 3)
+ c = SVector(0.0, 1.0, 1 / 2)
+
+ # Butcher tableau
+ # c | a
+ # 0 |
+ # 1 | 1
+ # 1/2 | 1/4 1/4
+ # --------------------
+ # b | 1/6 1/6 2/3
+
+ new{typeof(stage_callbacks)}(a, b, c, stage_callbacks)
+ end
+end
+
+# This struct is needed to fake https://github.com/SciML/OrdinaryDiffEq.jl/blob/0c2048a502101647ac35faabd80da8a5645beac7/src/integrators/type.jl#L1
+mutable struct SimpleIntegratorSSPOptions{Callback}
+ callback::Callback # callbacks; used in Trixi
+ adaptive::Bool # whether the algorithm is adaptive; ignored
+ dtmax::Float64 # ignored
+ maxiters::Int # maximal number of time steps
+ tstops::Vector{Float64} # tstops from https://diffeq.sciml.ai/v6.8/basics/common_solver_opts/#Output-Control-1; ignored
+end
+
+function SimpleIntegratorSSPOptions(callback, tspan; maxiters = typemax(Int), kwargs...)
+ SimpleIntegratorSSPOptions{typeof(callback)}(callback, false, Inf, maxiters,
+ [last(tspan)])
+end
+
+# This struct is needed to fake https://github.com/SciML/OrdinaryDiffEq.jl/blob/0c2048a502101647ac35faabd80da8a5645beac7/src/integrators/type.jl#L77
+# This implements the interface components described at
+# https://diffeq.sciml.ai/v6.8/basics/integrator/#Handing-Integrators-1
+# which are used in Trixi.
+mutable struct SimpleIntegratorSSP{RealT <: Real, uType, Params, Sol, F, Alg,
+ SimpleIntegratorSSPOptions}
+ u::uType
+ du::uType
+ r0::uType
+ t::RealT
+ dt::RealT # current time step
+ dtcache::RealT # ignored
+ iter::Int # current number of time steps (iteration)
+ p::Params # will be the semidiscretization from Trixi
+ sol::Sol # faked
+ f::F
+ alg::Alg
+ opts::SimpleIntegratorSSPOptions
+ finalstep::Bool # added for convenience
+end
+
+# Forward integrator.stats.naccept to integrator.iter (see GitHub PR#771)
+function Base.getproperty(integrator::SimpleIntegratorSSP, field::Symbol)
+ if field === :stats
+ return (naccept = getfield(integrator, :iter),)
+ end
+ # general fallback
+ return getfield(integrator, field)
+end
+
+"""
+ solve(ode, alg; dt, callbacks, kwargs...)
+
+The following structures and methods provide the infrastructure for SSP Runge-Kutta methods
+of type `SimpleAlgorithmSSP`.
+
+!!! warning "Experimental implementation"
+ This is an experimental feature and may change in future releases.
+"""
+function solve(ode::ODEProblem, alg = SimpleSSPRK33()::SimpleAlgorithmSSP;
+ dt, callback = nothing, kwargs...)
+ u = copy(ode.u0)
+ du = similar(u)
+ r0 = similar(u)
+ t = first(ode.tspan)
+ iter = 0
+ integrator = SimpleIntegratorSSP(u, du, r0, t, dt, zero(dt), iter, ode.p,
+ (prob = ode,), ode.f, alg,
+ SimpleIntegratorSSPOptions(callback, ode.tspan;
+ kwargs...), false)
+
+ # resize container
+ resize!(integrator.p, nelements(integrator.p.solver, integrator.p.cache))
+
+ # initialize callbacks
+ if callback isa CallbackSet
+ for cb in callback.continuous_callbacks
+ error("unsupported")
+ end
+ for cb in callback.discrete_callbacks
+ cb.initialize(cb, integrator.u, integrator.t, integrator)
+ end
+ elseif !isnothing(callback)
+ error("unsupported")
+ end
+
+ for stage_callback in alg.stage_callbacks
+ init_callback(stage_callback, integrator.p)
+ end
+
+ solve!(integrator)
+end
+
+function solve!(integrator::SimpleIntegratorSSP)
+ @unpack prob = integrator.sol
+ @unpack alg = integrator
+ t_end = last(prob.tspan)
+ callbacks = integrator.opts.callback
+
+ integrator.finalstep = false
+ while !integrator.finalstep
+ if isnan(integrator.dt)
+ error("time step size `dt` is NaN")
+ end
+
+ # if the next iteration would push the simulation beyond the end time, set dt accordingly
+ if integrator.t + integrator.dt > t_end ||
+ isapprox(integrator.t + integrator.dt, t_end)
+ integrator.dt = t_end - integrator.t
+ terminate!(integrator)
+ end
+
+ @. integrator.r0 = integrator.u
+ for stage in eachindex(alg.c)
+ t_stage = integrator.t + integrator.dt * alg.c[stage]
+ # compute du
+ integrator.f(integrator.du, integrator.u, integrator.p, t_stage)
+
+ # perform forward Euler step
+ @. integrator.u = integrator.u + integrator.dt * integrator.du
+
+ for stage_callback in alg.stage_callbacks
+ stage_callback(integrator.u, integrator, stage)
+ end
+
+ # perform convex combination
+ @. integrator.u = alg.a[stage] * integrator.r0 + alg.b[stage] * integrator.u
+ end
+
+ integrator.iter += 1
+ integrator.t += integrator.dt
+
+ # handle callbacks
+ if callbacks isa CallbackSet
+ for cb in callbacks.discrete_callbacks
+ if cb.condition(integrator.u, integrator.t, integrator)
+ cb.affect!(integrator)
+ end
+ end
+ end
+
+ # respect maximum number of iterations
+ if integrator.iter >= integrator.opts.maxiters && !integrator.finalstep
+ @warn "Interrupted. Larger maxiters is needed."
+ terminate!(integrator)
+ end
+ end
+
+ for stage_callback in alg.stage_callbacks
+ finalize_callback(stage_callback, integrator.p)
+ end
+
+ return TimeIntegratorSolution((first(prob.tspan), integrator.t),
+ (prob.u0, integrator.u), prob)
+end
+
+# get a cache where the RHS can be stored
+get_du(integrator::SimpleIntegratorSSP) = integrator.du
+get_tmp_cache(integrator::SimpleIntegratorSSP) = (integrator.r0,)
+
+# some algorithms from DiffEq like FSAL-ones need to be informed when a callback has modified u
+u_modified!(integrator::SimpleIntegratorSSP, ::Bool) = false
+
+# used by adaptive timestepping algorithms in DiffEq
+function set_proposed_dt!(integrator::SimpleIntegratorSSP, dt)
+ integrator.dt = dt
+end
+
+# stop the time integration
+function terminate!(integrator::SimpleIntegratorSSP)
+ integrator.finalstep = true
+ empty!(integrator.opts.tstops)
+end
+
+# used for AMR
+function Base.resize!(integrator::SimpleIntegratorSSP, new_size)
+ resize!(integrator.u, new_size)
+ resize!(integrator.du, new_size)
+ resize!(integrator.r0, new_size)
+
+ # Resize container
+ resize!(integrator.p, new_size)
+end
+
+function Base.resize!(semi::AbstractSemidiscretization, new_size)
+ resize!(semi, semi.solver.volume_integral, new_size)
+end
+
+Base.resize!(semi, volume_integral::AbstractVolumeIntegral, new_size) = nothing
+
+function Base.resize!(semi, volume_integral::VolumeIntegralSubcellLimiting, new_size)
+ # Resize container antidiffusive_fluxes
+ resize!(semi.cache.antidiffusive_fluxes, new_size)
+
+ # Resize container subcell_limiter_coefficients
+ @unpack limiter = volume_integral
+ resize!(limiter.cache.subcell_limiter_coefficients, new_size)
+end
+end # @muladd
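For a scalar ODE, the stage loop in `solve!` reduces to the classical Shu-Osher form; a minimal standalone sketch with the same coefficients (illustrative only, not Trixi API):

# Each stage is a forward Euler step followed by a convex combination with u^n,
# exactly as in the stage loop above.
function ssprk33_step(u0, t, dt, f)
    a = (0.0, 3 / 4, 1 / 3)
    b = (1.0, 1 / 4, 2 / 3)
    c = (0.0, 1.0, 1 / 2)
    u = u0
    for stage in 1:3
        u = u + dt * f(u, t + c[stage] * dt)   # forward Euler step
        u = a[stage] * u0 + b[stage] * u       # convex combination with u^n
    end
    return u
end

ssprk33_step(1.0, 0.0, 0.1, (u, t) -> -u)      # ≈ 0.90483 ≈ exp(-0.1) for du/dt = -u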
diff --git a/src/time_integration/time_integration.jl b/src/time_integration/time_integration.jl
index 539e00ff700..c1e53527121 100644
--- a/src/time_integration/time_integration.jl
+++ b/src/time_integration/time_integration.jl
@@ -15,4 +15,5 @@ end
include("methods_2N.jl")
include("methods_3Sstar.jl")
+include("methods_SSP.jl")
end # @muladd
diff --git a/test/test_tree_2d_euler.jl b/test/test_tree_2d_euler.jl
index 6de380288db..e1e3ad32e7d 100644
--- a/test/test_tree_2d_euler.jl
+++ b/test/test_tree_2d_euler.jl
@@ -63,6 +63,12 @@ EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_2d_dgsem")
linf = [0.18527440131928286, 0.2404798030563736, 0.23269573860381076, 0.6874012187446894])
end
+ @trixi_testset "elixir_euler_shockcapturing_subcell.jl" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_shockcapturing_subcell.jl"),
+ l2 = [0.08508147906199143, 0.04510299017724501, 0.045103019801950375, 0.6930704343869766],
+ linf = [0.31123546471463326, 0.5616274869594462, 0.5619692712224448, 2.88670199345138])
+ end
+
@trixi_testset "elixir_euler_blast_wave.jl" begin
@test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_blast_wave.jl"),
l2 = [0.14170569763947993, 0.11647068900798814, 0.11647072556898294, 0.3391989213659599],
diff --git a/test/test_tree_2d_eulermulti.jl b/test/test_tree_2d_eulermulti.jl
index 800dc31f84f..606afca1034 100644
--- a/test/test_tree_2d_eulermulti.jl
+++ b/test/test_tree_2d_eulermulti.jl
@@ -19,6 +19,14 @@ EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_2d_dgsem")
tspan = (0.0, 0.001))
end
+ @trixi_testset "elixir_eulermulti_shock_bubble_shockcapturing_subcell_positivity.jl" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulermulti_shock_bubble_shockcapturing_subcell_positivity.jl"),
+ l2 = [81.52845664909304, 2.5455678559421346, 63229.190712645846, 0.19929478404550321, 0.011068604228443425],
+ linf = [249.21708417382013, 40.33299887640794, 174205.0118831558, 0.6881458768113586, 0.11274401158173972],
+ initial_refinement_level = 3,
+ tspan = (0.0, 0.001))
+ end
+
@trixi_testset "elixir_eulermulti_ec.jl" begin
@test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulermulti_ec.jl"),
l2 = [0.050182236154087095, 0.050189894464434635, 0.2258715597305131, 0.06175171559771687],
diff --git a/test/test_unit.jl b/test/test_unit.jl
index e70a9be6a4a..5c5291c2430 100644
--- a/test/test_unit.jl
+++ b/test/test_unit.jl
@@ -402,6 +402,9 @@ isdir(outdir) && rm(outdir, recursive=true)
indicator_hg = IndicatorHennemannGassner(1.0, 0.0, true, "variable", "cache")
@test_nowarn show(stdout, indicator_hg)
+ limiter_idp = SubcellLimiterIDP(true, [1], 0.1, "cache")
+ @test_nowarn show(stdout, limiter_idp)
+
# TODO: TrixiShallowWater: move unit test
indicator_hg_swe = IndicatorHennemannGassnerShallowWater(1.0, 0.0, true, "variable", "cache")
@test_nowarn show(stdout, indicator_hg_swe)
@@ -637,7 +640,7 @@ isdir(outdir) && rm(outdir, recursive=true)
for normal_direction in normal_directions
@test flux_hll(u, u, normal_direction, equations) ≈ flux(u, normal_direction, equations)
- end
+ end
end
@timed_testset "Consistency check for HLL flux (naive): SWE" begin
@@ -674,7 +677,7 @@ isdir(outdir) && rm(outdir, recursive=true)
SVector(0.0, 1.0),
SVector(0.5, -0.5),
SVector(-1.2, 0.3)]
- orientations = [1, 2]
+ orientations = [1, 2]
u_values = [SVector(1.0, 0.4, -0.5, 0.1, 1.0, 0.1, -0.2, 0.1, 0.0),
SVector(1.5, -0.2, 0.1, 0.2, 5.0, -0.1, 0.1, 0.2, 0.2),]
@@ -704,7 +707,7 @@ isdir(outdir) && rm(outdir, recursive=true)
for u in u_values, normal_direction in normal_directions
@test flux_hll(u, u, normal_direction, equations) ≈ flux(u, normal_direction, equations)
- end
+ end
end
@timed_testset "Consistency check for HLL flux with Davis wave speed estimates: CEE" begin
@@ -718,7 +721,7 @@ isdir(outdir) && rm(outdir, recursive=true)
for orientation in orientations
@test flux_hll(u, u, orientation, equations) ≈ flux(u, orientation, equations)
end
-
+
equations = CompressibleEulerEquations2D(1.4)
u = SVector(1.1, -0.5, 2.34, 5.5)
@@ -734,7 +737,7 @@ isdir(outdir) && rm(outdir, recursive=true)
for normal_direction in normal_directions
@test flux_hll(u, u, normal_direction, equations) ≈ flux(u, normal_direction, equations)
- end
+ end
equations = CompressibleEulerEquations3D(1.4)
u = SVector(1.1, -0.5, 2.34, 2.4, 5.5)
@@ -752,7 +755,7 @@ isdir(outdir) && rm(outdir, recursive=true)
for normal_direction in normal_directions
@test flux_hll(u, u, normal_direction, equations) ≈ flux(u, normal_direction, equations)
- end
+ end
end
@timed_testset "Consistency check for HLL flux with Davis wave speed estimates: LEE" begin
@@ -815,7 +818,7 @@ isdir(outdir) && rm(outdir, recursive=true)
SVector(0.0, 1.0),
SVector(0.5, -0.5),
SVector(-1.2, 0.3)]
- orientations = [1, 2]
+ orientations = [1, 2]
u_values = [SVector(1.0, 0.4, -0.5, 0.1, 1.0, 0.1, -0.2, 0.1, 0.0),
SVector(1.5, -0.2, 0.1, 0.2, 5.0, -0.1, 0.1, 0.2, 0.2),]
@@ -845,7 +848,7 @@ isdir(outdir) && rm(outdir, recursive=true)
for u in u_values, normal_direction in normal_directions
@test flux_hll(u, u, normal_direction, equations) ≈ flux(u, normal_direction, equations)
- end
+ end
end
@timed_testset "Consistency check for HLLE flux: CEE" begin
@@ -873,7 +876,7 @@ isdir(outdir) && rm(outdir, recursive=true)
for normal_direction in normal_directions
@test flux_hll(u, u, normal_direction, equations) ≈ flux(u, normal_direction, equations)
- end
+ end
equations = CompressibleEulerEquations3D(1.4)
u = SVector(1.1, -0.5, 2.34, 2.4, 5.5)
@@ -891,7 +894,7 @@ isdir(outdir) && rm(outdir, recursive=true)
for normal_direction in normal_directions
@test flux_hll(u, u, normal_direction, equations) ≈ flux(u, normal_direction, equations)
- end
+ end
end
@timed_testset "Consistency check for HLLE flux: SWE" begin
@@ -907,7 +910,7 @@ isdir(outdir) && rm(outdir, recursive=true)
SVector(0.0, 1.0),
SVector(0.5, -0.5),
SVector(-1.2, 0.3)]
- orientations = [1, 2]
+ orientations = [1, 2]
u = SVector(1, 0.5, 0.5, 0.0)
@@ -937,7 +940,7 @@ isdir(outdir) && rm(outdir, recursive=true)
SVector(0.0, 1.0),
SVector(0.5, -0.5),
SVector(-1.2, 0.3)]
- orientations = [1, 2]
+ orientations = [1, 2]
u_values = [SVector(1.0, 0.4, -0.5, 0.1, 1.0, 0.1, -0.2, 0.1, 0.0),
SVector(1.5, -0.2, 0.1, 0.2, 5.0, -0.1, 0.1, 0.2, 0.2),]
@@ -956,7 +959,7 @@ isdir(outdir) && rm(outdir, recursive=true)
SVector(0.0, 0.0, 1.0),
SVector(0.5, -0.5, 0.2),
SVector(-1.2, 0.3, 1.4)]
- orientations = [1, 2, 3]
+ orientations = [1, 2, 3]
u_values = [SVector(1.0, 0.4, -0.5, 0.1, 1.0, 0.1, -0.2, 0.1, 0.0),
SVector(1.5, -0.2, 0.1, 0.2, 5.0, -0.1, 0.1, 0.2, 0.2),]
@@ -967,7 +970,7 @@ isdir(outdir) && rm(outdir, recursive=true)
for u in u_values, normal_direction in normal_directions
@test flux_hll(u, u, normal_direction, equations) ≈ flux(u, normal_direction, equations)
- end
+ end
end
@timed_testset "Consistency check for Godunov flux" begin
@@ -1137,7 +1140,7 @@ isdir(outdir) && rm(outdir, recursive=true)
SVector(-1.2, 0.3)]
u_values = [SVector(1.0, 0.5, -0.7, 1.0),
SVector(1.5, -0.2, 0.1, 5.0),]
- fluxes = [flux_central, flux_ranocha, flux_shima_etal, flux_kennedy_gruber,
+ fluxes = [flux_central, flux_ranocha, flux_shima_etal, flux_kennedy_gruber,
flux_hll, FluxHLL(min_max_speed_davis)]
for f_std in fluxes
@@ -1157,7 +1160,7 @@ isdir(outdir) && rm(outdir, recursive=true)
SVector(-1.2, 0.3, 1.4)]
u_values = [SVector(1.0, 0.5, -0.7, 0.1, 1.0),
SVector(1.5, -0.2, 0.1, 0.2, 5.0),]
- fluxes = [flux_central, flux_ranocha, flux_shima_etal, flux_kennedy_gruber, FluxLMARS(340),
+ fluxes = [flux_central, flux_ranocha, flux_shima_etal, flux_kennedy_gruber, FluxLMARS(340),
flux_hll, FluxHLL(min_max_speed_davis)]
for f_std in fluxes
@@ -1173,11 +1176,11 @@ isdir(outdir) && rm(outdir, recursive=true)
normal_directions = [SVector(1.0, 0.0),
SVector(0.0, 1.0),
SVector(0.5, -0.5),
- SVector(-1.2, 0.3)]
+ SVector(-1.2, 0.3)]
u = SVector(1, 0.5, 0.5, 0.0)
- fluxes = [flux_central, flux_fjordholm_etal, flux_wintermeyer_etal,
+ fluxes = [flux_central, flux_fjordholm_etal, flux_wintermeyer_etal,
flux_hll, FluxHLL(min_max_speed_davis), FluxHLL(min_max_speed_einfeldt)]
end
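As a usage sketch (not part of the patch itself): the new shock-bubble elixir added above can also be run interactively with the same parameter overrides the test uses, via Trixi.jl's `trixi_include`:
```julia
using Trixi

# Same path construction as in the test file; the overrides below are the
# coarse test settings, not production values.
elixir = joinpath(pkgdir(Trixi, "examples", "tree_2d_dgsem"),
                  "elixir_eulermulti_shock_bubble_shockcapturing_subcell_positivity.jl")
trixi_include(elixir, initial_refinement_level = 3, tspan = (0.0, 0.001))
```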
From 925c0474938a2b6e095a795a4c155b40965625e0 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Tue, 22 Aug 2023 15:58:08 +0200
Subject: [PATCH 05/36] set version to v0.5.39
---
Project.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Project.toml b/Project.toml
index dd937ed213b..6a11655d345 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "Trixi"
uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb"
authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "]
-version = "0.5.39-pre"
+version = "0.5.39"
[deps]
CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
From add3a7f9e9c4b7445ee669e43afd8b84350e04d4 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Tue, 22 Aug 2023 15:58:20 +0200
Subject: [PATCH 06/36] set development version to v0.5.40-pre
---
Project.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Project.toml b/Project.toml
index 6a11655d345..4374eaa3b0a 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "Trixi"
uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb"
authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "]
-version = "0.5.39"
+version = "0.5.40-pre"
[deps]
CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
From bfefef15d3ccd71f43a5518560d0cd96b599881b Mon Sep 17 00:00:00 2001
From: Daniel Doehring
Date: Tue, 22 Aug 2023 17:21:45 +0200
Subject: [PATCH 07/36] Update visualization.md (#1612)
Compare
[examples/tree\_2d\_dgsem/elixir\_advection\_amr\_visualization.jl](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_dgsem/elixir_advection_amr_visualization.jl):
to (existing)
[examples/tree_2d_dgsem/elixir\_advection\_amr\_visualization.jl](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_dgsem/elixir_advection_amr_visualization.jl):
---
docs/src/visualization.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/src/visualization.md b/docs/src/visualization.md
index 8f72bb4b1c6..4e4b780004d 100644
--- a/docs/src/visualization.md
+++ b/docs/src/visualization.md
@@ -375,7 +375,7 @@ During the simulation, the visualization callback creates and displays
visualizations of the current solution in regular intervals. This can be useful
to, e.g., monitor the validity of a long-running simulation or for illustrative
purposes. An example for how to create a `VisualizationCallback` can be found in
-[examples/tree_2d_dgsem/elixir\_advection\_amr\_visualization.jl](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_dgsem/elixir_advection_amr_visualization.jl):
+[examples/tree\_2d\_dgsem/elixir\_advection\_amr\_visualization.jl](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_dgsem/elixir_advection_amr_visualization.jl):
```julia
[...]
From e22b11e5859b5168e24dabb4cb34a6a35fce3e66 Mon Sep 17 00:00:00 2001
From: Michael Schlottke-Lakemper
Date: Tue, 22 Aug 2023 19:30:35 +0200
Subject: [PATCH 08/36] Add review checklist to new PRs (#1609)
Co-authored-by: Hendrik Ranocha
---
.github/review-checklist.md | 38 +++++++++++++++++++++++++++
.github/workflows/ReviewChecklist.yml | 20 ++++++++++++++
2 files changed, 58 insertions(+)
create mode 100644 .github/review-checklist.md
create mode 100644 .github/workflows/ReviewChecklist.yml
diff --git a/.github/review-checklist.md b/.github/review-checklist.md
new file mode 100644
index 00000000000..2d8a24f1971
--- /dev/null
+++ b/.github/review-checklist.md
@@ -0,0 +1,38 @@
+### Review checklist
+
+This checklist is meant to assist creators of PRs (to let them know what reviewers will typically look for) and reviewers (to guide them in a structured review process). Items do not need to be checked explicitly for a PR to be eligible for merging.
+
+#### Purpose and scope
+- [ ] The PR has a single goal that is clear from the PR title and/or description.
+- [ ] All code changes represent a single set of modifications that logically belong together.
+- [ ] No more than 500 lines of code are changed or there is no obvious way to split the PR into multiple PRs.
+
+#### Code quality
+- [ ] The code can be understood easily.
+- [ ] Newly introduced names for variables etc. are self-descriptive and consistent with existing naming conventions.
+- [ ] There are no redundancies that can be removed by simple modularization/refactoring.
+- [ ] There are no leftover debug statements or commented code sections.
+- [ ] The code adheres to our [conventions](https://trixi-framework.github.io/Trixi.jl/stable/conventions/) and [style guide](https://trixi-framework.github.io/Trixi.jl/stable/styleguide/), and to the [Julia guidelines](https://docs.julialang.org/en/v1/manual/style-guide/).
+
+#### Documentation
+- [ ] New functions and types are documented with a docstring or top-level comment.
+- [ ] Relevant publications are referenced in docstrings (see [example](https://github.com/trixi-framework/Trixi.jl/blob/7f83a1a938eecd9b841efe215a6e482e67cfdcc1/src/equations/compressible_euler_2d.jl#L601-L615) for formatting).
+- [ ] Inline comments are used to document longer or unusual code sections.
+- [ ] Comments describe intent ("why?") and not just functionality ("what?").
+- [ ] If the PR introduces a significant change or new feature, it is documented in `NEWS.md`.
+
+#### Testing
+- [ ] The PR passes all tests.
+- [ ] New or modified lines of code are covered by tests.
+- [ ] New or modified tests run in less than 10 seconds.
+
+#### Performance
+- [ ] There are no type instabilities or memory allocations in performance-critical parts.
+- [ ] If the PR intent is to improve performance, before/after [time measurements](https://trixi-framework.github.io/Trixi.jl/stable/performance/#Manual-benchmarking) are posted in the PR.
+
+#### Verification
+- [ ] The correctness of the code was verified using appropriate tests.
+- [ ] If new equations/methods are added, a convergence test has been run and the results
+ are posted in the PR.
+
+*Created with :heart: by the Trixi.jl community.*
\ No newline at end of file
diff --git a/.github/workflows/ReviewChecklist.yml b/.github/workflows/ReviewChecklist.yml
new file mode 100644
index 00000000000..959a04752d7
--- /dev/null
+++ b/.github/workflows/ReviewChecklist.yml
@@ -0,0 +1,20 @@
+name: Add review checklist
+
+on:
+ pull_request_target:
+ types: [opened]
+
+permissions:
+ pull-requests: write
+
+jobs:
+ add-review-checklist:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Check out repository
+ uses: actions/checkout@v3
+ - name: Add review checklist
+ uses: trixi-framework/add-pr-review-checklist@v1
+ with:
+ file: '.github/review-checklist.md'
+ no-checklist-keyword: '[no checklist]'
\ No newline at end of file
From e82ebb557a280abb87e7b6f3db9d90bcac715321 Mon Sep 17 00:00:00 2001
From: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com>
Date: Wed, 30 Aug 2023 20:16:45 +0200
Subject: [PATCH 09/36] unify how we refer to links to elixirs in docs (#1620)
---
.../src/files/differentiable_programming.jl | 2 +-
docs/literate/src/files/index.jl | 2 +-
docs/src/callbacks.md | 22 +++++++++----------
docs/src/meshes/dgmulti_mesh.md | 14 +++++++-----
docs/src/overview.md | 4 ++--
docs/src/restart.md | 10 ++++-----
docs/src/visualization.md | 2 +-
7 files changed, 30 insertions(+), 26 deletions(-)
diff --git a/docs/literate/src/files/differentiable_programming.jl b/docs/literate/src/files/differentiable_programming.jl
index ecc09d05dcf..5c5a7cd7440 100644
--- a/docs/literate/src/files/differentiable_programming.jl
+++ b/docs/literate/src/files/differentiable_programming.jl
@@ -128,7 +128,7 @@ condition_number = cond(V)
# you can compute the gradient of an entropy-dissipative semidiscretization with respect to the
# ideal gas constant of the compressible Euler equations as described in the following. This example
# is also available as the elixir
-# [examples/special\_elixirs/elixir\_euler\_ad.jl](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/special_elixirs/elixir_euler_ad.jl)
+# [`examples/special_elixirs/elixir_euler_ad.jl`](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/special_elixirs/elixir_euler_ad.jl)
# First, we create a semidiscretization of the compressible Euler equations.
diff --git a/docs/literate/src/files/index.jl b/docs/literate/src/files/index.jl
index 5b669881502..0c8de66bf42 100644
--- a/docs/literate/src/files/index.jl
+++ b/docs/literate/src/files/index.jl
@@ -116,7 +116,7 @@
# ## Examples in Trixi.jl
# Trixi.jl already contains several more coding examples, the so-called `elixirs`. You can find them
-# in the folder [`examples`](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/).
+# in the folder [`examples/`](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/).
# They are structured by the underlying mesh type and the respective number of spatial dimensions.
# The name of an elixir is composed of the underlying system of conservation equations (for instance
# `advection` or `euler`) and other special characteristics like the initial condition
diff --git a/docs/src/callbacks.md b/docs/src/callbacks.md
index a85f8e8191b..1d3e5e34b51 100644
--- a/docs/src/callbacks.md
+++ b/docs/src/callbacks.md
@@ -15,7 +15,7 @@ control, adaptive mesh refinement, I/O, and more.
### CFL-based time step control
Time step control can be performed with a [`StepsizeCallback`](@ref). An example making use
-of this can be found at [examples/tree_2d_dgsem/elixir\_advection\_basic.jl](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_dgsem/elixir_advection_basic.jl)
+of this can be found at [`examples/tree_2d_dgsem/elixir_advection_basic.jl`](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_dgsem/elixir_advection_basic.jl)
### Adaptive mesh refinement
Trixi.jl uses a hierarchical Cartesian mesh which can be locally refined in a solution-adaptive way.
@@ -24,12 +24,12 @@ passing an [`AMRCallback`](@ref) to the ODE solver. The `AMRCallback` requires a
[`ControllerThreeLevel`](@ref) or [`ControllerThreeLevelCombined`](@ref) to tell the AMR
algorithm which cells to refine/coarsen.
-An example elixir using AMR can be found at [examples/tree_2d_dgsem/elixir\_advection\_amr.jl](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_dgsem/elixir_advection_amr.jl).
+An example elixir using AMR can be found at [`examples/tree_2d_dgsem/elixir_advection_amr.jl`](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_dgsem/elixir_advection_amr.jl).
### Analyzing the numerical solution
The [`AnalysisCallback`](@ref) can be used to analyze the numerical solution, e.g. calculate
errors or user-specified integrals, and print the results to the screen. The results can also be
-saved in a file. An example can be found at [examples/tree_2d_dgsem/elixir\_euler\_vortex.jl](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_dgsem/elixir_euler_vortex.jl).
+saved in a file. An example can be found at [`examples/tree_2d_dgsem/elixir_euler_vortex.jl`](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_dgsem/elixir_euler_vortex.jl).
In [Performance metrics of the `AnalysisCallback`](@ref) you can find a detailed
description of the different performance metrics the `AnalysisCallback` computes.
@@ -38,15 +38,15 @@ description of the different performance metrics the `AnalysisCallback` computes
#### Solution and restart files
To save the solution in regular intervals you can use a [`SaveSolutionCallback`](@ref). It is also
possible to create restart files using the [`SaveRestartCallback`](@ref). An example making use
-of these can be found at [examples/tree_2d_dgsem/elixir\_advection\_extended.jl](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_dgsem/elixir_advection_extended.jl).
+of these can be found at [`examples/tree_2d_dgsem/elixir_advection_extended.jl`](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_dgsem/elixir_advection_extended.jl).
An example showing how to restart a simulation from a restart file can be found at
-[examples/tree_2d_dgsem/elixir\_advection\_restart.jl](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_dgsem/elixir_advection_restart.jl).
+[`examples/tree_2d_dgsem/elixir_advection_restart.jl`](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_dgsem/elixir_advection_restart.jl).
#### Time series
Sometimes it is useful to record the evaluations of state variables over time at
a given set of points. This can be achieved by the [`TimeSeriesCallback`](@ref), which is used,
e.g., in
-[examples/tree_2d_dgsem/elixir\_acoustics\_gaussian\_source.jl](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_dgsem/elixir_acoustics_gaussian_source.jl).
+[`examples/tree_2d_dgsem/elixir_acoustics_gaussian_source.jl`](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_dgsem/elixir_acoustics_gaussian_source.jl).
The `TimeSeriesCallback` constructor expects a semidiscretization and a list of points at
which the solution should be recorded in regular time step intervals. After the
last time step, the entire record is stored in an HDF5 file.
@@ -113,12 +113,12 @@ will yield the following plot:
Some callbacks provided by Trixi.jl implement specific features for certain equations:
* The [`LBMCollisionCallback`](@ref) implements the Lattice-Boltzmann method (LBM) collision
operator and should only be used when solving the Lattice-Boltzmann equations. See e.g.
- [examples/tree_2d_dgsem/elixir\_lbm\_constant.jl](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_dgsem/elixir_lbm_constant.jl)
+ [`examples/tree_2d_dgsem/elixir_lbm_constant.jl`](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_dgsem/elixir_lbm_constant.jl)
* The [`SteadyStateCallback`](@ref) terminates the time integration when the residual steady state
falls below a certain threshold. This checks the convergence of the potential ``\phi`` for
- hyperbolic diffusion. See e.g. [examples/tree_2d_dgsem/elixir\_hypdiff\_nonperiodic.jl](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_dgsem/elixir_hypdiff_nonperiodic.jl).
+ hyperbolic diffusion. See e.g. [`examples/tree_2d_dgsem/elixir_hypdiff_nonperiodic.jl`](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_dgsem/elixir_hypdiff_nonperiodic.jl).
* The [`GlmSpeedCallback`](@ref) updates the divergence cleaning wave speed `c_h` for the ideal
- GLM-MHD equations. See e.g. [examples/tree_2d_dgsem/elixir\_mhd\_alfven\_wave.jl](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_dgsem/elixir_mhd_alfven_wave.jl).
+ GLM-MHD equations. See e.g. [`examples/tree_2d_dgsem/elixir_mhd_alfven_wave.jl`](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_dgsem/elixir_mhd_alfven_wave.jl).
## Usage of step callbacks
Step callbacks are passed to the `solve` method from the ODE solver via the keyword argument
@@ -152,7 +152,7 @@ more callbacks, you need to turn them into a `CallbackSet` first by calling
## Stage callbacks
[`PositivityPreservingLimiterZhangShu`](@ref) is a positivity-preserving limiter, used to enforce
physical constraints. An example elixir using this feature can be found at
-[examples/tree_2d_dgsem/elixir\_euler\_positivity.jl](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_dgsem/elixir_euler_positivity.jl).
+[`examples/tree_2d_dgsem/elixir_euler_positivity.jl`](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_dgsem/elixir_euler_positivity.jl).
## Implementing new callbacks
Since Trixi.jl is compatible with [OrdinaryDiffEq.jl](https://github.com/SciML/OrdinaryDiffEq.jl),
@@ -162,4 +162,4 @@ Step callbacks are just called [callbacks](https://diffeq.sciml.ai/latest/featur
Stage callbacks are called [`stage_limiter!`](https://diffeq.sciml.ai/latest/solvers/ode_solve/#Explicit-Strong-Stability-Preserving-Runge-Kutta-Methods-for-Hyperbolic-PDEs-(Conservation-Laws)).
An example elixir showing how to implement a new simple stage callback and a new simple step
-callback can be found at [examples/tree_2d_dgsem/elixir\_advection\_callbacks.jl](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_dgsem/elixir_advection_callbacks.jl).
+callback can be found at [`examples/tree_2d_dgsem/elixir_advection_callbacks.jl`](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_dgsem/elixir_advection_callbacks.jl).
diff --git a/docs/src/meshes/dgmulti_mesh.md b/docs/src/meshes/dgmulti_mesh.md
index e07ba70a80a..fc086bba146 100644
--- a/docs/src/meshes/dgmulti_mesh.md
+++ b/docs/src/meshes/dgmulti_mesh.md
@@ -81,16 +81,20 @@ type, but will be more efficient at high orders of approximation.
## Trixi.jl elixirs on simplicial and tensor product element meshes
Example elixirs with triangular, quadrilateral, and tetrahedral meshes can be found in
-the `examples/dgmulti_2d` and `examples/dgmulti_3d` folders. Some key elixirs to look at:
+the [`examples/dgmulti_2d/`](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/dgmulti_2d/)
+and [`examples/dgmulti_3d/`](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/dgmulti_3d/)
+folders. Some key elixirs to look at:
-* `examples/dgmulti_2d/elixir_euler_weakform.jl`: basic weak form DG discretization on a uniform triangular mesh.
+* [`examples/dgmulti_2d/elixir_euler_weakform.jl`](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/dgmulti_2d/elixir_euler_weakform.jl):
+ basic weak form DG discretization on a uniform triangular mesh.
Changing `element_type = Quad()` or `approximation_type = SBP()` will switch to a quadrilateral mesh
or an SBP-type discretization. Changing `surface_integral = SurfaceIntegralWeakForm(flux_ec)` and
`volume_integral = VolumeIntegralFluxDifferencing(flux_ec)` for some entropy conservative flux
(e.g., [`flux_chandrashekar`](@ref) or [`flux_ranocha`](@ref)) will switch to an entropy conservative formulation.
-* `examples/dgmulti_2d/elixir_euler_triangulate_pkg_mesh.jl`: uses an unstructured mesh generated by
- [Triangulate.jl](https://github.com/JuliaGeometry/Triangulate.jl).
-* `examples/dgmulti_3d/elixir_euler_weakform.jl`: basic weak form DG discretization on a uniform tetrahedral mesh.
+* [`examples/dgmulti_2d/elixir_euler_triangulate_pkg_mesh.jl`](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/dgmulti_2d/elixir_euler_triangulate_pkg_mesh.jl):
+ uses an unstructured mesh generated by [Triangulate.jl](https://github.com/JuliaGeometry/Triangulate.jl).
+* [`examples/dgmulti_3d/elixir_euler_weakform.jl`](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/dgmulti_3d/elixir_euler_weakform.jl):
+ basic weak form DG discretization on a uniform tetrahedral mesh.
Changing `element_type = Hex()` will switch to a hexahedral mesh. Changing
`surface_integral = SurfaceIntegralWeakForm(flux_ec)` and
`volume_integral = VolumeIntegralFluxDifferencing(flux_ec)` for some entropy conservative flux
diff --git a/docs/src/overview.md b/docs/src/overview.md
index 519ec2ca424..46bc28b6025 100644
--- a/docs/src/overview.md
+++ b/docs/src/overview.md
@@ -5,7 +5,7 @@ conservation laws. Thus, it is not a monolithic PDE solver that is configured at
via parameter files, as it is often found in classical numerical simulation codes.
Instead, each simulation is configured by pure Julia code. Many examples of such
simulation setups, called *elixirs* in Trixi.jl, are provided in the
-[examples](https://github.com/trixi-framework/Trixi.jl/blob/main/examples)
+[`examples/`](https://github.com/trixi-framework/Trixi.jl/blob/main/examples)
folder.
Trixi.jl uses the method of lines, i.e., the full space-time discretization is separated into two steps;
@@ -77,7 +77,7 @@ Further information can be found in the
## Next steps
We explicitly encourage people interested in Trixi.jl to have a look at the
-[examples](https://github.com/trixi-framework/Trixi.jl/blob/main/examples)
+[`examples/`](https://github.com/trixi-framework/Trixi.jl/blob/main/examples)
bundled with Trixi.jl to get an impression of what is possible and the general
look and feel of Trixi.jl.
Before doing that, it is usually good to get an idea of
diff --git a/docs/src/restart.md b/docs/src/restart.md
index d24d93cb297..767269ff27d 100644
--- a/docs/src/restart.md
+++ b/docs/src/restart.md
@@ -18,7 +18,7 @@ save_restart = SaveRestartCallback(interval=100,
Make this part of your `CallbackSet`.
An example is
-[```examples/examples/structured_2d_dgsem/elixir_advection_extended.jl```](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/structured_2d_dgsem/elixir_advection_extended.jl).
+[`examples/examples/structured_2d_dgsem/elixir_advection_extended.jl`](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/structured_2d_dgsem/elixir_advection_extended.jl).
## [Perform the simulation restart](@id restart_perform)
@@ -26,7 +26,7 @@ Since all of the information about the simulation can be obtained from the
last snapshot, the restart can be done with relatively few lines
in an extra elixir file.
However, some might prefer to keep everything in one elixir and
-conditionals like ```if restart``` with a boolean variable ```restart``` that is user defined.
+conditionals like `if restart` with a boolean variable `restart` that is user defined.
First we need to define from which file we want to restart, e.g.
```julia
@@ -50,7 +50,7 @@ time the one form the snapshot:
tspan = (load_time(restart_filename), 2.0)
```
-We now also take the last ```dt```, so that our solver does not need to first find
+We now also take the last `dt`, so that our solver does not need to first find
one to fulfill the CFL condition:
```julia
dt = load_dt(restart_filename)
@@ -63,7 +63,7 @@ ode = semidiscretize(semi, tspan, restart_filename)
You should now define a [`SaveSolutionCallback`](@ref) similar to the
[original simulation](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/structured_2d_dgsem/elixir_advection_extended.jl),
-but with ```save_initial_solution=false```, otherwise our initial snapshot will be overwritten.
+but with `save_initial_solution=false`, otherwise our initial snapshot will be overwritten.
If you are using one file for the original simulation and the restart
you can reuse your [`SaveSolutionCallback`](@ref), but need to set
```julia
@@ -86,4 +86,4 @@ Now we can compute the solution:
sol = solve!(integrator)
```
-An example is in `[``examples/structured_2d_dgsem/elixir_advection_restart.jl```](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/structured_2d_dgsem/elixir_advection_restart.jl).
+An example is in [`examples/structured_2d_dgsem/elixir_advection_restart.jl`](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/structured_2d_dgsem/elixir_advection_restart.jl).
diff --git a/docs/src/visualization.md b/docs/src/visualization.md
index 4e4b780004d..36a7e8f5ac8 100644
--- a/docs/src/visualization.md
+++ b/docs/src/visualization.md
@@ -375,7 +375,7 @@ During the simulation, the visualization callback creates and displays
visualizations of the current solution in regular intervals. This can be useful
to, e.g., monitor the validity of a long-running simulation or for illustrative
purposes. An example for how to create a `VisualizationCallback` can be found in
-[examples/tree\_2d\_dgsem/elixir\_advection\_amr\_visualization.jl](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_dgsem/elixir_advection_amr_visualization.jl):
+[`examples/tree_2d_dgsem/elixir_advection_amr_visualization.jl`](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_dgsem/elixir_advection_amr_visualization.jl):
```julia
[...]
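The restart snippets touched above fit together as follows; a minimal sketch assembled from the quoted docs (the restart file name is hypothetical, and `semi`/`integrator` are assumed to be set up as in the original elixir):
```julia
restart_filename = joinpath("out", "restart_000021.h5")  # hypothetical snapshot file

tspan = (load_time(restart_filename), 2.0)   # resume at the stored time
dt = load_dt(restart_filename)               # reuse the last step size (CFL already satisfied)
ode = semidiscretize(semi, tspan, restart_filename)

save_solution = SaveSolutionCallback(interval = 100,
                                     save_initial_solution = false)  # do not overwrite the snapshot

sol = solve!(integrator)
```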
From af70d89eb35b30561833a20a6d6d3bb6e9567264 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Fri, 1 Sep 2023 15:35:30 +0200
Subject: [PATCH 10/36] update affiliation of HR (#1621)
---
.zenodo.json | 2 +-
AUTHORS.md | 2 +-
README.md | 2 +-
docs/src/index.md | 2 +-
4 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/.zenodo.json b/.zenodo.json
index 95879af1e90..905c0170ab9 100644
--- a/.zenodo.json
+++ b/.zenodo.json
@@ -15,7 +15,7 @@
"orcid": "0000-0002-1752-1158"
},
{
- "affiliation": "Applied Mathematics, University of Hamburg, Germany",
+ "affiliation": "Numerical Mathematics, Johannes Gutenberg University Mainz, Germany",
"name": "Ranocha, Hendrik",
"orcid": "0000-0002-3456-2277"
},
diff --git a/AUTHORS.md b/AUTHORS.md
index 74bfaa9c852..f1debf8ba76 100644
--- a/AUTHORS.md
+++ b/AUTHORS.md
@@ -12,7 +12,7 @@ provided substantial additions or modifications. Together, these two groups form
* [Gregor Gassner](https://www.mi.uni-koeln.de/NumSim/gregor-gassner),
University of Cologne, Germany
* [Hendrik Ranocha](https://ranocha.de),
- University of Hamburg, Germany
+ Johannes Gutenberg University Mainz, Germany
* [Andrew Winters](https://liu.se/en/employee/andwi94),
Linköping University, Sweden
* [Jesse Chan](https://jlchan.github.io),
diff --git a/README.md b/README.md
index 7eaee8750dd..63540b1f640 100644
--- a/README.md
+++ b/README.md
@@ -247,7 +247,7 @@ Schlottke-Lakemper](https://lakemper.eu)
(RWTH Aachen University/High-Performance Computing Center Stuttgart (HLRS), Germany) and
[Gregor Gassner](https://www.mi.uni-koeln.de/NumSim/gregor-gassner)
(University of Cologne, Germany). Together with [Hendrik Ranocha](https://ranocha.de)
-(University of Hamburg, Germany), [Andrew Winters](https://liu.se/en/employee/andwi94)
+(Johannes Gutenberg University Mainz, Germany), [Andrew Winters](https://liu.se/en/employee/andwi94)
(Linköping University, Sweden), and [Jesse Chan](https://jlchan.github.io) (Rice University, US),
they are the principal developers of Trixi.jl.
The full list of contributors can be found in [AUTHORS.md](AUTHORS.md).
diff --git a/docs/src/index.md b/docs/src/index.md
index 3af785bc681..bb2afd1019f 100644
--- a/docs/src/index.md
+++ b/docs/src/index.md
@@ -324,7 +324,7 @@ Schlottke-Lakemper](https://lakemper.eu)
(RWTH Aachen University/High-Performance Computing Center Stuttgart (HLRS), Germany) and
[Gregor Gassner](https://www.mi.uni-koeln.de/NumSim/gregor-gassner)
(University of Cologne, Germany). Together with [Hendrik Ranocha](https://ranocha.de)
-(University of Hamburg, Germany) and [Andrew Winters](https://liu.se/en/employee/andwi94)
+(Johannes Gutenberg University Mainz, Germany) and [Andrew Winters](https://liu.se/en/employee/andwi94)
(Linköping University, Sweden), and [Jesse Chan](https://jlchan.github.io) (Rice University, US),
they are the principal developers of Trixi.jl.
The full list of contributors can be found under [Authors](@ref).
From 6403a480825dcebdd10cb90584c5cc877b4b2c5d Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 1 Sep 2023 19:13:47 +0200
Subject: [PATCH 11/36] Bump crate-ci/typos from 1.16.5 to 1.16.9 (#1622)
Bumps [crate-ci/typos](https://github.com/crate-ci/typos) from 1.16.5 to 1.16.9.
- [Release notes](https://github.com/crate-ci/typos/releases)
- [Changelog](https://github.com/crate-ci/typos/blob/master/CHANGELOG.md)
- [Commits](https://github.com/crate-ci/typos/compare/v1.16.5...v1.16.9)
---
updated-dependencies:
- dependency-name: crate-ci/typos
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
.github/workflows/SpellCheck.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/SpellCheck.yml b/.github/workflows/SpellCheck.yml
index 6ebb288ea30..a06121e7ca1 100644
--- a/.github/workflows/SpellCheck.yml
+++ b/.github/workflows/SpellCheck.yml
@@ -10,4 +10,4 @@ jobs:
- name: Checkout Actions Repository
uses: actions/checkout@v3
- name: Check spelling
- uses: crate-ci/typos@v1.16.5
+ uses: crate-ci/typos@v1.16.9
From bd5ba865478a889c48a7675072d921906c27c0c4 Mon Sep 17 00:00:00 2001
From: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com>
Date: Wed, 6 Sep 2023 09:15:38 +0200
Subject: [PATCH 12/36] Update docs on how to use a system-provided MPI
installation with T8code.jl (#1613)
* update docs on how to use a system-provided MPI installation with T8code.jl
* reduce number of characters per line
* adjust path of shared object files
* fix typo
---
docs/src/parallelization.md | 26 +++++++++++++++++---------
1 file changed, 17 insertions(+), 9 deletions(-)
diff --git a/docs/src/parallelization.md b/docs/src/parallelization.md
index 08470fd064a..245fdc11852 100644
--- a/docs/src/parallelization.md
+++ b/docs/src/parallelization.md
@@ -53,16 +53,24 @@ a system-provided MPI installation with Trixi.jl can be found in the following s
### [Using a system-provided MPI installation](@id parallel_system_MPI)
-When using Trixi.jl with a system-provided MPI backend the underlying [`p4est`](https://github.com/cburstedde/p4est)
-library needs to be compiled with the same MPI installation. Therefore, you also need to use
-a system-provided `p4est` installation (for notes on how to install `p4est` see e.g.
-[here](https://github.com/cburstedde/p4est/blob/master/README), use the configure option
-`--enable-mpi`). In addition, [P4est.jl](https://github.com/trixi-framework/P4est.jl) needs to
-be configured to use the custom `p4est` installation. Follow the steps described
-[here](https://github.com/trixi-framework/P4est.jl/blob/main/README.md) for the configuration.
+When using Trixi.jl with a system-provided MPI backend the underlying
+[`p4est`](https://github.com/cburstedde/p4est) and [`t8code`](https://github.com/DLR-AMR/t8code)
+libraries need to be compiled with the same MPI installation. Therefore, you also need to
+use system-provided `p4est` and `t8code` installations (for notes on how to install `p4est`
+and `t8code` see e.g. [here](https://github.com/cburstedde/p4est/blob/master/README) and
+[here](https://github.com/DLR-AMR/t8code/wiki/Installation), use the configure option
+`--enable-mpi`). Note that `t8code` already comes with a `p4est` installation, so it suffices
+to install `t8code`. In addition, [P4est.jl](https://github.com/trixi-framework/P4est.jl) and
+[T8code.jl](https://github.com/DLR-AMR/T8code.jl) need to be configured to use the custom
+installations. Follow the steps described
+[here](https://github.com/DLR-AMR/T8code.jl/blob/main/README.md#installation) and
+[here](https://github.com/trixi-framework/P4est.jl/blob/main/README.md#installation) for the
+configuration. The paths that point to `libp4est.so` (and potentially to `libsc.so`) need to be
+the same for P4est.jl and T8code.jl. This could e.g. be `libp4est.so` that usually can be found
+in `lib/` or `local/lib/` in the installation directory of `t8code`.
In total, in your active Julia project you should have a LocalPreferences.toml file with sections
-`[MPIPreferences]` and `[P4est]` as well as an entry `MPIPreferences` in your Project.toml to
-use a custom MPI installation.
+`[MPIPreferences]`, `[T8code]` and `[P4est]` as well as an entry `MPIPreferences` in your
+Project.toml to use a custom MPI installation.
### [Usage](@id parallel_usage)
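For completeness, selecting the system MPI library itself is done with MPIPreferences.jl; a minimal sketch (independent of this patch) that writes the `[MPIPreferences]` section mentioned above into LocalPreferences.toml:
```julia
julia> using MPIPreferences

julia> MPIPreferences.use_system_binary()  # picks up the MPI library found on the system paths
```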
From 76b4c5fc842e47c4bd9f33c044ae99bd2dfbf789 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Wed, 6 Sep 2023 09:16:48 +0200
Subject: [PATCH 13/36] set version to v0.5.40
---
Project.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Project.toml b/Project.toml
index 4374eaa3b0a..0d27c0fd6e8 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "Trixi"
uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb"
authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "]
-version = "0.5.40-pre"
+version = "0.5.40"
[deps]
CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
From f098ea20f545007d741745c160c7d1e1919733c4 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Wed, 6 Sep 2023 09:17:13 +0200
Subject: [PATCH 14/36] set development version to v0.5.41-pre
---
Project.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Project.toml b/Project.toml
index 0d27c0fd6e8..e14dbcd0c03 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "Trixi"
uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb"
authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "]
-version = "0.5.40"
+version = "0.5.41-pre"
[deps]
CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
From 81d2b70965f361b0bfd9b113ebe4fb360b1437cb Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Wed, 6 Sep 2023 15:33:48 +0200
Subject: [PATCH 15/36] workaround for allocations when broadcasting equations
(#1626)
---
src/equations/equations.jl | 10 ++++++++--
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/src/equations/equations.jl b/src/equations/equations.jl
index 90b2cd62191..570a25cece9 100644
--- a/src/equations/equations.jl
+++ b/src/equations/equations.jl
@@ -75,8 +75,14 @@ end
@inline Base.ndims(::AbstractEquations{NDIMS}) where {NDIMS} = NDIMS
-# equations act like scalars in broadcasting
-Base.broadcastable(equations::AbstractEquations) = Ref(equations)
+# Equations act like scalars in broadcasting.
+# Using `Ref(equations)` would be more convenient in some circumstances.
+# However, this does not work with Julia v1.9.3 correctly due to a (performance)
+# bug in Julia, see
+# - https://github.com/trixi-framework/Trixi.jl/pull/1618
+# - https://github.com/JuliaLang/julia/issues/51118
+# Thus, we use the workaround below.
+Base.broadcastable(equations::AbstractEquations) = (equations,)
"""
flux(u, orientation_or_normal, equations)
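The workaround uses only plain Julia broadcasting semantics: returning a single-element container from `Base.broadcastable` makes a value behave like a scalar in broadcast expressions. A self-contained sketch of the same idea (the `Params` type below is made up for illustration and is not part of Trixi.jl):
```julia
struct Params
    gamma::Float64
end

# Same trick as in the patch: a one-element tuple (instead of `Ref`) marks the
# value as a broadcast "scalar".
Base.broadcastable(p::Params) = (p,)

pressure_like(u, p::Params) = (p.gamma - 1) * u

u = [1.0, 2.0, 3.0]
p = Params(1.4)
pressure_like.(u, p)  # broadcasts over `u` only; `p` is reused for every element
```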
From 4e6d1638a279130e0f5008ff75acadb8307b0a6d Mon Sep 17 00:00:00 2001
From: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com>
Date: Wed, 6 Sep 2023 17:00:08 +0200
Subject: [PATCH 16/36] increase absolute tolerance (#1625)
Co-authored-by: Hendrik Ranocha
---
test/test_tree_1d_shallowwater.jl | 3 ++-
test/test_trixi.jl | 2 +-
2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/test/test_tree_1d_shallowwater.jl b/test/test_tree_1d_shallowwater.jl
index cafa17edd4c..1e5aeac1786 100644
--- a/test/test_tree_1d_shallowwater.jl
+++ b/test/test_tree_1d_shallowwater.jl
@@ -102,7 +102,8 @@ EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_1d_dgsem")
@test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_beach.jl"),
l2 = [0.17979210479598923, 1.2377495706611434, 6.289818963361573e-8],
linf = [0.845938394800688, 3.3740800777086575, 4.4541473087633676e-7],
- tspan = (0.0, 0.05))
+ tspan = (0.0, 0.05),
+ atol = 3e-10) # see https://github.com/trixi-framework/Trixi.jl/issues/1617
end
@trixi_testset "elixir_shallowwater_parabolic_bowl.jl" begin
diff --git a/test/test_trixi.jl b/test/test_trixi.jl
index ddace6b4fbe..f2cd0cab94d 100644
--- a/test/test_trixi.jl
+++ b/test/test_trixi.jl
@@ -5,7 +5,7 @@ import Trixi
# inside an elixir.
"""
@test_trixi_include(elixir; l2=nothing, linf=nothing,
- atol=10*eps(), rtol=0.001,
+ atol=500*eps(), rtol=sqrt(eps()),
parameters...)
Test Trixi by calling `trixi_include(elixir; parameters...)`.
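For orientation, the new defaults make the absolute tolerance looser and the relative tolerance tighter; evaluated for `Float64` (plain Julia, a sketch of how the comparison behaves, not the exact test-suite code):
```julia
atol = 500 * eps()  # ≈ 1.11e-13 (previous default: 10 * eps() ≈ 2.22e-15)
rtol = sqrt(eps())  # ≈ 1.49e-8  (previous default: 0.001)

# Errors are accepted if they agree to roughly this precision:
isapprox(0.17979210479598923, 0.1797921047959893; atol = atol, rtol = rtol)  # true
```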
From a7867e7376541d1326f2796661f5ca35bc9fe499 Mon Sep 17 00:00:00 2001
From: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com>
Date: Wed, 6 Sep 2023 18:38:45 +0200
Subject: [PATCH 17/36] Update docs for parallel HDF5 (#1504)
* update docs for parallel HDF5
* Update docs/src/parallelization.md
Co-authored-by: Hendrik Ranocha
* update docs on parallel HDF5
* bump compat for HDF5
* mention T8code
* reduce number of characters per line
* add information for older HDF5.jl versions
---------
Co-authored-by: Hendrik Ranocha
---
Project.toml | 2 +-
docs/src/parallelization.md | 41 +++++++++++++++++++++++++++----------
2 files changed, 31 insertions(+), 12 deletions(-)
diff --git a/Project.toml b/Project.toml
index e14dbcd0c03..41dde8662ab 100644
--- a/Project.toml
+++ b/Project.toml
@@ -56,7 +56,7 @@ DiffEqCallbacks = "2.25"
EllipsisNotation = "1.0"
FillArrays = "0.13.2, 1"
ForwardDiff = "0.10.18"
-HDF5 = "0.14, 0.15, 0.16"
+HDF5 = "0.14, 0.15, 0.16, 0.17"
IfElse = "0.1"
LinearMaps = "2.7, 3.0"
LoopVectorization = "0.12.118"
diff --git a/docs/src/parallelization.md b/docs/src/parallelization.md
index 245fdc11852..d56777c9af4 100644
--- a/docs/src/parallelization.md
+++ b/docs/src/parallelization.md
@@ -166,17 +166,36 @@ section, specifically at the descriptions of the performance index (PID).
### Using error-based step size control with MPI
-If you use error-based step size control (see also the section on [error-based adaptive step sizes](@ref adaptive_step_sizes))
-together with MPI you need to pass `internalnorm=ode_norm` and you should pass
-`unstable_check=ode_unstable_check` to OrdinaryDiffEq's [`solve`](https://docs.sciml.ai/DiffEqDocs/latest/basics/common_solver_opts/),
+If you use error-based step size control (see also the section on
+[error-based adaptive step sizes](@ref adaptive_step_sizes)) together with MPI you need to pass
+`internalnorm=ode_norm` and you should pass `unstable_check=ode_unstable_check` to
+OrdinaryDiffEq's [`solve`](https://docs.sciml.ai/DiffEqDocs/latest/basics/common_solver_opts/),
which are both included in [`ode_default_options`](@ref).
### Using parallel input and output
-Trixi.jl allows parallel I/O using MPI by leveraging parallel HDF5.jl. To enable this, you first need
-to use a system-provided MPI library, see also [here](@ref parallel_system_MPI) and you need to tell
-[HDF5.jl](https://github.com/JuliaIO/HDF5.jl) to use this library.
-To do so, set the environment variable `JULIA_HDF5_PATH` to the local path
-that contains the `libhdf5.so` shared object file and build HDF5.jl by executing `using Pkg; Pkg.build("HDF5")`.
-For more information see also the [documentation of HDF5.jl](https://juliaio.github.io/HDF5.jl/stable/mpi/).
-
-If you do not perform these steps to use parallel HDF5 or if the HDF5 is not MPI-enabled, Trixi.jl will fall back on a less efficient I/O mechanism. In that case, all disk I/O is performed only on rank zero and data is distributed to/gathered from the other ranks using regular MPI communication.
+Trixi.jl allows parallel I/O using MPI by leveraging parallel HDF5.jl. On most systems, this is
+enabled by default. Additionally, you can also use a local installation of the HDF5 library
+(with MPI support). For this, you first need to use a system-provided MPI library, see also
+[here](@ref parallel_system_MPI) and you need to tell [HDF5.jl](https://github.com/JuliaIO/HDF5.jl)
+to use this library. To do so with HDF5.jl v0.17 and newer, set the preferences `libhdf5` and
+`libhdf5_hl` to the local paths of the libraries `libhdf5` and `libhdf5_hl`, which can be done by
+```julia
+julia> using Preferences, UUIDs
+julia> set_preferences!(
+ UUID("f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f"), # UUID of HDF5.jl
+ "libhdf5" => "/path/to/your/libhdf5.so",
+ "libhdf5_hl" => "/path/to/your/libhdf5_hl.so", force = true)
+```
+For more information see also the
+[documentation of HDF5.jl](https://juliaio.github.io/HDF5.jl/stable/mpi/). In total, you should
+have a file called LocalPreferences.toml in the project directory that contains a section
+`[MPIPreferences]`, a section `[HDF5]` with entries `libhdf5` and `libhdf5_hl`, a section `[P4est]`
+with the entry `libp4est` as well as a section `[T8code]` with the entries `libt8`, `libp4est`
+and `libsc`.
+If you use HDF5.jl v0.16 or older, instead of setting the preferences for HDF5.jl, you need to set
+the environment variable `JULIA_HDF5_PATH` to the path where the HDF5 binaries are located and
+then call `]build HDF5` from Julia.
+
+If HDF5 is not MPI-enabled, Trixi.jl will fall back on a less efficient I/O mechanism. In that
+case, all disk I/O is performed only on rank zero and data is distributed to/gathered from the
+other ranks using regular MPI communication.
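To see which of the two I/O paths described above will be taken, one can ask HDF5.jl directly whether its library was built with MPI support; a short sketch (the printed value depends on the local setup):
```julia
julia> using HDF5

julia> HDF5.has_parallel()
true   # `false` means Trixi.jl falls back to rank-zero I/O as described above
```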
From 13260284dcbed67e9c430623cef049e171d19bfd Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Thu, 7 Sep 2023 08:10:57 +0200
Subject: [PATCH 18/36] set version to v0.5.41
---
Project.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Project.toml b/Project.toml
index 41dde8662ab..37553fb70f4 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "Trixi"
uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb"
authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "]
-version = "0.5.41-pre"
+version = "0.5.41"
[deps]
CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
From 953f88a78688969b893f34b3cf99693674217381 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Thu, 7 Sep 2023 08:11:09 +0200
Subject: [PATCH 19/36] set development version to v0.5.42-pre
---
Project.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Project.toml b/Project.toml
index 37553fb70f4..d37c0548a6a 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "Trixi"
uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb"
authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "]
-version = "0.5.41"
+version = "0.5.42-pre"
[deps]
CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
From 7791faa0ca116c047b41b8c556ec5175c4507a24 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Tue, 12 Sep 2023 10:53:52 +0200
Subject: [PATCH 20/36] Some multi-threading improvements (#1630)
* fix multi-threaded parabolic terms on ARM
On ARM, the previous versions resulted in
cfunction: closures are not supported on this platform
With this change, everything seems to work fine locally.
At least test/test_threaded.jl runs fine with two threads.
* reduce allocations of multi-threaded parabolic terms a bit
Polyester.jl passes arrays as pointer arrays to the closures without requiring allocations.
More complicated structs may still require allocations, so unpacking some arrays before entering a threaded loop can reduce allocations.
* format
---
src/solvers/dgmulti/dg_parabolic.jl | 5 +-
src/solvers/dgsem_tree/dg_1d_parabolic.jl | 34 +++++++-----
src/solvers/dgsem_tree/dg_2d_parabolic.jl | 51 ++++++++++--------
src/solvers/dgsem_tree/dg_3d_parabolic.jl | 65 ++++++++++++-----------
4 files changed, 86 insertions(+), 69 deletions(-)
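The unpacking pattern applied throughout the diffs below can be shown in isolation: field lookups are hoisted out of the hot loop so that the loop body only captures plain arrays. A minimal sketch using Base threads instead of Polyester.jl's `@threaded` (the `Cache` type is made up for illustration):
```julia
struct Cache
    inverse_jacobian::Vector{Float64}
end

function apply_jacobian_sketch!(du, cache::Cache)
    # Unpack once before the loop; the threaded closure then captures only the array.
    inverse_jacobian = cache.inverse_jacobian
    Threads.@threads for element in eachindex(du)
        du[element] *= -inverse_jacobian[element]
    end
    return du
end

apply_jacobian_sketch!(rand(8), Cache(rand(8)))
```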
diff --git a/src/solvers/dgmulti/dg_parabolic.jl b/src/solvers/dgmulti/dg_parabolic.jl
index 72dbe2c4256..7dfe4430244 100644
--- a/src/solvers/dgmulti/dg_parabolic.jl
+++ b/src/solvers/dgmulti/dg_parabolic.jl
@@ -62,9 +62,10 @@ end
function transform_variables!(u_transformed, u, mesh,
equations_parabolic::AbstractEquationsParabolic,
dg::DGMulti, parabolic_scheme, cache, cache_parabolic)
+ transformation = gradient_variable_transformation(equations_parabolic)
+
@threaded for i in eachindex(u)
- u_transformed[i] = gradient_variable_transformation(equations_parabolic)(u[i],
- equations_parabolic)
+ u_transformed[i] = transformation(u[i], equations_parabolic)
end
end
diff --git a/src/solvers/dgsem_tree/dg_1d_parabolic.jl b/src/solvers/dgsem_tree/dg_1d_parabolic.jl
index c2aa75388c8..7602331d7c8 100644
--- a/src/solvers/dgsem_tree/dg_1d_parabolic.jl
+++ b/src/solvers/dgsem_tree/dg_1d_parabolic.jl
@@ -105,12 +105,13 @@ end
function transform_variables!(u_transformed, u, mesh::TreeMesh{1},
equations_parabolic::AbstractEquationsParabolic,
dg::DG, parabolic_scheme, cache, cache_parabolic)
+ transformation = gradient_variable_transformation(equations_parabolic)
+
@threaded for element in eachelement(dg, cache)
# Calculate volume terms in one element
for i in eachnode(dg)
u_node = get_node_vars(u, equations_parabolic, dg, i, element)
- u_transformed_node = gradient_variable_transformation(equations_parabolic)(u_node,
- equations_parabolic)
+ u_transformed_node = transformation(u_node, equations_parabolic)
set_node_vars!(u_transformed, u_transformed_node, equations_parabolic, dg,
i, element)
end
@@ -147,16 +148,18 @@ function prolong2interfaces!(cache_parabolic, flux_viscous,
equations_parabolic::AbstractEquationsParabolic,
surface_integral, dg::DG, cache)
@unpack interfaces = cache_parabolic
+ @unpack neighbor_ids = interfaces
+ interfaces_u = interfaces.u
@threaded for interface in eachinterface(dg, cache)
- left_element = interfaces.neighbor_ids[1, interface]
- right_element = interfaces.neighbor_ids[2, interface]
+ left_element = neighbor_ids[1, interface]
+ right_element = neighbor_ids[2, interface]
# interface in x-direction
for v in eachvariable(equations_parabolic)
- # OBS! `interfaces.u` stores the interpolated *fluxes* and *not the solution*!
- interfaces.u[1, v, interface] = flux_viscous[v, nnodes(dg), left_element]
- interfaces.u[2, v, interface] = flux_viscous[v, 1, right_element]
+ # OBS! `interfaces_u` stores the interpolated *fluxes* and *not the solution*!
+ interfaces_u[1, v, interface] = flux_viscous[v, nnodes(dg), left_element]
+ interfaces_u[2, v, interface] = flux_viscous[v, 1, right_element]
end
end
@@ -204,21 +207,22 @@ function prolong2boundaries!(cache_parabolic, flux_viscous,
equations_parabolic::AbstractEquationsParabolic,
surface_integral, dg::DG, cache)
@unpack boundaries = cache_parabolic
- @unpack neighbor_sides = boundaries
+ @unpack neighbor_sides, neighbor_ids = boundaries
+ boundaries_u = boundaries.u
@threaded for boundary in eachboundary(dg, cache_parabolic)
- element = boundaries.neighbor_ids[boundary]
+ element = neighbor_ids[boundary]
if neighbor_sides[boundary] == 1
# element in -x direction of boundary
for v in eachvariable(equations_parabolic)
- # OBS! `boundaries.u` stores the interpolated *fluxes* and *not the solution*!
- boundaries.u[1, v, boundary] = flux_viscous[v, nnodes(dg), element]
+ # OBS! `boundaries_u` stores the interpolated *fluxes* and *not the solution*!
+ boundaries_u[1, v, boundary] = flux_viscous[v, nnodes(dg), element]
end
else # Element in +x direction of boundary
for v in eachvariable(equations_parabolic)
- # OBS! `boundaries.u` stores the interpolated *fluxes* and *not the solution*!
- boundaries.u[2, v, boundary] = flux_viscous[v, 1, element]
+ # OBS! `boundaries_u` stores the interpolated *fluxes* and *not the solution*!
+ boundaries_u[2, v, boundary] = flux_viscous[v, 1, element]
end
end
end
@@ -552,8 +556,10 @@ end
# where f(u) is the inviscid flux and g(u) is the viscous flux.
function apply_jacobian_parabolic!(du, mesh::TreeMesh{1},
equations::AbstractEquationsParabolic, dg::DG, cache)
+ @unpack inverse_jacobian = cache.elements
+
@threaded for element in eachelement(dg, cache)
- factor = cache.elements.inverse_jacobian[element]
+ factor = inverse_jacobian[element]
for i in eachnode(dg)
for v in eachvariable(equations)
diff --git a/src/solvers/dgsem_tree/dg_2d_parabolic.jl b/src/solvers/dgsem_tree/dg_2d_parabolic.jl
index 0da25230380..3dbc55412ad 100644
--- a/src/solvers/dgsem_tree/dg_2d_parabolic.jl
+++ b/src/solvers/dgsem_tree/dg_2d_parabolic.jl
@@ -118,12 +118,13 @@ end
function transform_variables!(u_transformed, u, mesh::Union{TreeMesh{2}, P4estMesh{2}},
equations_parabolic::AbstractEquationsParabolic,
dg::DG, parabolic_scheme, cache, cache_parabolic)
+ transformation = gradient_variable_transformation(equations_parabolic)
+
@threaded for element in eachelement(dg, cache)
# Calculate volume terms in one element
for j in eachnode(dg), i in eachnode(dg)
u_node = get_node_vars(u, equations_parabolic, dg, i, j, element)
- u_transformed_node = gradient_variable_transformation(equations_parabolic)(u_node,
- equations_parabolic)
+ u_transformed_node = transformation(u_node, equations_parabolic)
set_node_vars!(u_transformed, u_transformed_node, equations_parabolic, dg,
i, j, element)
end
@@ -168,30 +169,31 @@ function prolong2interfaces!(cache_parabolic, flux_viscous,
equations_parabolic::AbstractEquationsParabolic,
surface_integral, dg::DG, cache)
@unpack interfaces = cache_parabolic
- @unpack orientations = interfaces
+ @unpack orientations, neighbor_ids = interfaces
+ interfaces_u = interfaces.u
flux_viscous_x, flux_viscous_y = flux_viscous
@threaded for interface in eachinterface(dg, cache)
- left_element = interfaces.neighbor_ids[1, interface]
- right_element = interfaces.neighbor_ids[2, interface]
+ left_element = neighbor_ids[1, interface]
+ right_element = neighbor_ids[2, interface]
if orientations[interface] == 1
# interface in x-direction
for j in eachnode(dg), v in eachvariable(equations_parabolic)
- # OBS! `interfaces.u` stores the interpolated *fluxes* and *not the solution*!
- interfaces.u[1, v, j, interface] = flux_viscous_x[v, nnodes(dg), j,
+ # OBS! `interfaces_u` stores the interpolated *fluxes* and *not the solution*!
+ interfaces_u[1, v, j, interface] = flux_viscous_x[v, nnodes(dg), j,
left_element]
- interfaces.u[2, v, j, interface] = flux_viscous_x[v, 1, j,
+ interfaces_u[2, v, j, interface] = flux_viscous_x[v, 1, j,
right_element]
end
else # if orientations[interface] == 2
# interface in y-direction
for i in eachnode(dg), v in eachvariable(equations_parabolic)
- # OBS! `interfaces.u` stores the interpolated *fluxes* and *not the solution*!
- interfaces.u[1, v, i, interface] = flux_viscous_y[v, i, nnodes(dg),
+ # OBS! `interfaces_u` stores the interpolated *fluxes* and *not the solution*!
+ interfaces_u[1, v, i, interface] = flux_viscous_y[v, i, nnodes(dg),
left_element]
- interfaces.u[2, v, i, interface] = flux_viscous_y[v, i, 1,
+ interfaces_u[2, v, i, interface] = flux_viscous_y[v, i, 1,
right_element]
end
end
@@ -244,25 +246,26 @@ function prolong2boundaries!(cache_parabolic, flux_viscous,
equations_parabolic::AbstractEquationsParabolic,
surface_integral, dg::DG, cache)
@unpack boundaries = cache_parabolic
- @unpack orientations, neighbor_sides = boundaries
+ @unpack orientations, neighbor_sides, neighbor_ids = boundaries
+ boundaries_u = boundaries.u
flux_viscous_x, flux_viscous_y = flux_viscous
@threaded for boundary in eachboundary(dg, cache_parabolic)
- element = boundaries.neighbor_ids[boundary]
+ element = neighbor_ids[boundary]
if orientations[boundary] == 1
# boundary in x-direction
if neighbor_sides[boundary] == 1
# element in -x direction of boundary
for l in eachnode(dg), v in eachvariable(equations_parabolic)
- # OBS! `boundaries.u` stores the interpolated *fluxes* and *not the solution*!
- boundaries.u[1, v, l, boundary] = flux_viscous_x[v, nnodes(dg), l,
+ # OBS! `boundaries_u` stores the interpolated *fluxes* and *not the solution*!
+ boundaries_u[1, v, l, boundary] = flux_viscous_x[v, nnodes(dg), l,
element]
end
else # Element in +x direction of boundary
for l in eachnode(dg), v in eachvariable(equations_parabolic)
- # OBS! `boundaries.u` stores the interpolated *fluxes* and *not the solution*!
- boundaries.u[2, v, l, boundary] = flux_viscous_x[v, 1, l, element]
+ # OBS! `boundaries_u` stores the interpolated *fluxes* and *not the solution*!
+ boundaries_u[2, v, l, boundary] = flux_viscous_x[v, 1, l, element]
end
end
else # if orientations[boundary] == 2
@@ -270,15 +273,15 @@ function prolong2boundaries!(cache_parabolic, flux_viscous,
if neighbor_sides[boundary] == 1
# element in -y direction of boundary
for l in eachnode(dg), v in eachvariable(equations_parabolic)
- # OBS! `boundaries.u` stores the interpolated *fluxes* and *not the solution*!
- boundaries.u[1, v, l, boundary] = flux_viscous_y[v, l, nnodes(dg),
+ # OBS! `boundaries_u` stores the interpolated *fluxes* and *not the solution*!
+ boundaries_u[1, v, l, boundary] = flux_viscous_y[v, l, nnodes(dg),
element]
end
else
# element in +y direction of boundary
for l in eachnode(dg), v in eachvariable(equations_parabolic)
- # OBS! `boundaries.u` stores the interpolated *fluxes* and *not the solution*!
- boundaries.u[2, v, l, boundary] = flux_viscous_y[v, l, 1, element]
+ # OBS! `boundaries_u` stores the interpolated *fluxes* and *not the solution*!
+ boundaries_u[2, v, l, boundary] = flux_viscous_y[v, l, 1, element]
end
end
end
@@ -608,7 +611,7 @@ function prolong2mortars!(cache, flux_viscous::Tuple{AbstractArray, AbstractArra
end
# NOTE: Use analogy to "calc_mortar_flux!" for hyperbolic eqs with no nonconservative terms.
-# Reasoning: "calc_interface_flux!" for parabolic part is implemented as the version for
+# Reasoning: "calc_interface_flux!" for parabolic part is implemented as the version for
# hyperbolic terms with conserved terms only, i.e., no nonconservative terms.
function calc_mortar_flux!(surface_flux_values,
mesh::TreeMesh{2},
@@ -934,8 +937,10 @@ end
# where f(u) is the inviscid flux and g(u) is the viscous flux.
function apply_jacobian_parabolic!(du, mesh::Union{TreeMesh{2}, P4estMesh{2}},
equations::AbstractEquationsParabolic, dg::DG, cache)
+ @unpack inverse_jacobian = cache.elements
+
@threaded for element in eachelement(dg, cache)
- factor = cache.elements.inverse_jacobian[element]
+ factor = inverse_jacobian[element]
for j in eachnode(dg), i in eachnode(dg)
for v in eachvariable(equations)
diff --git a/src/solvers/dgsem_tree/dg_3d_parabolic.jl b/src/solvers/dgsem_tree/dg_3d_parabolic.jl
index 2745d312b37..9817e0e5f0e 100644
--- a/src/solvers/dgsem_tree/dg_3d_parabolic.jl
+++ b/src/solvers/dgsem_tree/dg_3d_parabolic.jl
@@ -118,12 +118,13 @@ end
function transform_variables!(u_transformed, u, mesh::Union{TreeMesh{3}, P4estMesh{3}},
equations_parabolic::AbstractEquationsParabolic,
dg::DG, parabolic_scheme, cache, cache_parabolic)
+ transformation = gradient_variable_transformation(equations_parabolic)
+
@threaded for element in eachelement(dg, cache)
# Calculate volume terms in one element
for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg)
u_node = get_node_vars(u, equations_parabolic, dg, i, j, k, element)
- u_transformed_node = gradient_variable_transformation(equations_parabolic)(u_node,
- equations_parabolic)
+ u_transformed_node = transformation(u_node, equations_parabolic)
set_node_vars!(u_transformed, u_transformed_node, equations_parabolic, dg,
i, j, k, element)
end
@@ -175,43 +176,44 @@ function prolong2interfaces!(cache_parabolic, flux_viscous,
equations_parabolic::AbstractEquationsParabolic,
surface_integral, dg::DG, cache)
@unpack interfaces = cache_parabolic
- @unpack orientations = interfaces
+ @unpack orientations, neighbor_ids = interfaces
+ interfaces_u = interfaces.u
flux_viscous_x, flux_viscous_y, flux_viscous_z = flux_viscous
@threaded for interface in eachinterface(dg, cache)
- left_element = interfaces.neighbor_ids[1, interface]
- right_element = interfaces.neighbor_ids[2, interface]
+ left_element = neighbor_ids[1, interface]
+ right_element = neighbor_ids[2, interface]
if orientations[interface] == 1
# interface in x-direction
for k in eachnode(dg), j in eachnode(dg),
v in eachvariable(equations_parabolic)
- # OBS! `interfaces.u` stores the interpolated *fluxes* and *not the solution*!
- interfaces.u[1, v, j, k, interface] = flux_viscous_x[v, nnodes(dg), j,
+ # OBS! `interfaces_u` stores the interpolated *fluxes* and *not the solution*!
+ interfaces_u[1, v, j, k, interface] = flux_viscous_x[v, nnodes(dg), j,
k, left_element]
- interfaces.u[2, v, j, k, interface] = flux_viscous_x[v, 1, j, k,
+ interfaces_u[2, v, j, k, interface] = flux_viscous_x[v, 1, j, k,
right_element]
end
elseif orientations[interface] == 2
# interface in y-direction
for k in eachnode(dg), i in eachnode(dg),
v in eachvariable(equations_parabolic)
- # OBS! `interfaces.u` stores the interpolated *fluxes* and *not the solution*!
- interfaces.u[1, v, i, k, interface] = flux_viscous_y[v, i, nnodes(dg),
+ # OBS! `interfaces_u` stores the interpolated *fluxes* and *not the solution*!
+ interfaces_u[1, v, i, k, interface] = flux_viscous_y[v, i, nnodes(dg),
k, left_element]
- interfaces.u[2, v, i, k, interface] = flux_viscous_y[v, i, 1, k,
+ interfaces_u[2, v, i, k, interface] = flux_viscous_y[v, i, 1, k,
right_element]
end
else # if orientations[interface] == 3
# interface in z-direction
for j in eachnode(dg), i in eachnode(dg),
v in eachvariable(equations_parabolic)
- # OBS! `interfaces.u` stores the interpolated *fluxes* and *not the solution*!
- interfaces.u[1, v, i, j, interface] = flux_viscous_z[v, i, j,
+ # OBS! `interfaces_u` stores the interpolated *fluxes* and *not the solution*!
+ interfaces_u[1, v, i, j, interface] = flux_viscous_z[v, i, j,
nnodes(dg),
left_element]
- interfaces.u[2, v, i, j, interface] = flux_viscous_z[v, i, j, 1,
+ interfaces_u[2, v, i, j, interface] = flux_viscous_z[v, i, j, 1,
right_element]
end
end
@@ -265,11 +267,12 @@ function prolong2boundaries!(cache_parabolic, flux_viscous,
equations_parabolic::AbstractEquationsParabolic,
surface_integral, dg::DG, cache)
@unpack boundaries = cache_parabolic
- @unpack orientations, neighbor_sides = boundaries
+ @unpack orientations, neighbor_sides, neighbor_ids = boundaries
+ boundaries_u = boundaries.u
flux_viscous_x, flux_viscous_y, flux_viscous_z = flux_viscous
@threaded for boundary in eachboundary(dg, cache_parabolic)
- element = boundaries.neighbor_ids[boundary]
+ element = neighbor_ids[boundary]
if orientations[boundary] == 1
# boundary in x-direction
@@ -277,15 +280,15 @@ function prolong2boundaries!(cache_parabolic, flux_viscous,
# element in -x direction of boundary
for k in eachnode(dg), j in eachnode(dg),
v in eachvariable(equations_parabolic)
- # OBS! `boundaries.u` stores the interpolated *fluxes* and *not the solution*!
- boundaries.u[1, v, j, k, boundary] = flux_viscous_x[v, nnodes(dg),
+ # OBS! `boundaries_u` stores the interpolated *fluxes* and *not the solution*!
+ boundaries_u[1, v, j, k, boundary] = flux_viscous_x[v, nnodes(dg),
j, k, element]
end
else # Element in +x direction of boundary
for k in eachnode(dg), j in eachnode(dg),
v in eachvariable(equations_parabolic)
- # OBS! `boundaries.u` stores the interpolated *fluxes* and *not the solution*!
- boundaries.u[2, v, j, k, boundary] = flux_viscous_x[v, 1, j, k,
+ # OBS! `boundaries_u` stores the interpolated *fluxes* and *not the solution*!
+ boundaries_u[2, v, j, k, boundary] = flux_viscous_x[v, 1, j, k,
element]
end
end
@@ -295,8 +298,8 @@ function prolong2boundaries!(cache_parabolic, flux_viscous,
# element in -y direction of boundary
for k in eachnode(dg), i in eachnode(dg),
v in eachvariable(equations_parabolic)
- # OBS! `boundaries.u` stores the interpolated *fluxes* and *not the solution*!
- boundaries.u[1, v, i, k, boundary] = flux_viscous_y[v, i,
+ # OBS! `boundaries_u` stores the interpolated *fluxes* and *not the solution*!
+ boundaries_u[1, v, i, k, boundary] = flux_viscous_y[v, i,
nnodes(dg), k,
element]
end
@@ -304,8 +307,8 @@ function prolong2boundaries!(cache_parabolic, flux_viscous,
# element in +y direction of boundary
for k in eachnode(dg), i in eachnode(dg),
v in eachvariable(equations_parabolic)
- # OBS! `boundaries.u` stores the interpolated *fluxes* and *not the solution*!
- boundaries.u[2, v, i, k, boundary] = flux_viscous_y[v, i, 1, k,
+ # OBS! `boundaries_u` stores the interpolated *fluxes* and *not the solution*!
+ boundaries_u[2, v, i, k, boundary] = flux_viscous_y[v, i, 1, k,
element]
end
end
@@ -315,8 +318,8 @@ function prolong2boundaries!(cache_parabolic, flux_viscous,
# element in -z direction of boundary
for j in eachnode(dg), i in eachnode(dg),
v in eachvariable(equations_parabolic)
- # OBS! `boundaries.u` stores the interpolated *fluxes* and *not the solution*!
- boundaries.u[1, v, i, j, boundary] = flux_viscous_z[v, i, j,
+ # OBS! `boundaries_u` stores the interpolated *fluxes* and *not the solution*!
+ boundaries_u[1, v, i, j, boundary] = flux_viscous_z[v, i, j,
nnodes(dg),
element]
end
@@ -324,8 +327,8 @@ function prolong2boundaries!(cache_parabolic, flux_viscous,
# element in +z direction of boundary
for j in eachnode(dg), i in eachnode(dg),
v in eachvariable(equations_parabolic)
- # OBS! `boundaries.u` stores the interpolated *fluxes* and *not the solution*!
- boundaries.u[2, v, i, j, boundary] = flux_viscous_z[v, i, j, 1,
+ # OBS! `boundaries_u` stores the interpolated *fluxes* and *not the solution*!
+ boundaries_u[2, v, i, j, boundary] = flux_viscous_z[v, i, j, 1,
element]
end
end
@@ -820,7 +823,7 @@ function prolong2mortars!(cache,
end
# NOTE: Use analogy to "calc_mortar_flux!" for hyperbolic eqs with no nonconservative terms.
-# Reasoning: "calc_interface_flux!" for parabolic part is implemented as the version for
+# Reasoning: "calc_interface_flux!" for parabolic part is implemented as the version for
# hyperbolic terms with conserved terms only, i.e., no nonconservative terms.
function calc_mortar_flux!(surface_flux_values,
mesh::TreeMesh{3},
@@ -1124,8 +1127,10 @@ end
# where f(u) is the inviscid flux and g(u) is the viscous flux.
function apply_jacobian_parabolic!(du, mesh::Union{TreeMesh{3}, P4estMesh{3}},
equations::AbstractEquationsParabolic, dg::DG, cache)
+ @unpack inverse_jacobian = cache.elements
+
@threaded for element in eachelement(dg, cache)
- factor = cache.elements.inverse_jacobian[element]
+ factor = inverse_jacobian[element]
for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg)
for v in eachvariable(equations)
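The hunks above consistently hoist nested field accesses such as `cache.elements.inverse_jacobian` or `interfaces.u` into local bindings before the `@threaded` loops, so the fields are read once per call rather than once per loop iteration. A minimal, self-contained sketch of this pattern (all names illustrative, not taken from the patch):

```julia
# Hoisting pattern sketch: read the nested field once outside the loop,
# then index the local binding inside the hot loop.
struct ElementCache
    inverse_jacobian::Vector{Float64}
end

function apply_scaling!(du::Matrix{Float64}, elements::ElementCache)
    inverse_jacobian = elements.inverse_jacobian  # hoisted field access
    for element in axes(du, 2)
        factor = inverse_jacobian[element]
        for v in axes(du, 1)
            du[v, element] *= factor
        end
    end
    return nothing
end
```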
From cfbf048308b1074591e08f8627c0089871bf91f3 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Tue, 12 Sep 2023 12:11:06 +0200
Subject: [PATCH 21/36] remove JuliaCon 2023 announcement (#1631)
---
README.md | 10 ----------
1 file changed, 10 deletions(-)
diff --git a/README.md b/README.md
index 63540b1f640..c177ad2347f 100644
--- a/README.md
+++ b/README.md
@@ -17,16 +17,6 @@
-***
-**Trixi.jl at JuliaCon 2023**
-At this year's JuliaCon, we will be present with an online contribution that involves Trixi.jl:
-
-* [Scaling Trixi.jl to more than 10,000 cores using MPI](https://pretalx.com/juliacon2023/talk/PC8PZ8/),
- 27th July 2023, 10:30–11:30 (US/Eastern), 32-G449 (Kiva)
-
-We are looking forward to seeing you there ♥️
-***
-
**Trixi.jl** is a numerical simulation framework for hyperbolic conservation
laws written in [Julia](https://julialang.org). A key objective for the
framework is to be useful to both scientists and students. Therefore, next to
From b9f3f3051c483e8ad09cb51857eec9bb228e267c Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Tue, 12 Sep 2023 12:50:44 +0200
Subject: [PATCH 22/36] set version to v0.5.42
---
Project.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Project.toml b/Project.toml
index d37c0548a6a..9f27fbb2710 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "Trixi"
uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb"
authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "]
-version = "0.5.42-pre"
+version = "0.5.42"
[deps]
CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
From daf18a5352a80ca2fbb2077ace991b9e5cc33c16 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Tue, 12 Sep 2023 12:50:58 +0200
Subject: [PATCH 23/36] set development version to v0.5.43-pre
---
Project.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Project.toml b/Project.toml
index 9f27fbb2710..06fd29ba590 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "Trixi"
uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb"
authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "]
-version = "0.5.42"
+version = "0.5.43-pre"
[deps]
CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
From 3523c49120d7c282518769a5b3d40ce7c9cc5882 Mon Sep 17 00:00:00 2001
From: Daniel Doehring
Date: Tue, 12 Sep 2023 15:00:58 +0200
Subject: [PATCH 24/36] AMR for 1D Parabolic Eqs (Clean branch) (#1605)
* Clean branch
* Un-Comment
* un-comment
* test coarsen
* remove redundancy
* Remove support for passive terms
* expand resize
* comments
* format
* Avoid code duplication
* Update src/callbacks_step/amr_dg1d.jl
Co-authored-by: Michael Schlottke-Lakemper
* comment
* comment & format
* Try to increase coverage
* Slightly more expressive names
* Apply suggestions from code review
---------
Co-authored-by: Michael Schlottke-Lakemper
---
...ixir_navierstokes_convergence_walls_amr.jl | 172 ++++++++++++++++++
src/callbacks_step/amr.jl | 158 ++++++++++++++++
src/callbacks_step/amr_dg1d.jl | 73 ++++++++
.../dgsem_tree/container_viscous_1d.jl | 58 ++++++
src/solvers/dgsem_tree/dg.jl | 3 +
src/solvers/dgsem_tree/dg_1d_parabolic.jl | 14 +-
test/test_parabolic_1d.jl | 41 +++++
test/test_parabolic_2d.jl | 12 +-
test/test_parabolic_3d.jl | 12 +-
9 files changed, 523 insertions(+), 20 deletions(-)
create mode 100644 examples/tree_1d_dgsem/elixir_navierstokes_convergence_walls_amr.jl
create mode 100644 src/solvers/dgsem_tree/container_viscous_1d.jl
diff --git a/examples/tree_1d_dgsem/elixir_navierstokes_convergence_walls_amr.jl b/examples/tree_1d_dgsem/elixir_navierstokes_convergence_walls_amr.jl
new file mode 100644
index 00000000000..1daeab04a71
--- /dev/null
+++ b/examples/tree_1d_dgsem/elixir_navierstokes_convergence_walls_amr.jl
@@ -0,0 +1,172 @@
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# semidiscretization of the ideal compressible Navier-Stokes equations
+
+prandtl_number() = 0.72
+mu() = 0.01
+
+equations = CompressibleEulerEquations1D(1.4)
+equations_parabolic = CompressibleNavierStokesDiffusion1D(equations, mu=mu(), Prandtl=prandtl_number(),
+ gradient_variables=GradientVariablesEntropy())
+
+# Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux
+solver = DGSEM(polydeg=3, surface_flux=flux_lax_friedrichs,
+ volume_integral=VolumeIntegralWeakForm())
+
+coordinates_min = -1.0
+coordinates_max = 1.0
+
+# Create a uniformly refined mesh with periodic boundaries
+mesh = TreeMesh(coordinates_min, coordinates_max,
+ initial_refinement_level=3,
+ periodicity=false,
+ n_cells_max=30_000) # set maximum capacity of tree data structure
+
+# Note: the initial condition cannot be specialized to `CompressibleNavierStokesDiffusion1D`
+# since it is called by both the parabolic solver (which passes in `CompressibleNavierStokesDiffusion1D`)
+# and by the hyperbolic solver (which passes in `CompressibleEulerEquations1D`).
+# This convergence test setup was originally derived by Andrew Winters (@andrewwinters5000)
+function initial_condition_navier_stokes_convergence_test(x, t, equations)
+ # Amplitude and shift
+ A = 0.5
+ c = 2.0
+
+ # convenience values for trig. functions
+ pi_x = pi * x[1]
+ pi_t = pi * t
+
+ rho = c + A * cos(pi_x) * cos(pi_t)
+ v1 = log(x[1] + 2.0) * (1.0 - exp(-A * (x[1] - 1.0)) ) * cos(pi_t)
+ p = rho^2
+
+ return prim2cons(SVector(rho, v1, p), equations)
+end
+
+@inline function source_terms_navier_stokes_convergence_test(u, x, t, equations)
+ x = x[1]
+
+ # TODO: parabolic
+ # we currently need to hardcode these parameters until we fix the "combined equation" issue
+ # see also https://github.com/trixi-framework/Trixi.jl/pull/1160
+ inv_gamma_minus_one = inv(equations.gamma - 1)
+ Pr = prandtl_number()
+ mu_ = mu()
+
+ # Same settings as in `initial_condition`
+ # Amplitude and shift
+ A = 0.5
+ c = 2.0
+
+ # convenience values for trig. functions
+ pi_x = pi * x
+ pi_t = pi * t
+
+ # compute the manufactured solution and all necessary derivatives
+ rho = c + A * cos(pi_x) * cos(pi_t)
+ rho_t = -pi * A * cos(pi_x) * sin(pi_t)
+ rho_x = -pi * A * sin(pi_x) * cos(pi_t)
+ rho_xx = -pi * pi * A * cos(pi_x) * cos(pi_t)
+
+ v1 = log(x + 2.0) * (1.0 - exp(-A * (x - 1.0))) * cos(pi_t)
+ v1_t = -pi * log(x + 2.0) * (1.0 - exp(-A * (x - 1.0))) * sin(pi_t)
+ v1_x = (A * log(x + 2.0) * exp(-A * (x - 1.0)) + (1.0 - exp(-A * (x - 1.0))) / (x + 2.0)) * cos(pi_t)
+ v1_xx = (( 2.0 * A * exp(-A * (x - 1.0)) / (x + 2.0)
+ - A * A * log(x + 2.0) * exp(-A * (x - 1.0))
+ - (1.0 - exp(-A * (x - 1.0))) / ((x + 2.0) * (x + 2.0))) * cos(pi_t))
+
+ p = rho * rho
+ p_t = 2.0 * rho * rho_t
+ p_x = 2.0 * rho * rho_x
+ p_xx = 2.0 * rho * rho_xx + 2.0 * rho_x * rho_x
+
+ # Note: in 1D the total energy depends only on the single velocity component v1
+ E = p * inv_gamma_minus_one + 0.5 * rho * v1^2
+ E_t = p_t * inv_gamma_minus_one + 0.5 * rho_t * v1^2 + rho * v1 * v1_t
+ E_x = p_x * inv_gamma_minus_one + 0.5 * rho_x * v1^2 + rho * v1 * v1_x
+
+ # Some convenience constants
+ T_const = equations.gamma * inv_gamma_minus_one / Pr
+ inv_rho_cubed = 1.0 / (rho^3)
+
+ # compute the source terms
+ # density equation
+ du1 = rho_t + rho_x * v1 + rho * v1_x
+
+ # x-momentum equation
+ du2 = ( rho_t * v1 + rho * v1_t
+ + p_x + rho_x * v1^2 + 2.0 * rho * v1 * v1_x
+ # stress tensor from x-direction
+ - v1_xx * mu_)
+
+ # total energy equation
+ du3 = ( E_t + v1_x * (E + p) + v1 * (E_x + p_x)
+ # stress tensor and temperature gradient terms from x-direction
+ - v1_xx * v1 * mu_
+ - v1_x * v1_x * mu_
+ - T_const * inv_rho_cubed * ( p_xx * rho * rho
+ - 2.0 * p_x * rho * rho_x
+ + 2.0 * p * rho_x * rho_x
+ - p * rho * rho_xx ) * mu_ )
+
+ return SVector(du1, du2, du3)
+end
+
+initial_condition = initial_condition_navier_stokes_convergence_test
+
+# BC types
+velocity_bc_left_right = NoSlip((x, t, equations) -> initial_condition_navier_stokes_convergence_test(x, t, equations)[2])
+
+heat_bc_left = Isothermal((x, t, equations) ->
+ Trixi.temperature(initial_condition_navier_stokes_convergence_test(x, t, equations),
+ equations_parabolic))
+heat_bc_right = Adiabatic((x, t, equations) -> 0.0)
+
+boundary_condition_left = BoundaryConditionNavierStokesWall(velocity_bc_left_right, heat_bc_left)
+boundary_condition_right = BoundaryConditionNavierStokesWall(velocity_bc_left_right, heat_bc_right)
+
+# define inviscid boundary conditions
+boundary_conditions = (; x_neg = boundary_condition_slip_wall,
+ x_pos = boundary_condition_slip_wall)
+
+# define viscous boundary conditions
+boundary_conditions_parabolic = (; x_neg = boundary_condition_left,
+ x_pos = boundary_condition_right)
+
+semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, equations_parabolic), initial_condition, solver;
+ boundary_conditions=(boundary_conditions, boundary_conditions_parabolic),
+ source_terms=source_terms_navier_stokes_convergence_test)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+# Create ODE problem with time span `tspan`
+tspan = (0.0, 1.0)
+ode = semidiscretize(semi, tspan)
+
+summary_callback = SummaryCallback()
+alive_callback = AliveCallback(alive_interval=10)
+analysis_interval = 100
+analysis_callback = AnalysisCallback(semi, interval=analysis_interval)
+
+amr_controller = ControllerThreeLevel(semi,
+ IndicatorLöhner(semi, variable=Trixi.density),
+ base_level=3,
+ med_level=4, med_threshold=0.005,
+ max_level=5, max_threshold=0.01)
+
+amr_callback = AMRCallback(semi, amr_controller,
+ interval=5,
+ adapt_initial_condition=true)
+
+# Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver
+callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, amr_callback)
+
+###############################################################################
+# run the simulation
+
+time_int_tol = 1e-8
+sol = solve(ode, RDPK3SpFSAL49(); abstol=time_int_tol, reltol=time_int_tol, dt = 1e-5,
+ ode_default_options()..., callback=callbacks)
+summary_callback() # print the timer summary
\ No newline at end of file
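In `source_terms_navier_stokes_convergence_test` above, `du1` is the residual of the continuity equation evaluated at the manufactured solution, i.e.

```math
s_\rho = \partial_t \rho + \partial_x (\rho v_1) = \rho_t + \rho_x v_1 + \rho\, v_{1,x},
```

and `du2`, `du3` follow analogously from the momentum and total-energy equations, with the additional viscous stress and heat-flux contributions scaled by `mu_` and `T_const`.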
diff --git a/src/callbacks_step/amr.jl b/src/callbacks_step/amr.jl
index 4d80e6e1139..ba840ff9675 100644
--- a/src/callbacks_step/amr.jl
+++ b/src/callbacks_step/amr.jl
@@ -192,6 +192,16 @@ end
amr_callback(u_ode, mesh_equations_solver_cache(semi)..., semi, t, iter; kwargs...)
end
+@inline function (amr_callback::AMRCallback)(u_ode::AbstractVector,
+ semi::SemidiscretizationHyperbolicParabolic,
+ t, iter;
+ kwargs...)
+ # Note that we don't `wrap_array` the vector `u_ode` to be able to `resize!`
+ # it when doing AMR while still dispatching on the `mesh` etc.
+ amr_callback(u_ode, mesh_equations_solver_cache(semi)..., semi.cache_parabolic,
+ semi, t, iter; kwargs...)
+end
+
# `passive_args` is currently used for Euler with self-gravity to adapt the gravity solver
# passively without querying its indicator, based on the assumption that both solvers use
# the same mesh. That's a hack and should be improved in the future once we have more examples
@@ -346,6 +356,154 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::TreeMesh,
return has_changed
end
+function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::TreeMesh,
+ equations, dg::DG,
+ cache, cache_parabolic,
+ semi::SemidiscretizationHyperbolicParabolic,
+ t, iter;
+ only_refine = false, only_coarsen = false)
+ @unpack controller, adaptor = amr_callback
+
+ u = wrap_array(u_ode, mesh, equations, dg, cache)
+ # Indicator kept based on hyperbolic variables
+ lambda = @trixi_timeit timer() "indicator" controller(u, mesh, equations, dg, cache,
+ t = t, iter = iter)
+
+ if mpi_isparallel()
+ error("MPI has not been verified yet for parabolic AMR")
+
+ # Collect lambda for all elements
+ lambda_global = Vector{eltype(lambda)}(undef, nelementsglobal(dg, cache))
+ # Use parent because n_elements_by_rank is an OffsetArray
+ recvbuf = MPI.VBuffer(lambda_global, parent(cache.mpi_cache.n_elements_by_rank))
+ MPI.Allgatherv!(lambda, recvbuf, mpi_comm())
+ lambda = lambda_global
+ end
+
+ leaf_cell_ids = leaf_cells(mesh.tree)
+ @boundscheck begin
+ @assert axes(lambda)==axes(leaf_cell_ids) ("Indicator (axes = $(axes(lambda))) and leaf cell (axes = $(axes(leaf_cell_ids))) arrays have different axes")
+ end
+
+ @unpack to_refine, to_coarsen = amr_callback.amr_cache
+ empty!(to_refine)
+ empty!(to_coarsen)
+ for element in 1:length(lambda)
+ controller_value = lambda[element]
+ if controller_value > 0
+ push!(to_refine, leaf_cell_ids[element])
+ elseif controller_value < 0
+ push!(to_coarsen, leaf_cell_ids[element])
+ end
+ end
+
+ @trixi_timeit timer() "refine" if !only_coarsen && !isempty(to_refine)
+ # refine mesh
+ refined_original_cells = @trixi_timeit timer() "mesh" refine!(mesh.tree,
+ to_refine)
+
+ # Find all indices of elements whose cell ids are in refined_original_cells
+ # Note: This assumes same indices for hyperbolic and parabolic part.
+ elements_to_refine = findall(in(refined_original_cells),
+ cache.elements.cell_ids)
+
+ # refine solver
+ @trixi_timeit timer() "solver" refine!(u_ode, adaptor, mesh, equations, dg,
+ cache, cache_parabolic,
+ elements_to_refine)
+ else
+ # If there is nothing to refine, create empty array for later use
+ refined_original_cells = Int[]
+ end
+
+ @trixi_timeit timer() "coarsen" if !only_refine && !isempty(to_coarsen)
+ # Since the cells may have been shifted due to refinement, first we need to
+ # translate the old cell ids to the new cell ids
+ if !isempty(to_coarsen)
+ to_coarsen = original2refined(to_coarsen, refined_original_cells, mesh)
+ end
+
+ # Next, determine the parent cells from which the fine cells are to be
+ # removed, since these are needed for the coarsen! function. However, since
+ # we only want to coarsen if *all* child cells are marked for coarsening,
+ # we count the coarsening indicators for each parent cell and only coarsen
+ # if all children are marked as such (i.e., where the count is 2^ndims). At
+ # the same time, check if a cell is marked for coarsening even though it is
+ # *not* a leaf cell -> this can only happen if it was refined due to 2:1
+ # smoothing during the preceding refinement operation.
+ parents_to_coarsen = zeros(Int, length(mesh.tree))
+ for cell_id in to_coarsen
+ # If cell has no parent, it cannot be coarsened
+ if !has_parent(mesh.tree, cell_id)
+ continue
+ end
+
+ # If cell is not leaf (anymore), it cannot be coarsened
+ if !is_leaf(mesh.tree, cell_id)
+ continue
+ end
+
+ # Increase count for parent cell
+ parent_id = mesh.tree.parent_ids[cell_id]
+ parents_to_coarsen[parent_id] += 1
+ end
+
+ # Extract only those parent cells for which all children should be coarsened
+ to_coarsen = collect(1:length(parents_to_coarsen))[parents_to_coarsen .== 2^ndims(mesh)]
+
+ # Finally, coarsen mesh
+ coarsened_original_cells = @trixi_timeit timer() "mesh" coarsen!(mesh.tree,
+ to_coarsen)
+
+ # Convert coarsened parent cell ids to the list of child cell ids that have
+ # been removed, since this is the information that is expected by the solver
+ removed_child_cells = zeros(Int,
+ n_children_per_cell(mesh.tree) *
+ length(coarsened_original_cells))
+ for (index, coarse_cell_id) in enumerate(coarsened_original_cells)
+ for child in 1:n_children_per_cell(mesh.tree)
+ removed_child_cells[n_children_per_cell(mesh.tree) * (index - 1) + child] = coarse_cell_id +
+ child
+ end
+ end
+
+ # Find all indices of elements whose cell ids are in removed_child_cells
+ # Note: This assumes same indices for hyperbolic and parabolic part.
+ elements_to_remove = findall(in(removed_child_cells), cache.elements.cell_ids)
+
+ # coarsen solver
+ @trixi_timeit timer() "solver" coarsen!(u_ode, adaptor, mesh, equations, dg,
+ cache, cache_parabolic,
+ elements_to_remove)
+ else
+ # If there is nothing to coarsen, create empty array for later use
+ coarsened_original_cells = Int[]
+ end
+
+ # Store whether there were any cells coarsened or refined
+ has_changed = !isempty(refined_original_cells) || !isempty(coarsened_original_cells)
+ if has_changed # TODO: Taal decide, where shall we set this?
+ # don't set it to has_changed since there can be changes from earlier calls
+ mesh.unsaved_changes = true
+ end
+
+ # Dynamically balance computational load by first repartitioning the mesh and then redistributing the cells/elements
+ if has_changed && mpi_isparallel() && amr_callback.dynamic_load_balancing
+ error("MPI has not been verified yet for parabolic AMR")
+
+ @trixi_timeit timer() "dynamic load balancing" begin
+ old_mpi_ranks_per_cell = copy(mesh.tree.mpi_ranks)
+
+ partition!(mesh)
+
+ rebalance_solver!(u_ode, mesh, equations, dg, cache, old_mpi_ranks_per_cell)
+ end
+ end
+
+ # Return true if there were any cells coarsened or refined, otherwise false
+ return has_changed
+end
+
# Copy controller values to quad user data storage, will be called below
function copy_to_quad_iter_volume(info, user_data)
info_pw = PointerWrapper(info)
diff --git a/src/callbacks_step/amr_dg1d.jl b/src/callbacks_step/amr_dg1d.jl
index e31a74730ea..e721ccc61cb 100644
--- a/src/callbacks_step/amr_dg1d.jl
+++ b/src/callbacks_step/amr_dg1d.jl
@@ -76,6 +76,44 @@ function refine!(u_ode::AbstractVector, adaptor, mesh::TreeMesh{1},
return nothing
end
+function refine!(u_ode::AbstractVector, adaptor, mesh::TreeMesh{1},
+ equations, dg::DGSEM, cache, cache_parabolic,
+ elements_to_refine)
+ # Call `refine!` for the hyperbolic part, which does the heavy lifting of
+ # actually transferring the solution to the refined cells
+ refine!(u_ode, adaptor, mesh, equations, dg, cache, elements_to_refine)
+
+ # The remaining function only handles the necessary adaptation of the data structures
+ # for the parabolic part of the semidiscretization
+
+ # Get new list of leaf cells
+ leaf_cell_ids = local_leaf_cells(mesh.tree)
+
+ @unpack elements, viscous_container = cache_parabolic
+ resize!(elements, length(leaf_cell_ids))
+ init_elements!(elements, leaf_cell_ids, mesh, dg.basis)
+
+ # Resize parabolic helper variables
+ resize!(viscous_container, equations, dg, cache)
+
+ # re-initialize interfaces container
+ @unpack interfaces = cache_parabolic
+ resize!(interfaces, count_required_interfaces(mesh, leaf_cell_ids))
+ init_interfaces!(interfaces, elements, mesh)
+
+ # re-initialize boundaries container
+ @unpack boundaries = cache_parabolic
+ resize!(boundaries, count_required_boundaries(mesh, leaf_cell_ids))
+ init_boundaries!(boundaries, elements, mesh)
+
+ # Sanity check
+ if isperiodic(mesh.tree)
+ @assert ninterfaces(interfaces)==1 * nelements(dg, cache_parabolic) ("For 1D and periodic domains, the number of interfaces must be the same as the number of elements")
+ end
+
+ return nothing
+end
+
# TODO: Taal compare performance of different implementations
# Refine solution data u for an element, using L2 projection (interpolation)
function refine_element!(u::AbstractArray{<:Any, 3}, element_id,
@@ -201,6 +239,41 @@ function coarsen!(u_ode::AbstractVector, adaptor, mesh::TreeMesh{1},
return nothing
end
+function coarsen!(u_ode::AbstractVector, adaptor, mesh::TreeMesh{1},
+ equations, dg::DGSEM, cache, cache_parabolic,
+ elements_to_remove)
+ # Call `coarsen!` for the hyperbolic part, which does the heavy lifting of
+ # actually transferring the solution to the coarsened cells
+ coarsen!(u_ode, adaptor, mesh, equations, dg, cache, elements_to_remove)
+
+ # Get new list of leaf cells
+ leaf_cell_ids = local_leaf_cells(mesh.tree)
+
+ @unpack elements, viscous_container = cache_parabolic
+ resize!(elements, length(leaf_cell_ids))
+ init_elements!(elements, leaf_cell_ids, mesh, dg.basis)
+
+ # Resize parabolic helper variables
+ resize!(viscous_container, equations, dg, cache)
+
+ # re-initialize interfaces container
+ @unpack interfaces = cache_parabolic
+ resize!(interfaces, count_required_interfaces(mesh, leaf_cell_ids))
+ init_interfaces!(interfaces, elements, mesh)
+
+ # re-initialize boundaries container
+ @unpack boundaries = cache_parabolic
+ resize!(boundaries, count_required_boundaries(mesh, leaf_cell_ids))
+ init_boundaries!(boundaries, elements, mesh)
+
+ # Sanity check
+ if isperiodic(mesh.tree)
+ @assert ninterfaces(interfaces)==1 * nelements(dg, cache_parabolic) ("For 1D and periodic domains, the number of interfaces must be the same as the number of elements")
+ end
+
+ return nothing
+end
+
# TODO: Taal compare performance of different implementations
# Coarsen solution data u for two elements, using L2 projection
function coarsen_elements!(u::AbstractArray{<:Any, 3}, element_id,
diff --git a/src/solvers/dgsem_tree/container_viscous_1d.jl b/src/solvers/dgsem_tree/container_viscous_1d.jl
new file mode 100644
index 00000000000..a4919f75396
--- /dev/null
+++ b/src/solvers/dgsem_tree/container_viscous_1d.jl
@@ -0,0 +1,58 @@
+mutable struct ViscousContainer1D{uEltype <: Real}
+ u_transformed::Array{uEltype, 3}
+ gradients::Array{uEltype, 3}
+ flux_viscous::Array{uEltype, 3}
+
+ # internal `resize!`able storage
+ _u_transformed::Vector{uEltype}
+ _gradients::Vector{uEltype}
+ _flux_viscous::Vector{uEltype}
+
+ function ViscousContainer1D{uEltype}(n_vars::Integer, n_nodes::Integer,
+ n_elements::Integer) where {uEltype <: Real}
+ new(Array{uEltype, 3}(undef, n_vars, n_nodes, n_elements),
+ Array{uEltype, 3}(undef, n_vars, n_nodes, n_elements),
+ Array{uEltype, 3}(undef, n_vars, n_nodes, n_elements),
+ Vector{uEltype}(undef, n_vars * n_nodes * n_elements),
+ Vector{uEltype}(undef, n_vars * n_nodes * n_elements),
+ Vector{uEltype}(undef, n_vars * n_nodes * n_elements))
+ end
+end
+
+function init_viscous_container(n_vars::Integer, n_nodes::Integer,
+ n_elements::Integer,
+ ::Type{uEltype}) where {uEltype <: Real}
+ return ViscousContainer1D{uEltype}(n_vars, n_nodes, n_elements)
+end
+
+# Only one-dimensional `Array`s are `resize!`able in Julia.
+# Hence, we use `Vector`s as internal storage and `resize!`
+# them whenever needed. Then, we reuse the same memory by
+# `unsafe_wrap`ping multi-dimensional `Array`s around the
+# internal storage.
+function Base.resize!(viscous_container::ViscousContainer1D, equations, dg, cache)
+ capacity = nvariables(equations) * nnodes(dg) * nelements(dg, cache)
+ resize!(viscous_container._u_transformed, capacity)
+ resize!(viscous_container._gradients, capacity)
+ resize!(viscous_container._flux_viscous, capacity)
+
+ viscous_container.u_transformed = unsafe_wrap(Array,
+ pointer(viscous_container._u_transformed),
+ (nvariables(equations),
+ nnodes(dg),
+ nelements(dg, cache)))
+
+ viscous_container.gradients = unsafe_wrap(Array,
+ pointer(viscous_container._gradients),
+ (nvariables(equations),
+ nnodes(dg),
+ nelements(dg, cache)))
+
+ viscous_container.flux_viscous = unsafe_wrap(Array,
+ pointer(viscous_container._flux_viscous),
+ (nvariables(equations),
+ nnodes(dg),
+ nelements(dg, cache)))
+
+ return nothing
+end
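The comment block above summarizes the core idea of `ViscousContainer1D`: since only one-dimensional `Array`s are `resize!`able in Julia, the container stores flat `Vector`s and re-wraps them as multi-dimensional `Array`s after every resize. A stripped-down sketch of the same idea (sizes and names purely illustrative, not part of the patch):

```julia
# Keep a flat, resizable Vector and expose it as a 3D Array via `unsafe_wrap`.
# After every `resize!`, the Array must be re-wrapped because the underlying
# buffer may have been reallocated.
storage = Vector{Float64}(undef, 2 * 4 * 8)           # (n_vars, n_nodes, n_elements)
u = unsafe_wrap(Array, pointer(storage), (2, 4, 8))

resize!(storage, 2 * 4 * 16)                          # e.g. after mesh refinement
u = unsafe_wrap(Array, pointer(storage), (2, 4, 16))  # fresh wrapper on the new buffer
```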
diff --git a/src/solvers/dgsem_tree/dg.jl b/src/solvers/dgsem_tree/dg.jl
index 6e02bc1d94a..ff37bad3b3a 100644
--- a/src/solvers/dgsem_tree/dg.jl
+++ b/src/solvers/dgsem_tree/dg.jl
@@ -54,6 +54,9 @@ include("containers.jl")
# Dimension-agnostic parallel setup
include("dg_parallel.jl")
+# Helper struct for parabolic AMR
+include("container_viscous_1d.jl")
+
# 1D DG implementation
include("dg_1d.jl")
include("dg_1d_parabolic.jl")
diff --git a/src/solvers/dgsem_tree/dg_1d_parabolic.jl b/src/solvers/dgsem_tree/dg_1d_parabolic.jl
index 7602331d7c8..97e31e0e22b 100644
--- a/src/solvers/dgsem_tree/dg_1d_parabolic.jl
+++ b/src/solvers/dgsem_tree/dg_1d_parabolic.jl
@@ -17,7 +17,8 @@ function rhs_parabolic!(du, u, t, mesh::TreeMesh{1},
equations_parabolic::AbstractEquationsParabolic,
initial_condition, boundary_conditions_parabolic, source_terms,
dg::DG, parabolic_scheme, cache, cache_parabolic)
- @unpack u_transformed, gradients, flux_viscous = cache_parabolic
+ @unpack viscous_container = cache_parabolic
+ @unpack u_transformed, gradients, flux_viscous = viscous_container
# Convert conservative variables to a form more suitable for viscous flux calculations
@trixi_timeit timer() "transform variables" begin
@@ -534,18 +535,15 @@ function create_cache_parabolic(mesh::TreeMesh{1},
elements = init_elements(leaf_cell_ids, mesh, equations_hyperbolic, dg.basis, RealT,
uEltype)
- n_vars = nvariables(equations_hyperbolic)
- n_nodes = nnodes(elements)
- n_elements = nelements(elements)
- u_transformed = Array{uEltype}(undef, n_vars, n_nodes, n_elements)
- gradients = similar(u_transformed)
- flux_viscous = similar(u_transformed)
+ viscous_container = init_viscous_container(nvariables(equations_hyperbolic),
+ nnodes(elements), nelements(elements),
+ uEltype)
interfaces = init_interfaces(leaf_cell_ids, mesh, elements)
boundaries = init_boundaries(leaf_cell_ids, mesh, elements)
- cache = (; elements, interfaces, boundaries, gradients, flux_viscous, u_transformed)
+ cache = (; elements, interfaces, boundaries, viscous_container)
return cache
end
diff --git a/test/test_parabolic_1d.jl b/test/test_parabolic_1d.jl
index 06a55100d62..3c2b8855ce8 100644
--- a/test/test_parabolic_1d.jl
+++ b/test/test_parabolic_1d.jl
@@ -20,6 +20,28 @@ isdir(outdir) && rm(outdir, recursive=true)
)
end
+ @trixi_testset "TreeMesh1D: elixir_advection_diffusion.jl (AMR)" begin
+ @test_trixi_include(joinpath(examples_dir(), "tree_1d_dgsem", "elixir_advection_diffusion.jl"),
+ tspan=(0.0, 0.0), initial_refinement_level = 5)
+ tspan=(0.0, 1.0)
+ ode = semidiscretize(semi, tspan)
+ amr_controller = ControllerThreeLevel(semi, IndicatorMax(semi, variable=first),
+ base_level=4,
+ med_level=5, med_threshold=0.1,
+ max_level=6, max_threshold=0.6)
+ amr_callback = AMRCallback(semi, amr_controller,
+ interval=5,
+ adapt_initial_condition=true)
+
+ # Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver
+ callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, amr_callback)
+ sol = solve(ode, KenCarp4(autodiff=false), abstol=time_abs_tol, reltol=time_int_tol,
+ save_everystep=false, callback=callbacks)
+ l2_error, linf_error = analysis_callback(sol)
+ @test l2_error ≈ [6.4878111416468355e-6]
+ @test linf_error ≈ [3.258075790424364e-5]
+ end
+
@trixi_testset "TreeMesh1D: elixir_navierstokes_convergence_periodic.jl" begin
@test_trixi_include(joinpath(examples_dir(), "tree_1d_dgsem", "elixir_navierstokes_convergence_periodic.jl"),
l2 = [0.0001133835907077494, 6.226282245610444e-5, 0.0002820171699999139],
@@ -53,6 +75,25 @@ isdir(outdir) && rm(outdir, recursive=true)
linf = [0.002754803146635787, 0.0028567714697580906, 0.012941794048176192]
)
end
+
+ @trixi_testset "TreeMesh1D: elixir_navierstokes_convergence_walls_amr.jl" begin
+ @test_trixi_include(joinpath(examples_dir(), "tree_1d_dgsem", "elixir_navierstokes_convergence_walls_amr.jl"),
+ equations_parabolic = CompressibleNavierStokesDiffusion1D(equations, mu=mu(),
+ Prandtl=prandtl_number()),
+ l2 = [2.527877257772131e-5, 2.5539911566937718e-5, 0.0001211860451244785],
+ linf = [0.00014663867588948776, 0.00019422448348348196, 0.0009556439394007299]
+ )
+ end
+
+ @trixi_testset "TreeMesh1D: elixir_navierstokes_convergence_walls_amr.jl: GradientVariablesEntropy" begin
+ @test_trixi_include(joinpath(examples_dir(), "tree_1d_dgsem", "elixir_navierstokes_convergence_walls_amr.jl"),
+ equations_parabolic = CompressibleNavierStokesDiffusion1D(equations, mu=mu(),
+ Prandtl=prandtl_number(),
+ gradient_variables = GradientVariablesEntropy()),
+ l2 = [2.4593699163175966e-5, 2.392863645712634e-5, 0.00011252526651714956],
+ linf = [0.00011850555445525046, 0.0001898777490968537, 0.0009597561467877824]
+ )
+ end
end
# Clean up afterwards: delete Trixi output directory
diff --git a/test/test_parabolic_2d.jl b/test/test_parabolic_2d.jl
index 1564a33dc41..3fff4382cd1 100644
--- a/test/test_parabolic_2d.jl
+++ b/test/test_parabolic_2d.jl
@@ -143,9 +143,9 @@ isdir(outdir) && rm(outdir, recursive=true)
callbacks = CallbackSet(summary_callback, alive_callback, analysis_callback)
sol = solve(ode, RDPK3SpFSAL49(); abstol=time_int_tol, reltol=time_int_tol,
ode_default_options()..., callback=callbacks)
- ac_sol = analysis_callback(sol)
- @test ac_sol.l2[1] ≈ 1.67452550744728e-6
- @test ac_sol.linf[1] ≈ 7.905059166368744e-6
+ l2_error, linf_error = analysis_callback(sol)
+ @test l2_error ≈ [1.67452550744728e-6]
+ @test linf_error ≈ [7.905059166368744e-6]
# Ensure that we do not have excessive memory allocations
# (e.g., from type instabilities)
@@ -229,9 +229,9 @@ isdir(outdir) && rm(outdir, recursive=true)
callbacks = CallbackSet(summary_callback, alive_callback, analysis_callback)
sol = solve(ode, RDPK3SpFSAL49(); abstol=time_int_tol, reltol=time_int_tol, dt = 1e-5,
ode_default_options()..., callback=callbacks)
- ac_sol = analysis_callback(sol)
- @test ac_sol.l2 ≈ [0.00024296959173852447; 0.0002093263158670915; 0.0005390572390977262; 0.00026753561392341537]
- @test ac_sol.linf ≈ [0.0016210102053424436; 0.002593287648655501; 0.002953907343823712; 0.002077119120180271]
+ l2_error, linf_error = analysis_callback(sol)
+ @test l2_error ≈ [0.00024296959173852447; 0.0002093263158670915; 0.0005390572390977262; 0.00026753561392341537]
+ @test linf_error ≈ [0.0016210102053424436; 0.002593287648655501; 0.002953907343823712; 0.002077119120180271]
end
@trixi_testset "TreeMesh2D: elixir_navierstokes_lid_driven_cavity.jl" begin
diff --git a/test/test_parabolic_3d.jl b/test/test_parabolic_3d.jl
index d607962afa0..ded052fb9d3 100644
--- a/test/test_parabolic_3d.jl
+++ b/test/test_parabolic_3d.jl
@@ -94,9 +94,9 @@ isdir(outdir) && rm(outdir, recursive=true)
callbacks = CallbackSet(summary_callback, alive_callback, analysis_callback)
sol = solve(ode, RDPK3SpFSAL49(); abstol=time_int_tol, reltol=time_int_tol, dt = 1e-5,
ode_default_options()..., callback=callbacks)
- ac_sol = analysis_callback(sol)
- @test ac_sol.l2 ≈ [0.0003991794175622818; 0.0008853745163670504; 0.0010658655552066817; 0.0008785559918324284; 0.001403163458422815]
- @test ac_sol.linf ≈ [0.0035306410538458177; 0.01505692306169911; 0.008862444161110705; 0.015065647972869856; 0.030402714743065218]
+ l2_error, linf_error = analysis_callback(sol)
+ @test l2_error ≈ [0.0003991794175622818; 0.0008853745163670504; 0.0010658655552066817; 0.0008785559918324284; 0.001403163458422815]
+ @test linf_error ≈ [0.0035306410538458177; 0.01505692306169911; 0.008862444161110705; 0.015065647972869856; 0.030402714743065218]
end
@trixi_testset "TreeMesh3D: elixir_navierstokes_taylor_green_vortex.jl" begin
@@ -127,9 +127,9 @@ isdir(outdir) && rm(outdir, recursive=true)
sol = solve(ode, CarpenterKennedy2N54(williamson_condition=false),
dt=5e-3,
save_everystep=false, callback=callbacks);
- ac_sol = analysis_callback(sol)
- @test ac_sol.l2 ≈ [0.0013666103707729502; 0.2313581629543744; 0.2308164306264533; 0.17460246787819503; 0.28121914446544005]
- @test ac_sol.linf ≈ [0.006938093883741336; 1.028235074139312; 1.0345438209717241; 1.0821111605203542; 1.2669636522564645]
+ l2_error, linf_error = analysis_callback(sol)
+ @test l2_error ≈ [0.0013666103707729502; 0.2313581629543744; 0.2308164306264533; 0.17460246787819503; 0.28121914446544005]
+ @test linf_error ≈ [0.006938093883741336; 1.028235074139312; 1.0345438209717241; 1.0821111605203542; 1.2669636522564645]
# Ensure that we do not have excessive memory allocations
# (e.g., from type instabilities)
From 27d4fd190bd7a8c76a56f9fefd062169a2682d46 Mon Sep 17 00:00:00 2001
From: Daniel Doehring
Date: Tue, 12 Sep 2023 17:54:08 +0200
Subject: [PATCH 25/36] Shorten 3d parabolic test times (#1634)
* Shorten 3d parabolic test times
* fix typo
* clear notation
---
test/test_parabolic_3d.jl | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/test/test_parabolic_3d.jl b/test/test_parabolic_3d.jl
index ded052fb9d3..86076460294 100644
--- a/test/test_parabolic_3d.jl
+++ b/test/test_parabolic_3d.jl
@@ -85,7 +85,7 @@ isdir(outdir) && rm(outdir, recursive=true)
num_leafs = length(LLID)
@assert num_leafs % 16 == 0
Trixi.refine!(mesh.tree, LLID[1:Int(num_leafs/16)])
- tspan=(0.0, 1.0)
+ tspan=(0.0, 0.25)
semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, equations_parabolic), initial_condition, solver;
boundary_conditions=(boundary_conditions, boundary_conditions_parabolic),
source_terms=source_terms_navier_stokes_convergence_test)
@@ -95,8 +95,8 @@ isdir(outdir) && rm(outdir, recursive=true)
sol = solve(ode, RDPK3SpFSAL49(); abstol=time_int_tol, reltol=time_int_tol, dt = 1e-5,
ode_default_options()..., callback=callbacks)
l2_error, linf_error = analysis_callback(sol)
- @test l2_error ≈ [0.0003991794175622818; 0.0008853745163670504; 0.0010658655552066817; 0.0008785559918324284; 0.001403163458422815]
- @test linf_error ≈ [0.0035306410538458177; 0.01505692306169911; 0.008862444161110705; 0.015065647972869856; 0.030402714743065218]
+ @test l2_error ≈ [0.0003109336253407314, 0.0006473493036803503, 0.0007705277238213672, 0.0006280517917198335, 0.000903927789884075]
+ @test linf_error ≈ [0.0023694155365339142, 0.010634932622402863, 0.006772070862236412, 0.010640551561726901, 0.019256819038719897]
end
@trixi_testset "TreeMesh3D: elixir_navierstokes_taylor_green_vortex.jl" begin
@@ -114,7 +114,7 @@ isdir(outdir) && rm(outdir, recursive=true)
num_leafs = length(LLID)
@assert num_leafs % 32 == 0
Trixi.refine!(mesh.tree, LLID[1:Int(num_leafs/32)])
- tspan=(0.0, 10.0)
+ tspan=(0.0, 0.1)
semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, equations_parabolic),
initial_condition, solver)
ode = semidiscretize(semi, tspan)
@@ -128,8 +128,8 @@ isdir(outdir) && rm(outdir, recursive=true)
dt=5e-3,
save_everystep=false, callback=callbacks);
l2_error, linf_error = analysis_callback(sol)
- @test l2_error ≈ [0.0013666103707729502; 0.2313581629543744; 0.2308164306264533; 0.17460246787819503; 0.28121914446544005]
- @test linf_error ≈ [0.006938093883741336; 1.028235074139312; 1.0345438209717241; 1.0821111605203542; 1.2669636522564645]
+ @test l2_error ≈ [7.314319856736271e-5, 0.006266480163542894, 0.006266489911815533, 0.008829222305770226, 0.0032859166842329228]
+ @test linf_error ≈ [0.0002943968186086554, 0.013876261980614757, 0.013883619864959451, 0.025201279960491936, 0.018679364985388247]
# Ensure that we do not have excessive memory allocations
# (e.g., from type instabilities)
From d206b766e3e0aad36a9808ed65c5549b8b284f73 Mon Sep 17 00:00:00 2001
From: ArseniyKholod <119304909+ArseniyKholod@users.noreply.github.com>
Date: Tue, 12 Sep 2023 19:16:44 +0200
Subject: [PATCH 26/36] Add load_timestep! for restart setup (#1614)
* add load_timestep!
* Update save_restart.jl
* Update save_restart.jl
* Update src/callbacks_step/save_restart.jl
Co-authored-by: Michael Schlottke-Lakemper
* use new function in elixirs and docs
---------
Co-authored-by: Michael Schlottke-Lakemper
Co-authored-by: Hendrik Ranocha
---
docs/src/restart.md | 3 +--
examples/p4est_2d_dgsem/elixir_advection_restart.jl | 3 +--
examples/p4est_3d_dgsem/elixir_advection_restart.jl | 3 +--
.../structured_2d_dgsem/elixir_advection_restart.jl | 3 +--
.../structured_3d_dgsem/elixir_advection_restart.jl | 3 +--
examples/tree_2d_dgsem/elixir_advection_restart.jl | 3 +--
examples/tree_3d_dgsem/elixir_advection_restart.jl | 3 +--
.../unstructured_2d_dgsem/elixir_euler_restart.jl | 3 +--
src/Trixi.jl | 2 +-
src/callbacks_step/save_restart.jl | 12 ++++++++++++
10 files changed, 21 insertions(+), 17 deletions(-)
diff --git a/docs/src/restart.md b/docs/src/restart.md
index 767269ff27d..c7cbcd11852 100644
--- a/docs/src/restart.md
+++ b/docs/src/restart.md
@@ -77,8 +77,7 @@ and its time step number, e.g.:
```julia
integrator = init(ode, CarpenterKennedy2N54(williamson_condition=false),
dt=dt, save_everystep=false, callback=callbacks);
-integrator.iter = load_timestep(restart_filename)
-integrator.stats.naccept = integrator.iter
+load_timestep!(integrator, restart_filename)
```
Now we can compute the solution:
diff --git a/examples/p4est_2d_dgsem/elixir_advection_restart.jl b/examples/p4est_2d_dgsem/elixir_advection_restart.jl
index 79a35199b83..52917616a6a 100644
--- a/examples/p4est_2d_dgsem/elixir_advection_restart.jl
+++ b/examples/p4est_2d_dgsem/elixir_advection_restart.jl
@@ -35,8 +35,7 @@ integrator = init(ode, CarpenterKennedy2N54(williamson_condition=false),
save_everystep=false, callback=callbacks);
# Get the last time index and work with that.
-integrator.iter = load_timestep(restart_filename)
-integrator.stats.naccept = integrator.iter
+load_timestep!(integrator, restart_filename)
###############################################################################
diff --git a/examples/p4est_3d_dgsem/elixir_advection_restart.jl b/examples/p4est_3d_dgsem/elixir_advection_restart.jl
index b27eaab62e2..26d10cf8826 100644
--- a/examples/p4est_3d_dgsem/elixir_advection_restart.jl
+++ b/examples/p4est_3d_dgsem/elixir_advection_restart.jl
@@ -32,8 +32,7 @@ integrator = init(ode, CarpenterKennedy2N54(williamson_condition=false),
save_everystep=false, callback=callbacks);
# Get the last time index and work with that.
-integrator.iter = load_timestep(restart_filename)
-integrator.stats.naccept = integrator.iter
+load_timestep!(integrator, restart_filename)
###############################################################################
diff --git a/examples/structured_2d_dgsem/elixir_advection_restart.jl b/examples/structured_2d_dgsem/elixir_advection_restart.jl
index 98c44fac71a..82eaa21333a 100644
--- a/examples/structured_2d_dgsem/elixir_advection_restart.jl
+++ b/examples/structured_2d_dgsem/elixir_advection_restart.jl
@@ -34,8 +34,7 @@ integrator = init(ode, CarpenterKennedy2N54(williamson_condition=false),
save_everystep=false, callback=callbacks);
# Get the last time index and work with that.
-integrator.iter = load_timestep(restart_filename)
-integrator.stats.naccept = integrator.iter
+load_timestep!(integrator, restart_filename)
###############################################################################
# run the simulation
diff --git a/examples/structured_3d_dgsem/elixir_advection_restart.jl b/examples/structured_3d_dgsem/elixir_advection_restart.jl
index 39d28848c77..921c5310340 100644
--- a/examples/structured_3d_dgsem/elixir_advection_restart.jl
+++ b/examples/structured_3d_dgsem/elixir_advection_restart.jl
@@ -32,8 +32,7 @@ integrator = init(ode, CarpenterKennedy2N54(williamson_condition=false),
save_everystep=false, callback=callbacks);
# Get the last time index and work with that.
-integrator.iter = load_timestep(restart_filename)
-integrator.stats.naccept = integrator.iter
+load_timestep!(integrator, restart_filename)
###############################################################################
diff --git a/examples/tree_2d_dgsem/elixir_advection_restart.jl b/examples/tree_2d_dgsem/elixir_advection_restart.jl
index 72efb7d0c84..771ec5aefe7 100644
--- a/examples/tree_2d_dgsem/elixir_advection_restart.jl
+++ b/examples/tree_2d_dgsem/elixir_advection_restart.jl
@@ -32,8 +32,7 @@ integrator = init(ode, alg,
save_everystep=false, callback=callbacks)
# Get the last time index and work with that.
-integrator.iter = load_timestep(restart_filename)
-integrator.stats.naccept = integrator.iter
+load_timestep!(integrator, restart_filename)
###############################################################################
# run the simulation
diff --git a/examples/tree_3d_dgsem/elixir_advection_restart.jl b/examples/tree_3d_dgsem/elixir_advection_restart.jl
index 3061f165874..b7835ed061f 100644
--- a/examples/tree_3d_dgsem/elixir_advection_restart.jl
+++ b/examples/tree_3d_dgsem/elixir_advection_restart.jl
@@ -31,8 +31,7 @@ integrator = init(ode, CarpenterKennedy2N54(williamson_condition=false),
save_everystep=false, callback=callbacks);
# Get the last time index and work with that.
-integrator.iter = load_timestep(restart_filename)
-integrator.stats.naccept = integrator.iter
+load_timestep!(integrator, restart_filename)
###############################################################################
diff --git a/examples/unstructured_2d_dgsem/elixir_euler_restart.jl b/examples/unstructured_2d_dgsem/elixir_euler_restart.jl
index b85cc2c6d70..6653f8662d9 100644
--- a/examples/unstructured_2d_dgsem/elixir_euler_restart.jl
+++ b/examples/unstructured_2d_dgsem/elixir_euler_restart.jl
@@ -33,8 +33,7 @@ integrator = init(ode, CarpenterKennedy2N54(williamson_condition=false),
save_everystep=false, callback=callbacks);
# Get the last time index and work with that.
-integrator.iter = load_timestep(restart_filename)
-integrator.stats.naccept = integrator.iter
+load_timestep!(integrator, restart_filename)
###############################################################################
diff --git a/src/Trixi.jl b/src/Trixi.jl
index ec4d20558e5..be43c45b93d 100644
--- a/src/Trixi.jl
+++ b/src/Trixi.jl
@@ -253,7 +253,7 @@ export SummaryCallback, SteadyStateCallback, AnalysisCallback, AliveCallback,
GlmSpeedCallback, LBMCollisionCallback, EulerAcousticsCouplingCallback,
TrivialCallback, AnalysisCallbackCoupled
-export load_mesh, load_time, load_timestep, load_dt
+export load_mesh, load_time, load_timestep, load_timestep!, load_dt
export ControllerThreeLevel, ControllerThreeLevelCombined,
IndicatorLöhner, IndicatorLoehner, IndicatorMax,
diff --git a/src/callbacks_step/save_restart.jl b/src/callbacks_step/save_restart.jl
index f567a5c7fda..06817a9b730 100644
--- a/src/callbacks_step/save_restart.jl
+++ b/src/callbacks_step/save_restart.jl
@@ -141,6 +141,18 @@ function load_timestep(restart_file::AbstractString)
end
end
+"""
+ load_timestep!(integrator, restart_file::AbstractString)
+
+Load the time step number saved in a `restart_file` and assign it to both the time step
+number and the number of accepted steps
+(`iter` and `stats.naccept` in OrdinaryDiffEq.jl, respectively) in `integrator`.
+"""
+function load_timestep!(integrator, restart_file::AbstractString)
+ integrator.iter = load_timestep(restart_file)
+ integrator.stats.naccept = integrator.iter
+end
+
"""
load_dt(restart_file::AbstractString)
From bc6736183271ec191645e9a38f95790a76671d25 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Wed, 13 Sep 2023 09:07:41 +0200
Subject: [PATCH 27/36] fix allocations of P4estMesh2D BCs (#1636)
---
src/solvers/dgsem_p4est/dg_2d.jl | 4 ++--
test/test_p4est_2d.jl | 11 ++++++++++-
2 files changed, 12 insertions(+), 3 deletions(-)
diff --git a/src/solvers/dgsem_p4est/dg_2d.jl b/src/solvers/dgsem_p4est/dg_2d.jl
index 97b931fa325..a665aa4b19d 100644
--- a/src/solvers/dgsem_p4est/dg_2d.jl
+++ b/src/solvers/dgsem_p4est/dg_2d.jl
@@ -275,9 +275,9 @@ function prolong2boundaries!(cache, u,
return nothing
end
-function calc_boundary_flux!(cache, t, boundary_condition, boundary_indexing,
+function calc_boundary_flux!(cache, t, boundary_condition::BC, boundary_indexing,
mesh::Union{P4estMesh{2}, T8codeMesh{2}},
- equations, surface_integral, dg::DG)
+ equations, surface_integral, dg::DG) where {BC}
@unpack boundaries = cache
@unpack surface_flux_values = cache.elements
index_range = eachnode(dg)
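The signature change above (`boundary_condition::BC` with `where {BC}`) is the standard idiom for forcing Julia to specialize a method on the concrete type of a function-valued argument. By default, Julia may skip specialization for `Function`-typed arguments that are only passed through to another call, which can cause dynamic dispatch and the allocations this patch removes. A small sketch of the general mechanism (hypothetical functions, not from the patch):

```julia
# `passthrough` only forwards `f`, so Julia may compile it without specializing
# on the concrete type of `f`; the explicit type parameter forces specialization.
inner(f, x) = f(x)

passthrough(f, x) = inner(f, x)
passthrough_forced(f::F, x) where {F} = inner(f, x)
```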
diff --git a/test/test_p4est_2d.jl b/test/test_p4est_2d.jl
index c4ce2619e15..31dfe1d35a5 100644
--- a/test/test_p4est_2d.jl
+++ b/test/test_p4est_2d.jl
@@ -24,7 +24,7 @@ isdir(outdir) && rm(outdir, recursive=true)
l2 = [3.198940059144588e-5],
linf = [0.00030636069494005547])
- # Ensure that we do not have excessive memory allocations
+ # Ensure that we do not have excessive memory allocations
# (e.g., from type instabilities)
let
t = sol.t[end]
@@ -102,6 +102,15 @@ isdir(outdir) && rm(outdir, recursive=true)
l2 = [0.020291447969983396, 0.017479614254319948, 0.011387644425613437, 0.0514420126021293],
linf = [0.3582779022370579, 0.32073537890751663, 0.221818049107692, 0.9209559420400415],
tspan = (0.0, 0.15))
+
+ # Ensure that we do not have excessive memory allocations
+ # (e.g., from type instabilities)
+ let
+ t = sol.t[end]
+ u_ode = sol.u[end]
+ du_ode = similar(u_ode)
+ @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000
+ end
end
@trixi_testset "elixir_euler_forward_step_amr.jl" begin
From 547556dafd3c84ae8ed50fc1911e924cb4237468 Mon Sep 17 00:00:00 2001
From: Daniel Doehring
Date: Wed, 13 Sep 2023 10:58:33 +0200
Subject: [PATCH 28/36] Avoid slicing (#1637)
Co-authored-by: Hendrik Ranocha
---
examples/p4est_2d_dgsem/elixir_navierstokes_convergence.jl | 5 ++++-
examples/p4est_3d_dgsem/elixir_navierstokes_convergence.jl | 5 ++++-
2 files changed, 8 insertions(+), 2 deletions(-)
diff --git a/examples/p4est_2d_dgsem/elixir_navierstokes_convergence.jl b/examples/p4est_2d_dgsem/elixir_navierstokes_convergence.jl
index 8111df8251a..b0c6086ad63 100644
--- a/examples/p4est_2d_dgsem/elixir_navierstokes_convergence.jl
+++ b/examples/p4est_2d_dgsem/elixir_navierstokes_convergence.jl
@@ -170,7 +170,10 @@ end
initial_condition = initial_condition_navier_stokes_convergence_test
# BC types
-velocity_bc_top_bottom = NoSlip((x, t, equations) -> initial_condition_navier_stokes_convergence_test(x, t, equations)[2:3])
+velocity_bc_top_bottom = NoSlip() do x, t, equations
+ u = initial_condition_navier_stokes_convergence_test(x, t, equations)
+ return SVector(u[2], u[3])
+end
heat_bc_top_bottom = Adiabatic((x, t, equations) -> 0.0)
boundary_condition_top_bottom = BoundaryConditionNavierStokesWall(velocity_bc_top_bottom, heat_bc_top_bottom)
diff --git a/examples/p4est_3d_dgsem/elixir_navierstokes_convergence.jl b/examples/p4est_3d_dgsem/elixir_navierstokes_convergence.jl
index c426fe95f5b..0109e58dfb3 100644
--- a/examples/p4est_3d_dgsem/elixir_navierstokes_convergence.jl
+++ b/examples/p4est_3d_dgsem/elixir_navierstokes_convergence.jl
@@ -220,7 +220,10 @@ end
initial_condition = initial_condition_navier_stokes_convergence_test
# BC types
-velocity_bc_top_bottom = NoSlip((x, t, equations) -> initial_condition_navier_stokes_convergence_test(x, t, equations)[2:4])
+velocity_bc_top_bottom = NoSlip() do x, t, equations
+ u = initial_condition_navier_stokes_convergence_test(x, t, equations)
+ return SVector(u[2], u[3], u[4])
+end
heat_bc_top_bottom = Adiabatic((x, t, equations) -> 0.0)
boundary_condition_top_bottom = BoundaryConditionNavierStokesWall(velocity_bc_top_bottom, heat_bc_top_bottom)
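For context on the `NoSlip` changes in this patch: indexing an `SVector` with a plain range such as `u[2:4]` returns an ordinary, heap-allocated `Vector`, while constructing an `SVector` from the individual components keeps the result statically sized. A small sketch (assumes StaticArrays.jl, which Trixi.jl uses for node-level data):

```julia
using StaticArrays

u = SVector(1.0, 2.0, 3.0, 4.0)

u[2:4]                     # dynamic slice -> Vector{Float64}, allocates
SVector(u[2], u[3], u[4])  # SVector{3, Float64}, stack allocated
```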
From 32d837b0920c3cd9218f448080495c4a29d53566 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Wed, 13 Sep 2023 11:46:52 +0200
Subject: [PATCH 29/36] new tutorial on custom RHS functions and
semidiscretizations (#1633)
* fix list of tutorials
* WIP: tutorial on semidiscretizations
* WIP: tutorial on semidiscretizations
* custom RHS
* custom semidiscretization
* update make.jl script
* WIP: trying to make Literate.jl testsets safer
* fix
* fix reference
* comment on safe testsets
* some minor fixes
* the SciML ecosystem
* mention package versions
---
docs/literate/make.jl | 17 +-
.../src/files/custom_semidiscretization.jl | 324 ++++++++++++++++++
docs/literate/src/files/index.jl | 24 +-
docs/make.jl | 1 +
docs/src/callbacks.md | 4 +-
docs/src/overview.md | 2 +-
docs/src/parallelization.md | 6 +-
docs/src/performance.md | 2 +-
8 files changed, 366 insertions(+), 14 deletions(-)
create mode 100644 docs/literate/src/files/custom_semidiscretization.jl
diff --git a/docs/literate/make.jl b/docs/literate/make.jl
index b620f85c975..a04d8a0b333 100644
--- a/docs/literate/make.jl
+++ b/docs/literate/make.jl
@@ -51,12 +51,25 @@ function create_tutorials(files)
# Run tests on all tutorial files
@testset "TrixiTutorials" begin
for (i, (title, filename)) in enumerate(files)
+ # Evaluate each tutorial in its own module to avoid leaking of
+ # function/variable names, polluting the namespace of later tutorials
+ # by stuff defined in earlier tutorials.
if filename isa Vector # Several files of one topic
for j in eachindex(filename)
- @testset "$(filename[j][2][2])" begin include(joinpath(repo_src, filename[j][2][1], filename[j][2][2])) end
+ mod = gensym(filename[j][2][2])
+ @testset "$(filename[j][2][2])" begin
+ @eval module $mod
+ include(joinpath($repo_src, $(filename[j][2][1]), $(filename[j][2][2])))
+ end
+ end
end
else # Single files
- @testset "$title" begin include(joinpath(repo_src, filename)) end
+ mod = gensym(title)
+ @testset "$title" begin
+ @eval module $mod
+ include(joinpath($repo_src, $filename))
+ end
+ end
end
end
end
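The `make.jl` change above wraps each included tutorial in its own, freshly generated module so that functions and globals defined in one tutorial cannot leak into later ones. A minimal sketch of the isolation pattern (the file name is hypothetical):

```julia
# Evaluate an included file inside an anonymous module so its definitions
# do not pollute `Main` or collide with later test files.
mod = gensym("tutorial")
@eval module $mod
    include(joinpath(@__DIR__, "tutorial.jl"))  # hypothetical file
end
```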
diff --git a/docs/literate/src/files/custom_semidiscretization.jl b/docs/literate/src/files/custom_semidiscretization.jl
new file mode 100644
index 00000000000..fd432fb0826
--- /dev/null
+++ b/docs/literate/src/files/custom_semidiscretization.jl
@@ -0,0 +1,324 @@
+#src # Custom semidiscretizations
+
+# As described in the [overview section](@ref overview-semidiscretizations),
+# semidiscretizations are high-level descriptions of spatial discretizations
+# in Trixi.jl. Trixi.jl's main focus is on hyperbolic conservation
+# laws represented in a [`SemidiscretizationHyperbolic`](@ref).
+# Hyperbolic-parabolic problems based on the advection-diffusion equation or
+# the compressible Navier-Stokes equations can be represented in a
+# [`SemidiscretizationHyperbolicParabolic`](@ref). This is described in the
+# [basic tutorial on parabolic terms](@ref parabolic_terms) and its extension to
+# [custom parabolic terms](@ref adding_new_parabolic_terms).
+# In this tutorial, we will describe how these semidiscretizations work and how
+# they can be used to create custom semidiscretizations that also involve other tasks.
+
+
+# ## Overview of the right-hand side evaluation
+
+# The semidiscretizations provided by Trixi.jl are set up to create `ODEProblem`s from the
+# [SciML ecosystem for ordinary differential equations](https://diffeq.sciml.ai/latest/).
+# In particular, a spatial semidiscretization can be wrapped in an ODE problem
+# using [`semidiscretize`](@ref), which returns an `ODEProblem`. This `ODEProblem`
+# bundles an initial condition, a right-hand side (RHS) function, the time span,
+# and possible parameters. The `ODEProblem`s created by Trixi.jl use the semidiscretization
+# passed to [`semidiscretize`](@ref) as a parameter.
+# For a [`SemidiscretizationHyperbolic`](@ref), the `ODEProblem` wraps
+# `Trixi.rhs!` as ODE RHS.
+# For a [`SemidiscretizationHyperbolicParabolic`](@ref), Trixi.jl
+# uses a `SplitODEProblem` combining `Trixi.rhs_parabolic!` for the
+# (potentially) stiff part and `Trixi.rhs!` for the other part.
+
+
+# ## Standard Trixi.jl setup
+
+# In this tutorial, we will consider the linear advection equation
+# with source term
+# ```math
+# \partial_t u(t,x) + \partial_x u(t,x) = -\exp(-t) \sin\bigl(\pi (x - t) \bigr)
+# ```
+# with periodic boundary conditions in the domain `[-1, 1]` as a
+# model problem.
+# The initial condition is
+# ```math
+# u(0,x) = \sin(\pi x).
+# ```
+# The source term results in some damping and the analytical solution
+# ```math
+# u(t,x) = \exp(-t) \sin\bigl(\pi (x - t) \bigr).
+# ```
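+# Indeed, a short computation confirms that this function satisfies the PDE above:
+# ```math
+# \partial_t u + \partial_x u
+# = -\exp(-t) \sin\bigl(\pi (x - t) \bigr)
+#   - \pi \exp(-t) \cos\bigl(\pi (x - t) \bigr)
+#   + \pi \exp(-t) \cos\bigl(\pi (x - t) \bigr)
+# = -\exp(-t) \sin\bigl(\pi (x - t) \bigr).
+# ```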
+# First, we discretize this equation using the standard functionality
+# of Trixi.jl.
+
+using Trixi, OrdinaryDiffEq, Plots
+
+# The linear scalar advection equation is already implemented in
+# Trixi.jl as [`LinearScalarAdvectionEquation1D`](@ref). We construct
+# it with an advection velocity `1.0`.
+
+equations = LinearScalarAdvectionEquation1D(1.0)
+
+# Next, we use a standard [`DGSEM`](@ref) solver.
+
+solver = DGSEM(polydeg = 3)
+
+# We create a simple [`TreeMesh`](@ref) in 1D.
+
+coordinates_min = (-1.0,)
+coordinates_max = (+1.0,)
+mesh = TreeMesh(coordinates_min, coordinates_max;
+ initial_refinement_level = 4,
+ n_cells_max = 10^4,
+ periodicity = true)
+
+# We wrap everything in a semidiscretization and pass the source
+# terms as a standard Julia function. Please note that Trixi.jl uses
+# `SVector`s from
+# [StaticArrays.jl](https://github.com/JuliaArrays/StaticArrays.jl)
+# to store the conserved variables `u`. Thus, the return value of the
+# source terms must be wrapped in an `SVector` - even if we consider
+# just a scalar problem.
+
+function initial_condition(x, t, equations)
+ return SVector(exp(-t) * sinpi(x[1] - t))
+end
+
+function source_terms_standard(u, x, t, equations)
+ return -initial_condition(x, t, equations)
+end
+
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition,
+ solver;
+ source_terms = source_terms_standard)
+
+# Now, we can create the `ODEProblem`, solve the resulting ODE
+# using a time integration method from
+# [OrdinaryDiffEq.jl](https://github.com/SciML/OrdinaryDiffEq.jl),
+# and visualize the numerical solution at the final time using
+# [Plots.jl](https://github.com/JuliaPlots/Plots.jl).
+
+tspan = (0.0, 3.0)
+ode = semidiscretize(semi, tspan)
+
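+# As described above, the `ODEProblem` created by [`semidiscretize`](@ref)
+# stores the semidiscretization `semi` as its parameter, which we can check
+# explicitly.
+
+ode.p === semi
+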
+sol = solve(ode, RDPK3SpFSAL49(); ode_default_options()...)
+
+plot(sol; label = "numerical sol.", legend = :topright)
+
+# We can also plot the analytical solution for comparison.
+# Since Trixi.jl uses `SVector`s for the variables, we take their `first`
+# (and only) component to get the scalar value for manual plotting.
+
+let
+ x = range(-1.0, 1.0; length = 200)
+ plot!(x, first.(initial_condition.(x, sol.t[end], equations)),
+ label = "analytical sol.", linestyle = :dash, legend = :topright)
+end
+
+# We can also add the initial condition to the plot.
+
+plot!(sol.u[1], semi, label = "u0", linestyle = :dot, legend = :topleft)
+
+# You can of course also use some
+# [callbacks](https://trixi-framework.github.io/Trixi.jl/stable/callbacks/)
+# provided by Trixi.jl as usual.
+
+summary_callback = SummaryCallback()
+analysis_interval = 100
+analysis_callback = AnalysisCallback(semi; interval = analysis_interval)
+alive_callback = AliveCallback(; analysis_interval)
+callbacks = CallbackSet(summary_callback,
+ analysis_callback,
+ alive_callback)
+
+sol = solve(ode, RDPK3SpFSAL49();
+ ode_default_options()..., callback = callbacks)
+summary_callback()
+
+
+# ## Using a custom ODE right-hand side function
+
+# Next, we will solve the same problem but use our own ODE RHS function.
+# To demonstrate this, we will artificially create a global variable
+# containing the current time of the simulation.
+
+const GLOBAL_TIME = Ref(0.0)
+
+function source_terms_custom(u, x, t, equations)
+ t = GLOBAL_TIME[]
+ return -initial_condition(x, t, equations)
+end
+
+# Next, we create our own RHS function to update the global time of
+# the simulation before calling the RHS function from Trixi.jl.
+
+function rhs_source_custom!(du_ode, u_ode, semi, t)
+ GLOBAL_TIME[] = t
+ Trixi.rhs!(du_ode, u_ode, semi, t)
+end
+
+# Next, we create an `ODEProblem` manually, copying over the data from
+# the one we got from [`semidiscretize`](@ref) earlier.
+
+ode_source_custom = ODEProblem(rhs_source_custom!,
+ ode.u0,
+ ode.tspan,
+ ode.p #= semi =#)
+sol_source_custom = solve(ode_source_custom, RDPK3SpFSAL49();
+ ode_default_options()...)
+
+plot(sol_source_custom; label = "numerical sol.")
+let
+ x = range(-1.0, 1.0; length = 200)
+ plot!(x, first.(initial_condition.(x, sol_source_custom.t[end], equations)),
+ label = "analytical sol.", linestyle = :dash, legend = :topleft)
+end
+plot!(sol_source_custom.u[1], semi, label = "u0", linestyle = :dot, legend = :topleft)
+
+# This also works with callbacks as usual.
+
+summary_callback = SummaryCallback()
+analysis_interval = 100
+analysis_callback = AnalysisCallback(semi; interval = analysis_interval)
+alive_callback = AliveCallback(; analysis_interval)
+callbacks = CallbackSet(summary_callback,
+ analysis_callback,
+ alive_callback)
+
+sol = solve(ode_source_custom, RDPK3SpFSAL49();
+ ode_default_options()..., callback = callbacks)
+summary_callback()
+
+
+# ## Setting up a custom semidiscretization
+
+# Using a global constant is of course not really nice from a software
+# engineering point of view. Instead, it can often be useful to collect
+# additional data in the parameters of the `ODEProblem`. Thus, it is
+# time to create our own semidiscretization. Here, we create a small
+# wrapper of a standard semidiscretization of Trixi.jl and the current
+# global time of the simulation.
+
+struct CustomSemidiscretization{Semi, T} <: Trixi.AbstractSemidiscretization
+ semi::Semi
+ t::T
+end
+
+semi_custom = CustomSemidiscretization(semi, Ref(0.0))
+
+# To get pretty printing in the REPL, you can consider specializing
+#
+# - `Base.show(io::IO, parameters::CustomSemidiscretization)`
+# - `Base.show(io::IO, ::MIME"text/plain", parameters::CustomSemidiscretization)`
+#
+# for your custom semidiscretization.
+
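+# For example, a minimal sketch of such a specialization (printing only the
+# wrapped time; the exact format is up to you) could look as follows.
+
+function Base.show(io::IO, parameters::CustomSemidiscretization)
+    print(io, "CustomSemidiscretization(t = ", parameters.t[], ")")
+end
+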
+# Next, we create our own source terms that use the global time stored
+# in the custom semidiscretization.
+
+source_terms_custom_semi = let semi_custom = semi_custom
+ function source_terms_custom_semi(u, x, t, equations)
+ t = semi_custom.t[]
+ return -initial_condition(x, t, equations)
+ end
+end
+
+# We also create a custom ODE RHS to update the current global time
+# stored in the custom semidiscretization. We unpack the standard
+# semidiscretization created by Trixi.jl and pass it to `Trixi.rhs!`.
+
+function rhs_semi_custom!(du_ode, u_ode, semi_custom, t)
+ semi_custom.t[] = t
+ Trixi.rhs!(du_ode, u_ode, semi_custom.semi, t)
+end
+
+# Finally, we set up an `ODEProblem` and solve it numerically.
+
+ode_semi_custom = ODEProblem(rhs_semi_custom!,
+ ode.u0,
+ ode.tspan,
+ semi_custom)
+sol_semi_custom = solve(ode_semi_custom, RDPK3SpFSAL49();
+ ode_default_options()...)
+
+# If we want to make use of additional functionality provided by
+# Trixi.jl, e.g., for plotting, we need to implement a few additional
+# specializations. In this case, we forward everything to the standard
+# semidiscretization provided by Trixi.jl wrapped in our custom
+# semidiscretization.
+
+Base.ndims(semi::CustomSemidiscretization) = ndims(semi.semi)
+function Trixi.mesh_equations_solver_cache(semi::CustomSemidiscretization)
+ Trixi.mesh_equations_solver_cache(semi.semi)
+end
+
+# Now, we can plot the numerical solution as usual.
+
+plot(sol_semi_custom; label = "numerical sol.")
+let
+ x = range(-1.0, 1.0; length = 200)
+ plot!(x, first.(initial_condition.(x, sol_semi_custom.t[end], equations)),
+ label = "analytical sol.", linestyle = :dash, legend = :topleft)
+end
+plot!(sol_semi_custom.u[1], semi, label = "u0", linestyle = :dot, legend = :topleft)
+
+# This also works with many callbacks as usual. However, the
+# [`AnalysisCallback`](@ref) requires some special handling since it
+# makes use of a performance counter contained in the standard
+# semidiscretizations of Trixi.jl to report some
+# [performance metrics](@ref performance-metrics).
+# Here, we forward all accesses to the performance counter to the
+# wrapped semidiscretization.
+
+function Base.getproperty(semi::CustomSemidiscretization, s::Symbol)
+ if s === :performance_counter
+ wrapped_semi = getfield(semi, :semi)
+ wrapped_semi.performance_counter
+ else
+ getfield(semi, s)
+ end
+end
+
+# Moreover, the [`AnalysisCallback`](@ref) also performs some error
+# calculations. We also need to forward them to the wrapped
+# semidiscretization.
+
+function Trixi.calc_error_norms(func, u, t, analyzer,
+ semi::CustomSemidiscretization,
+ cache_analysis)
+ Trixi.calc_error_norms(func, u, t, analyzer,
+ semi.semi,
+ cache_analysis)
+end
+
+# Now, we can work with the callbacks used before as usual.
+
+summary_callback = SummaryCallback()
+analysis_interval = 100
+analysis_callback = AnalysisCallback(semi_custom;
+ interval = analysis_interval)
+alive_callback = AliveCallback(; analysis_interval)
+callbacks = CallbackSet(summary_callback,
+ analysis_callback,
+ alive_callback)
+
+sol = solve(ode_semi_custom, RDPK3SpFSAL49();
+ ode_default_options()..., callback = callbacks)
+summary_callback()
+
+# For even more advanced usage of custom semidiscretizations, you
+# may look at the source code of the ones contained in Trixi.jl, e.g.,
+# - [`SemidiscretizationHyperbolicParabolic`](@ref)
+# - [`SemidiscretizationEulerGravity`](@ref)
+# - [`SemidiscretizationEulerAcoustics`](@ref)
+# - [`SemidiscretizationCoupled`](@ref)
+
+
+# ## Package versions
+
+# These results were obtained using the following versions.
+
+using InteractiveUtils
+versioninfo()
+
+using Pkg
+Pkg.status(["Trixi", "OrdinaryDiffEq", "Plots"],
+ mode=PKGMODE_MANIFEST)
diff --git a/docs/literate/src/files/index.jl b/docs/literate/src/files/index.jl
index 0c8de66bf42..d42695611f6 100644
--- a/docs/literate/src/files/index.jl
+++ b/docs/literate/src/files/index.jl
@@ -76,21 +76,30 @@
# In this part, another physics model is implemented, the nonconservative linear advection equation.
# We run two different simulations with different levels of refinement and compare the resulting errors.
-# ### [10 Adaptive mesh refinement](@ref adaptive_mesh_refinement)
+# ### [10 Parabolic terms](@ref parabolic_terms)
+#-
+# This tutorial describes how parabolic terms are implemented in Trixi.jl, e.g.,
+# to solve the advection-diffusion equation.
+
+# ### [11 Adding new parabolic terms](@ref adding_new_parabolic_terms)
+#-
+# This tutorial describes how new parabolic terms can be implemented using Trixi.jl.
+
+# ### [12 Adaptive mesh refinement](@ref adaptive_mesh_refinement)
#-
# Adaptive mesh refinement (AMR) helps to increase the accuracy in sensitive or turbulent regions while
# not wasting resources for less interesting parts of the domain. This leads to much more efficient
# simulations. This tutorial presents the implementation strategy of AMR in Trixi.jl, including the use of
# different indicators and controllers.
-# ### [11 Structured mesh with curvilinear mapping](@ref structured_mesh_mapping)
+# ### [13 Structured mesh with curvilinear mapping](@ref structured_mesh_mapping)
#-
# In this tutorial, the use of Trixi.jl's structured curved mesh type [`StructuredMesh`](@ref) is explained.
# We present the two basic options to initialize such a mesh. First, the curved domain boundaries
# of a circular cylinder are set by explicit boundary functions. Then, a fully curved mesh is
# defined by passing the transformation mapping.
-# ### [12 Unstructured meshes with HOHQMesh.jl](@ref hohqmesh_tutorial)
+# ### [14 Unstructured meshes with HOHQMesh.jl](@ref hohqmesh_tutorial)
#-
# The purpose of this tutorial is to demonstrate how to use the [`UnstructuredMesh2D`](@ref)
# functionality of Trixi.jl. This begins by running and visualizing an available unstructured
@@ -99,19 +108,24 @@
# software in the Trixi.jl ecosystem, and then run a simulation using Trixi.jl on said mesh.
# In the end, the tutorial briefly explains how to simulate an example using AMR via `P4estMesh`.
-# ### [13 Explicit time stepping](@ref time_stepping)
+# ### [15 Explicit time stepping](@ref time_stepping)
#-
# This tutorial is about time integration using [OrdinaryDiffEq.jl](https://github.com/SciML/OrdinaryDiffEq.jl).
# It explains how to use their algorithms and presents two types of time step choices - with error-based
# and CFL-based adaptive step size control.
-# ### [14 Differentiable programming](@ref differentiable_programming)
+# ### [16 Differentiable programming](@ref differentiable_programming)
#-
# This part deals with some basic differentiable programming topics. For example, a Jacobian, its
# eigenvalues and a curve of total energy (through the simulation) are calculated and plotted for
# a few semidiscretizations. Moreover, we calculate an example for propagating errors with Measurement.jl
# at the end.
+# ### [17 Custom semidiscretization](@ref custom_semidiscretization)
+#-
+# This tutorial describes the [semidiscretizations](@ref overview-semidiscretizations) of Trixi.jl
+# and explains how to extend them for custom tasks.
+
# ## Examples in Trixi.jl
diff --git a/docs/make.jl b/docs/make.jl
index 57629577ddb..f882fcf1219 100644
--- a/docs/make.jl
+++ b/docs/make.jl
@@ -68,6 +68,7 @@ files = [
# Topic: other stuff
"Explicit time stepping" => "time_stepping.jl",
"Differentiable programming" => "differentiable_programming.jl",
+ "Custom semidiscretizations" => "custom_semidiscretization.jl"
]
tutorials = create_tutorials(files)
diff --git a/docs/src/callbacks.md b/docs/src/callbacks.md
index 1d3e5e34b51..7f44dfd5925 100644
--- a/docs/src/callbacks.md
+++ b/docs/src/callbacks.md
@@ -30,7 +30,7 @@ An example elixir using AMR can be found at [`examples/tree_2d_dgsem/elixir_adve
The [`AnalysisCallback`](@ref) can be used to analyze the numerical solution, e.g. calculate
errors or user-specified integrals, and print the results to the screen. The results can also be
saved in a file. An example can be found at [`examples/tree_2d_dgsem/elixir_euler_vortex.jl`](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_dgsem/elixir_euler_vortex.jl).
-In [Performance metrics of the `AnalysisCallback`](@ref) you can find a detailed
+In [Performance metrics of the `AnalysisCallback`](@ref performance-metrics) you can find a detailed
description of the different performance metrics the `AnalysisCallback` computes.
### I/O
@@ -106,7 +106,7 @@ will yield the following plot:
the automated performance measurements, including an output of the recorded timers after a simulation.
* The [`VisualizationCallback`](@ref) can be used for in-situ visualization. See
[Visualizing results during a simulation](@ref).
-* The [`TrivialCallback`](@ref) does nothing and can be used to to easily disable some callbacks
+* The [`TrivialCallback`](@ref) does nothing and can be used to easily disable some callbacks
via [`trixi_include`](@ref).
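+
+For example, the following sketch (assuming the included elixir defines a `save_solution`
+callback variable, as e.g. the elixir returned by [`default_example`](@ref) does)
+replaces the [`SaveSolutionCallback`](@ref) with a [`TrivialCallback`](@ref) and
+thus disables saving the solution:
+```julia
+using Trixi
+trixi_include(default_example(), save_solution = TrivialCallback())
+```
+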
### Equation-specific callbacks
diff --git a/docs/src/overview.md b/docs/src/overview.md
index 46bc28b6025..51a6272ae8e 100644
--- a/docs/src/overview.md
+++ b/docs/src/overview.md
@@ -16,7 +16,7 @@ to solve a PDE numerically are the spatial semidiscretization and the time
integration scheme.
-## Semidiscretizations
+## [Semidiscretizations](@id overview-semidiscretizations)
Semidiscretizations are high-level descriptions of spatial discretizations
specialized for certain PDEs. Trixi.jl's main focus is on hyperbolic conservation
diff --git a/docs/src/parallelization.md b/docs/src/parallelization.md
index d56777c9af4..e55471bb256 100644
--- a/docs/src/parallelization.md
+++ b/docs/src/parallelization.md
@@ -22,7 +22,7 @@ julia --threads=4
If both the environment variable and the command line argument are specified at
the same time, the latter takes precedence.
-If you use time integration methods from
+If you use time integration methods from
[OrdinaryDiffEq.jl](https://github.com/SciML/OrdinaryDiffEq.jl)
and want to use multiple threads therein, you need to set the keyword argument
`thread=OrdinaryDiffEq.True()` of the algorithms, as described in the
@@ -143,7 +143,7 @@ To start Trixi.jl in parallel with MPI, there are three options:
Switching between panes can be done by `Ctrl+b` followed by `o`.
As of March 2022, newer versions of tmpi also support mpich, which is the default
backend of MPI.jl (via MPICH_jll.jl). To use this setup, you need to install
- `mpiexecjl` as described in the
+ `mpiexecjl` as described in the
[documentation of MPI.jl](https://juliaparallel.org/MPI.jl/v0.20/usage/#Julia-wrapper-for-mpiexec)
and make it available as `mpirun`, e.g., via a symlink of the form
```bash
@@ -161,7 +161,7 @@ To start Trixi.jl in parallel with MPI, there are three options:
### [Performance](@id parallel_performance)
For information on how to evaluate the parallel performance of Trixi.jl, please
-have a look at the [Performance metrics of the `AnalysisCallback`](@ref)
+have a look at the [Performance metrics of the `AnalysisCallback`](@ref performance-metrics)
section, specifically at the descriptions of the performance index (PID).
diff --git a/docs/src/performance.md b/docs/src/performance.md
index 428672ec75f..bbe3a3390b7 100644
--- a/docs/src/performance.md
+++ b/docs/src/performance.md
@@ -170,7 +170,7 @@ As a rule of thumb:
- Consider using `@nospecialize` for methods like custom implementations of `Base.show`.
-## Performance metrics of the `AnalysisCallback`
+## [Performance metrics of the `AnalysisCallback`](@id performance-metrics)
The [`AnalysisCallback`](@ref) computes two performance indicators that you can use to
evaluate the serial and parallel performance of Trixi.jl. They represent
measured run times that are normalized by the number of `rhs!` evaluations and
From b942775af0677ac83d77934c2326ab2b5db5ba77 Mon Sep 17 00:00:00 2001
From: Krissh Chawla <127906314+KrisshChawla@users.noreply.github.com>
Date: Wed, 13 Sep 2023 20:08:08 -0500
Subject: [PATCH 30/36] Adding quasi 1d shallow water equations (#1619)
* implementation of quasi shallow water equations 1d.
* added example elixir for shallow_water_quasi_1d
* changed the names of Quasi1d equations
* including and exported ShallowWaterEquationsQuasi1D
* exporting flux_chan_etal and flux_chan_nonconservative_etal
* minor comment fix
* adding tests
* Apply suggestions from code review
* Apply suggestions from code review
* Update src/equations/shallow_water_quasi_1d.jl
* formatting
* formatting
* forgot comma
* Apply suggestions from code review
Co-authored-by: Hendrik Ranocha
* renamed example elixir to elixir_shallow_water_quasi_1d_source_terms.jl
* Apply suggestions from code review
Co-authored-by: Andrew Winters
* Update test_tree_1d_shallowwater.jl with renamed example elixir
* comment fix
* comment fix for elixir_shallow_water_quasi_1d_source_terms.jl
* Added well-balancedness test for shallow_water_quasi_1d
The initial condition in the elixir is intended to test a discontinuous channel width 'a(x)' and bottom topography 'b(x)' on a periodic mesh.
* Added 'max_abs_speeds' function and 'lake_at_rest_error'
* Updated test_tree_1d_shallowwater with quasi well-balancedness test
* File name fix in test_tree_1d_shallowwater
* Update examples/tree_1d_dgsem/elixir_shallowwater_quasi1d_well_balanced.jl
Co-authored-by: Jesse Chan <1156048+jlchan@users.noreply.github.com>
* Renamed to "elixir_shallowwater_quasi_1d_well_balanced.jl"
---------
Co-authored-by: Jesse Chan
Co-authored-by: Jesse Chan <1156048+jlchan@users.noreply.github.com>
Co-authored-by: Hendrik Ranocha
Co-authored-by: Andrew Winters
---
...xir_shallow_water_quasi_1d_source_terms.jl | 60 ++++
...xir_shallowwater_quasi_1d_well_balanced.jl | 84 +++++
src/Trixi.jl | 2 +
src/equations/equations.jl | 1 +
src/equations/shallow_water_quasi_1d.jl | 323 ++++++++++++++++++
test/test_tree_1d_shallowwater.jl | 14 +
6 files changed, 484 insertions(+)
create mode 100644 examples/tree_1d_dgsem/elixir_shallow_water_quasi_1d_source_terms.jl
create mode 100644 examples/tree_1d_dgsem/elixir_shallowwater_quasi_1d_well_balanced.jl
create mode 100644 src/equations/shallow_water_quasi_1d.jl
diff --git a/examples/tree_1d_dgsem/elixir_shallow_water_quasi_1d_source_terms.jl b/examples/tree_1d_dgsem/elixir_shallow_water_quasi_1d_source_terms.jl
new file mode 100644
index 00000000000..72747c669e2
--- /dev/null
+++ b/examples/tree_1d_dgsem/elixir_shallow_water_quasi_1d_source_terms.jl
@@ -0,0 +1,60 @@
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# Semidiscretization of the quasi 1d shallow water equations
+# See Chan et al. https://doi.org/10.48550/arXiv.2307.12089 for details
+
+equations = ShallowWaterEquationsQuasi1D(gravity_constant = 9.81)
+
+initial_condition = initial_condition_convergence_test
+
+###############################################################################
+# Get the DG approximation space
+
+volume_flux = (flux_chan_etal, flux_nonconservative_chan_etal)
+surface_flux = (FluxPlusDissipation(flux_chan_etal, DissipationLocalLaxFriedrichs()),
+ flux_nonconservative_chan_etal)
+solver = DGSEM(polydeg = 3, surface_flux = surface_flux,
+ volume_integral = VolumeIntegralFluxDifferencing(volume_flux))
+
+###############################################################################
+# Get the TreeMesh and setup a periodic mesh
+
+coordinates_min = 0.0
+coordinates_max = sqrt(2.0)
+mesh = TreeMesh(coordinates_min, coordinates_max,
+ initial_refinement_level = 3,
+ n_cells_max = 10_000,
+ periodicity = true)
+
+# create the semi discretization object
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver,
+ source_terms = source_terms_convergence_test)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+tspan = (0.0, 1.0)
+ode = semidiscretize(semi, tspan)
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 500
+analysis_callback = AnalysisCallback(semi, interval = analysis_interval)
+
+alive_callback = AliveCallback(analysis_interval = analysis_interval)
+
+save_solution = SaveSolutionCallback(interval = 200,
+ save_initial_solution = true,
+ save_final_solution = true)
+
+callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution)
+
+###############################################################################
+# run the simulation
+
+# use a Runge-Kutta method with automatic (error based) time step size control
+sol = solve(ode, RDPK3SpFSAL49(); abstol = 1.0e-8, reltol = 1.0e-8,
+ ode_default_options()..., callback = callbacks);
+summary_callback() # print the timer summary
diff --git a/examples/tree_1d_dgsem/elixir_shallowwater_quasi_1d_well_balanced.jl b/examples/tree_1d_dgsem/elixir_shallowwater_quasi_1d_well_balanced.jl
new file mode 100644
index 00000000000..d9f1a52b500
--- /dev/null
+++ b/examples/tree_1d_dgsem/elixir_shallowwater_quasi_1d_well_balanced.jl
@@ -0,0 +1,84 @@
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# semidiscretization of the shallow water equations with a discontinuous
+# bottom topography function and channel width function
+
+equations = ShallowWaterEquationsQuasi1D(gravity_constant = 9.81, H0 = 2.0)
+
+# Setup a truly discontinuous bottom topography function and channel width for
+# this academic testcase of well-balancedness. The errors from the analysis
+# callback are not important but the error for this lake-at-rest test case
+# `∑|H0-(h+b)|` should be around machine roundoff.
+# Works as intended for TreeMesh1D with `initial_refinement_level=3`. If the mesh
+# refinement level is changed, the initial condition below may need to be changed as well
+# to ensure that the discontinuities lie on an element interface.
+function initial_condition_discontinuous_well_balancedness(x, t,
+ equations::ShallowWaterEquationsQuasi1D)
+ H = equations.H0
+ v = 0.0
+
+ # For a periodic domain, this choice of `b` and `a` mimics a
+ # discontinuity across the periodic boundary.
+ b = 0.5 * (x[1] + 1)
+ a = 2 + x[1]
+
+ return prim2cons(SVector(H, v, b, a), equations)
+end
+
+initial_condition = initial_condition_discontinuous_well_balancedness
+
+###############################################################################
+# Get the DG approximation space
+
+volume_flux = (flux_chan_etal, flux_nonconservative_chan_etal)
+surface_flux = volume_flux
+solver = DGSEM(polydeg = 4, surface_flux = surface_flux,
+ volume_integral = VolumeIntegralFluxDifferencing(volume_flux))
+
+###############################################################################
+# Get the TreeMesh and setup a periodic mesh
+
+coordinates_min = -1.0
+coordinates_max = 1.0
+mesh = TreeMesh(coordinates_min, coordinates_max,
+ initial_refinement_level = 3,
+ n_cells_max = 10_000)
+
+# Create the semi discretization object
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver)
+
+###############################################################################
+# ODE solver
+
+tspan = (0.0, 100.0)
+ode = semidiscretize(semi, tspan)
+
+###############################################################################
+# Callbacks
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 1000
+analysis_callback = AnalysisCallback(semi, interval = analysis_interval,
+ extra_analysis_integrals = (lake_at_rest_error,))
+
+alive_callback = AliveCallback(analysis_interval = analysis_interval)
+
+save_solution = SaveSolutionCallback(interval = 1000,
+ save_initial_solution = true,
+ save_final_solution = true)
+
+stepsize_callback = StepsizeCallback(cfl = 3.0)
+
+callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution,
+ stepsize_callback)
+
+###############################################################################
+# run the simulation
+
+sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false),
+ dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
+ save_everystep = false, callback = callbacks);
+summary_callback() # print the timer summary
diff --git a/src/Trixi.jl b/src/Trixi.jl
index be43c45b93d..c883c3bf19f 100644
--- a/src/Trixi.jl
+++ b/src/Trixi.jl
@@ -149,6 +149,7 @@ export AcousticPerturbationEquations2D,
LatticeBoltzmannEquations2D, LatticeBoltzmannEquations3D,
ShallowWaterEquations1D, ShallowWaterEquations2D,
ShallowWaterTwoLayerEquations1D, ShallowWaterTwoLayerEquations2D,
+ ShallowWaterEquationsQuasi1D,
LinearizedEulerEquations2D
export LaplaceDiffusion1D, LaplaceDiffusion2D,
@@ -164,6 +165,7 @@ export flux, flux_central, flux_lax_friedrichs, flux_hll, flux_hllc, flux_hlle,
flux_kennedy_gruber, flux_shima_etal, flux_ec,
flux_fjordholm_etal, flux_nonconservative_fjordholm_etal, flux_es_fjordholm_etal,
flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal,
+ flux_chan_etal, flux_nonconservative_chan_etal,
hydrostatic_reconstruction_audusse_etal, flux_nonconservative_audusse_etal,
# TODO: TrixiShallowWater: move anything with "chen_noelle" to new file
hydrostatic_reconstruction_chen_noelle, flux_nonconservative_chen_noelle,
diff --git a/src/equations/equations.jl b/src/equations/equations.jl
index 570a25cece9..9bae563d85f 100644
--- a/src/equations/equations.jl
+++ b/src/equations/equations.jl
@@ -356,6 +356,7 @@ include("shallow_water_1d.jl")
include("shallow_water_2d.jl")
include("shallow_water_two_layer_1d.jl")
include("shallow_water_two_layer_2d.jl")
+include("shallow_water_quasi_1d.jl")
# CompressibleEulerEquations
abstract type AbstractCompressibleEulerEquations{NDIMS, NVARS} <:
diff --git a/src/equations/shallow_water_quasi_1d.jl b/src/equations/shallow_water_quasi_1d.jl
new file mode 100644
index 00000000000..217a764e173
--- /dev/null
+++ b/src/equations/shallow_water_quasi_1d.jl
@@ -0,0 +1,323 @@
+# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
+# Since these FMAs can increase the performance of many numerical algorithms,
+# we need to opt-in explicitly.
+# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
+@muladd begin
+#! format: noindent
+
+@doc raw"""
+ ShallowWaterEquationsQuasi1D(; gravity_constant, H0 = 0, threshold_limiter = nothing, threshold_wet = nothing)
+
+The quasi-1D shallow water equations (SWE). The equations are given by
+```math
+\begin{aligned}
+ \frac{\partial}{\partial t}(a h) + \frac{\partial}{\partial x}(a h v) &= 0 \\
+ \frac{\partial}{\partial t}(a h v) + \frac{\partial}{\partial x}(a h v^2)
+ + g a h \frac{\partial}{\partial x}(h + b) &= 0
+\end{aligned}
+```
+The unknown quantities of the Quasi-1D SWE are the water height ``h`` and the scaled velocity ``v``.
+The gravitational constant is denoted by ``g``, the (possibly) variable bottom topography function by ``b(x)``, and the (possibly) variable channel width by ``a(x)``. The water height ``h`` is measured from the bottom topography ``b``; therefore, one also defines the total water height as ``H = h + b``.
+
+The additional quantity ``H_0`` is also available to store a reference value for the total water height that
+is useful to set initial conditions or test the "lake-at-rest" well-balancedness.
+
+Also, there are two thresholds which prevent numerical problems as well as instabilities. Neither of them
+has to be passed, as default values are defined within the struct. The first one, `threshold_limiter`, is
+used in [`PositivityPreservingLimiterShallowWater`](@ref) on the water height, as a (small) shift on the initial
+condition and cutoff before the next time step. The second one, `threshold_wet`, is applied on the water height to
+define when the flow is "wet" before calculating the numerical flux.
+
+The bottom topography function ``b(x)`` and channel width ``a(x)`` are set inside the initial condition routine
+for a particular problem setup. To test the conservative form of the SWE one can set the bottom topography
+variable ``b`` to zero and ``a`` to one.
+
+In addition to the unknowns, Trixi.jl currently stores the bottom topography and channel width values at the approximation points
+despite being fixed in time. This is done for convenience of computing the bottom topography gradients
+on the fly during the approximation as well as computing auxiliary quantities like the total water height ``H``
+or the entropy variables.
+This affects the implementation and use of these equations in various ways:
+* The flux values corresponding to the bottom topography and channel width must be zero.
+* The bottom topography and channel width values must be included when defining initial conditions, boundary conditions or
+ source terms.
+* [`AnalysisCallback`](@ref) analyzes this variable.
+* Trixi.jl's visualization tools will visualize the bottom topography and channel width by default.
+"""
+struct ShallowWaterEquationsQuasi1D{RealT <: Real} <:
+ AbstractShallowWaterEquations{1, 4}
+ gravity::RealT # gravitational constant
+ H0::RealT # constant "lake-at-rest" total water height
+ # `threshold_limiter` used in `PositivityPreservingLimiterShallowWater` on water height,
+ # as a (small) shift on the initial condition and cutoff before the next time step.
+ # Default is 500*eps() which in double precision is ≈1e-13.
+ threshold_limiter::RealT
+ # `threshold_wet` applied on water height to define when the flow is "wet"
+ # before calculating the numerical flux.
+ # Default is 5*eps() which in double precision is ≈1e-15.
+ threshold_wet::RealT
+end
+
+# Allow for flexibility to set the gravitational constant within an elixir depending on the
+# application where `gravity_constant=1.0` or `gravity_constant=9.81` are common values.
+# The reference total water height H0 defaults to 0.0 but is used for the "lake-at-rest"
+# well-balancedness test cases.
+# Strict default values for thresholds that performed well in many numerical experiments
+function ShallowWaterEquationsQuasi1D(; gravity_constant, H0 = zero(gravity_constant),
+ threshold_limiter = nothing,
+ threshold_wet = nothing)
+ T = promote_type(typeof(gravity_constant), typeof(H0))
+ if threshold_limiter === nothing
+ threshold_limiter = 500 * eps(T)
+ end
+ if threshold_wet === nothing
+ threshold_wet = 5 * eps(T)
+ end
+ ShallowWaterEquationsQuasi1D(gravity_constant, H0, threshold_limiter, threshold_wet)
+end
+
+have_nonconservative_terms(::ShallowWaterEquationsQuasi1D) = True()
+function varnames(::typeof(cons2cons), ::ShallowWaterEquationsQuasi1D)
+ ("a_h", "a_h_v", "b", "a")
+end
+# Note, we use the total water height, H = h + b, as the first primitive variable for easier
+# visualization and setting initial conditions
+varnames(::typeof(cons2prim), ::ShallowWaterEquationsQuasi1D) = ("H", "v", "b", "a")
+
+# Set initial conditions at physical location `x` for time `t`
+"""
+ initial_condition_convergence_test(x, t, equations::ShallowWaterEquationsQuasi1D)
+
+A smooth initial condition used for convergence tests in combination with
+[`source_terms_convergence_test`](@ref)
+(and [`BoundaryConditionDirichlet(initial_condition_convergence_test)`](@ref) in non-periodic domains).
+"""
+function initial_condition_convergence_test(x, t,
+ equations::ShallowWaterEquationsQuasi1D)
+ # Generates a manufactured solution.
+ # Some constants are chosen such that the function is periodic on the domain [0, sqrt(2)].
+ Omega = sqrt(2) * pi
+ H = 2.0 + 0.5 * sin(Omega * x[1] - t)
+ v = 0.25
+ b = 0.2 - 0.05 * sin(Omega * x[1])
+ a = 1 + 0.1 * cos(Omega * x[1])
+ return prim2cons(SVector(H, v, b, a), equations)
+end
+
+"""
+ source_terms_convergence_test(u, x, t, equations::ShallowWaterEquationsQuasi1D)
+
+Source terms used for convergence tests in combination with
+[`initial_condition_convergence_test`](@ref)
+(and [`BoundaryConditionDirichlet(initial_condition_convergence_test)`](@ref) in non-periodic domains).
+
+This manufactured solution source term is specifically designed for the bottom topography function
+`b(x) = 0.2 - 0.05 * sin(sqrt(2) * pi * x[1])` and channel width `a(x) = 1 + 0.1 * cos(sqrt(2) * pi * x[1])`
+as defined in [`initial_condition_convergence_test`](@ref).
+"""
+@inline function source_terms_convergence_test(u, x, t,
+ equations::ShallowWaterEquationsQuasi1D)
+ # Same settings as in `initial_condition_convergence_test`. Some derivatives simplify because
+ # the velocity of this manufactured solution is taken to be constant.
+ Omega = sqrt(2) * pi
+ H = 2.0 + 0.5 * sin(Omega * x[1] - t)
+ H_x = 0.5 * cos(Omega * x[1] - t) * Omega
+ H_t = -0.5 * cos(Omega * x[1] - t)
+
+ v = 0.25
+
+ b = 0.2 - 0.05 * sin(Omega * x[1])
+ b_x = -0.05 * cos(Omega * x[1]) * Omega
+
+ a = 1 + 0.1 * cos(Omega * x[1])
+ a_x = -0.1 * sin(Omega * x[1]) * Omega
+
+ du1 = a * H_t + v * (a_x * (H - b) + a * (H_x - b_x))
+ du2 = v * du1 + a * (equations.gravity * (H - b) * H_x)
+
+ return SVector(du1, du2, 0.0, 0.0)
+end
+
+# Calculate 1D flux for a single point
+# Note, the bottom topography and channel width have no flux
+@inline function flux(u, orientation::Integer, equations::ShallowWaterEquationsQuasi1D)
+ a_h, a_h_v, _, a = u
+ h = waterheight(u, equations)
+ v = velocity(u, equations)
+
+ p = 0.5 * a * equations.gravity * h^2
+
+ f1 = a_h_v
+ f2 = a_h_v * v + p
+
+ return SVector(f1, f2, zero(eltype(u)), zero(eltype(u)))
+end
+
+"""
+ flux_nonconservative_chan_etal(u_ll, u_rr, orientation::Integer,
+ equations::ShallowWaterEquationsQuasi1D)
+
+Non-symmetric two-point volume flux discretizing the nonconservative (source) term
+that contains the gradient of the bottom topography [`ShallowWaterEquationsQuasi1D`](@ref)
+and the channel width.
+
+Further details are available in the paper:
+- Jesse Chan, Khemraj Shukla, Xinhui Wu, Ruofeng Liu, Prani Nalluri (2023)
+ High order entropy stable schemes for the quasi-one-dimensional
+ shallow water and compressible Euler equations
+ [DOI: 10.48550/arXiv.2307.12089](https://doi.org/10.48550/arXiv.2307.12089)
+"""
+@inline function flux_nonconservative_chan_etal(u_ll, u_rr, orientation::Integer,
+ equations::ShallowWaterEquationsQuasi1D)
+ a_h_ll, _, b_ll, a_ll = u_ll
+ a_h_rr, _, b_rr, a_rr = u_rr
+
+ h_ll = waterheight(u_ll, equations)
+ h_rr = waterheight(u_rr, equations)
+
+ z = zero(eltype(u_ll))
+
+ return SVector(z, equations.gravity * a_ll * h_ll * (h_rr + b_rr), z, z)
+end
+
+"""
+ flux_chan_etal(u_ll, u_rr, orientation,
+ equations::ShallowWaterEquationsQuasi1D)
+
+Total energy conservative (mathematical entropy for quasi 1D shallow water equations) split form.
+When the bottom topography is nonzero this scheme will be well-balanced when used as a `volume_flux`.
+The `surface_flux` should still use, e.g., [`FluxPlusDissipation(flux_chan_etal, DissipationLocalLaxFriedrichs())`](@ref).
+
+Further details are available in the paper:
+- Jesse Chan, Khemraj Shukla, Xinhui Wu, Ruofeng Liu, Prani Nalluri (2023)
+ High order entropy stable schemes for the quasi-one-dimensional
+ shallow water and compressible Euler equations
+ [DOI: 10.48550/arXiv.2307.12089](https://doi.org/10.48550/arXiv.2307.12089)
+"""
+@inline function flux_chan_etal(u_ll, u_rr, orientation::Integer,
+ equations::ShallowWaterEquationsQuasi1D)
+ a_h_ll, a_h_v_ll, _, _ = u_ll
+ a_h_rr, a_h_v_rr, _, _ = u_rr
+
+ v_ll = velocity(u_ll, equations)
+ v_rr = velocity(u_rr, equations)
+
+ f1 = 0.5 * (a_h_v_ll + a_h_v_rr)
+ f2 = f1 * 0.5 * (v_ll + v_rr)
+
+ return SVector(f1, f2, zero(eltype(u_ll)), zero(eltype(u_ll)))
+end
+
+# Calculate maximum wave speed for local Lax-Friedrichs-type dissipation as the
+# maximum velocity magnitude plus the maximum speed of sound
+@inline function max_abs_speed_naive(u_ll, u_rr, orientation::Integer,
+ equations::ShallowWaterEquationsQuasi1D)
+ # Get the velocity quantities
+ v_ll = velocity(u_ll, equations)
+ v_rr = velocity(u_rr, equations)
+
+ # Calculate the wave celerity on the left and right
+ h_ll = waterheight(u_ll, equations)
+ h_rr = waterheight(u_rr, equations)
+ c_ll = sqrt(equations.gravity * h_ll)
+ c_rr = sqrt(equations.gravity * h_rr)
+
+ return max(abs(v_ll), abs(v_rr)) + max(c_ll, c_rr)
+end
+
+# Specialized `DissipationLocalLaxFriedrichs` to avoid spurious dissipation in the bottom topography
+# and channel width
+@inline function (dissipation::DissipationLocalLaxFriedrichs)(u_ll, u_rr,
+ orientation_or_normal_direction,
+ equations::ShallowWaterEquationsQuasi1D)
+ λ = dissipation.max_abs_speed(u_ll, u_rr, orientation_or_normal_direction,
+ equations)
+ diss = -0.5 * λ * (u_rr - u_ll)
+ return SVector(diss[1], diss[2], zero(eltype(u_ll)), zero(eltype(u_ll)))
+end
+
+@inline function max_abs_speeds(u, equations::ShallowWaterEquationsQuasi1D)
+ h = waterheight(u, equations)
+ v = velocity(u, equations)
+
+ c = sqrt(equations.gravity * h)
+ return (abs(v) + c,)
+end
+
+# Helper function to extract the velocity vector from the conservative variables
+@inline function velocity(u, equations::ShallowWaterEquationsQuasi1D)
+ a_h, a_h_v, _, _ = u
+
+ v = a_h_v / a_h
+
+ return v
+end
+
+# Convert conservative variables to primitive
+@inline function cons2prim(u, equations::ShallowWaterEquationsQuasi1D)
+ a_h, _, b, a = u
+ h = a_h / a
+ H = h + b
+ v = velocity(u, equations)
+ return SVector(H, v, b, a)
+end
+
+# Convert conservative variables to entropy variables
+# Note, only the first two are the entropy variables, the third and fourth entries still
+# just carry the bottom topography and channel width values for convenience
+@inline function cons2entropy(u, equations::ShallowWaterEquationsQuasi1D)
+ a_h, a_h_v, b, a = u
+ h = waterheight(u, equations)
+ v = velocity(u, equations)
+ # Entropy variables are the same as the ones in the standard shallow water equations
+ w1 = equations.gravity * (h + b) - 0.5 * v^2
+ w2 = v
+
+ return SVector(w1, w2, b, a)
+end
+
+# Convert primitive to conservative variables
+@inline function prim2cons(prim, equations::ShallowWaterEquationsQuasi1D)
+ H, v, b, a = prim
+
+ a_h = a * (H - b)
+ a_h_v = a_h * v
+ return SVector(a_h, a_h_v, b, a)
+end
+
+@inline function waterheight(u, equations::ShallowWaterEquationsQuasi1D)
+ return u[1] / u[4]
+end
+
+# Entropy function for the shallow water equations is the total energy
+@inline function entropy(cons, equations::ShallowWaterEquationsQuasi1D)
+ a = cons[4]
+ return a * energy_total(cons, equations)
+end
+
+# Calculate total energy for a conservative state `cons`
+@inline function energy_total(cons, equations::ShallowWaterEquationsQuasi1D)
+ a_h, a_h_v, b, a = cons
+ e = (a_h_v^2) / (2 * a * a_h) + 0.5 * equations.gravity * (a_h^2 / a) +
+ equations.gravity * a_h * b
+ return e
+end
+
+# Calculate the error for the "lake-at-rest" test case where H = h+b should
+# be a constant value over time. Note, assumes there is a single reference
+# water height `H0` with which to compare.
+#
+# TODO: TrixiShallowWater: where should `threshold_limiter` live? May need
+# to modify or have different versions of the `lake_at_rest_error` function
+@inline function lake_at_rest_error(u, equations::ShallowWaterEquationsQuasi1D)
+ _, _, b, _ = u
+ h = waterheight(u, equations)
+
+ # For well-balancedness testing with possible wet/dry regions the reference
+ # water height `H0` accounts for the possibility that the bottom topography
+# can emerge out of the water as well as for the threshold offset to avoid
+# division by a "hard" zero water height.
+ H0_wet_dry = max(equations.H0, b + equations.threshold_limiter)
+
+ return abs(H0_wet_dry - (h + b))
+end
+end # @muladd
diff --git a/test/test_tree_1d_shallowwater.jl b/test/test_tree_1d_shallowwater.jl
index 1e5aeac1786..09fb2d9e432 100644
--- a/test/test_tree_1d_shallowwater.jl
+++ b/test/test_tree_1d_shallowwater.jl
@@ -112,6 +112,20 @@ EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_1d_dgsem")
linf = [0.00041080213807871235, 0.00014823261488938177, 2.220446049250313e-16],
tspan = (0.0, 0.05))
end
+
+ @trixi_testset "elixir_shallow_water_quasi_1d_source_terms.jl" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallow_water_quasi_1d_source_terms.jl"),
+ l2 = [6.37048760275098e-5, 0.0002745658116815704, 4.436491725647962e-6, 8.872983451152218e-6],
+ linf = [0.00026747526881631956, 0.0012106730729152249, 9.098379777500165e-6, 1.8196759554278685e-5],
+ tspan = (0.0, 0.05))
+ end
+
+ @trixi_testset "elixir_shallowwater_quasi_1d_well_balanced.jl" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_quasi_1d_well_balanced.jl"),
+ l2 = [1.4250229186905198e-14, 2.495109919406496e-12, 7.408599286788738e-17, 2.7205812409138776e-16],
+ linf = [5.284661597215745e-14, 2.74056233065078e-12, 2.220446049250313e-16, 8.881784197001252e-16],
+ tspan = (0.0, 100.0))
+ end
end
end # module
From f7b09734e1a86077a35c9cab0b9e97e68aaeb232 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Thu, 14 Sep 2023 06:41:14 +0200
Subject: [PATCH 31/36] add package versions to tutorials (#1638)
---
docs/literate/src/files/DGMulti_1.jl | 12 ++++++++++++
docs/literate/src/files/DGMulti_2.jl | 12 ++++++++++++
docs/literate/src/files/DGSEM_FluxDiff.jl | 12 ++++++++++++
.../literate/src/files/adaptive_mesh_refinement.jl | 12 ++++++++++++
.../src/files/adding_new_parabolic_terms.jl | 12 ++++++++++++
.../src/files/adding_new_scalar_equations.jl | 12 ++++++++++++
.../src/files/adding_nonconservative_equation.jl | 12 ++++++++++++
.../src/files/differentiable_programming.jl | 12 ++++++++++++
docs/literate/src/files/hohqmesh_tutorial.jl | 12 ++++++++++++
docs/literate/src/files/non_periodic_boundaries.jl | 12 ++++++++++++
docs/literate/src/files/parabolic_terms.jl | 11 +++++++++++
.../src/files/scalar_linear_advection_1d.jl | 12 ++++++++++++
docs/literate/src/files/shock_capturing.jl | 12 ++++++++++++
docs/literate/src/files/structured_mesh_mapping.jl | 12 ++++++++++++
docs/literate/src/files/time_stepping.jl | 14 +++++++++++++-
docs/literate/src/files/upwind_fdsbp.jl | 12 ++++++++++++
16 files changed, 192 insertions(+), 1 deletion(-)
diff --git a/docs/literate/src/files/DGMulti_1.jl b/docs/literate/src/files/DGMulti_1.jl
index 0d78e79907c..5ef577e8eeb 100644
--- a/docs/literate/src/files/DGMulti_1.jl
+++ b/docs/literate/src/files/DGMulti_1.jl
@@ -194,3 +194,15 @@ plot(pd["rho"])
plot!(getmesh(pd))
# For more information, please have a look in the [StartUpDG.jl documentation](https://jlchan.github.io/StartUpDG.jl/stable/).
+
+
+# ## Package versions
+
+# These results were obtained using the following versions.
+
+using InteractiveUtils
+versioninfo()
+
+using Pkg
+Pkg.status(["Trixi", "StartUpDG", "OrdinaryDiffEq", "Plots"],
+ mode=PKGMODE_MANIFEST)
diff --git a/docs/literate/src/files/DGMulti_2.jl b/docs/literate/src/files/DGMulti_2.jl
index 92dce43cdab..06248562343 100644
--- a/docs/literate/src/files/DGMulti_2.jl
+++ b/docs/literate/src/files/DGMulti_2.jl
@@ -38,3 +38,15 @@ D = couple_continuously(legendre_derivative_operator(xmin=0.0, xmax=1.0, N=4),
# For more information and other SBP operators, see the documentations of [StartUpDG.jl](https://jlchan.github.io/StartUpDG.jl/dev/)
# and [SummationByPartsOperators.jl](https://ranocha.de/SummationByPartsOperators.jl/stable/).
+
+
+# ## Package versions
+
+# These results were obtained using the following versions.
+
+using InteractiveUtils
+versioninfo()
+
+using Pkg
+Pkg.status(["Trixi", "StartUpDG", "SummationByPartsOperators"],
+ mode=PKGMODE_MANIFEST)
diff --git a/docs/literate/src/files/DGSEM_FluxDiff.jl b/docs/literate/src/files/DGSEM_FluxDiff.jl
index 5ec156ebbe3..a5769900269 100644
--- a/docs/literate/src/files/DGSEM_FluxDiff.jl
+++ b/docs/literate/src/files/DGSEM_FluxDiff.jl
@@ -236,3 +236,15 @@ plot(sol)
# [`flux_chandrashekar`](@ref), [`flux_kennedy_gruber`](@ref).
# As surface flux you can use all volume fluxes and additionally for instance [`flux_lax_friedrichs`](@ref),
# [`flux_hll`](@ref), [`flux_hllc`](@ref).
+
+
+# ## Package versions
+
+# These results were obtained using the following versions.
+
+using InteractiveUtils
+versioninfo()
+
+using Pkg
+Pkg.status(["Trixi", "OrdinaryDiffEq", "Plots"],
+ mode=PKGMODE_MANIFEST)
diff --git a/docs/literate/src/files/adaptive_mesh_refinement.jl b/docs/literate/src/files/adaptive_mesh_refinement.jl
index d6150e887a8..46af8f79523 100644
--- a/docs/literate/src/files/adaptive_mesh_refinement.jl
+++ b/docs/literate/src/files/adaptive_mesh_refinement.jl
@@ -202,3 +202,15 @@ plot!(getmesh(pd))
# Source: Trixi.jl's YouTube channel [`Trixi Framework`](https://www.youtube.com/channel/UCpd92vU2HjjTPup-AIN0pkg)
# For more information, please have a look at the respective links.
+
+
+# ## Package versions
+
+# These results were obtained using the following versions.
+
+using InteractiveUtils
+versioninfo()
+
+using Pkg
+Pkg.status(["Trixi", "OrdinaryDiffEq", "Plots"],
+ mode=PKGMODE_MANIFEST)
diff --git a/docs/literate/src/files/adding_new_parabolic_terms.jl b/docs/literate/src/files/adding_new_parabolic_terms.jl
index 882f73f66ff..f5c2b815f33 100644
--- a/docs/literate/src/files/adding_new_parabolic_terms.jl
+++ b/docs/literate/src/files/adding_new_parabolic_terms.jl
@@ -158,3 +158,15 @@ sol = solve(ode, RDPK3SpFSAL49(); abstol=time_int_tol, reltol=time_int_tol,
using Plots
plot(sol)
+
+# ## Package versions
+
+# These results were obtained using the following versions.
+
+using InteractiveUtils
+versioninfo()
+
+using Pkg
+Pkg.status(["Trixi", "OrdinaryDiffEq", "Plots"],
+ mode=PKGMODE_MANIFEST)
+
diff --git a/docs/literate/src/files/adding_new_scalar_equations.jl b/docs/literate/src/files/adding_new_scalar_equations.jl
index fec7bcf667a..a65b4de7f1a 100644
--- a/docs/literate/src/files/adding_new_scalar_equations.jl
+++ b/docs/literate/src/files/adding_new_scalar_equations.jl
@@ -211,3 +211,15 @@ semi = remake(semi, solver=DGSEM(3, flux_godunov, VolumeIntegralFluxDifferencing
ode = semidiscretize(semi, (0.0, 0.5))
sol = solve(ode, SSPRK43(); ode_default_options()...)
plot(sol)
+
+
+# ## Package versions
+
+# These results were obtained using the following versions.
+
+using InteractiveUtils
+versioninfo()
+
+using Pkg
+Pkg.status(["Trixi", "OrdinaryDiffEq", "Plots"],
+ mode=PKGMODE_MANIFEST)
diff --git a/docs/literate/src/files/adding_nonconservative_equation.jl b/docs/literate/src/files/adding_nonconservative_equation.jl
index 110fa486070..b40e21fb11a 100644
--- a/docs/literate/src/files/adding_nonconservative_equation.jl
+++ b/docs/literate/src/files/adding_nonconservative_equation.jl
@@ -288,3 +288,15 @@ sol = solve(ode, Tsit5(), abstol=1.0e-6, reltol=1.0e-6,
## Plot the numerical solution at the final time
using Plots: plot
plot(sol);
+
+
+# ## Package versions
+
+# These results were obtained using the following versions.
+
+using InteractiveUtils
+versioninfo()
+
+using Pkg
+Pkg.status(["Trixi", "OrdinaryDiffEq", "Plots"],
+ mode=PKGMODE_MANIFEST)
diff --git a/docs/literate/src/files/differentiable_programming.jl b/docs/literate/src/files/differentiable_programming.jl
index 5c5a7cd7440..33427803afc 100644
--- a/docs/literate/src/files/differentiable_programming.jl
+++ b/docs/literate/src/files/differentiable_programming.jl
@@ -446,3 +446,15 @@ scatter(real.(λ), imag.(λ))
λ = eigvals(Matrix(A))
relative_maximum = maximum(real, λ) / maximum(abs, λ)
@test relative_maximum < 1.0e-15 #src
+
+
+# ## Package versions
+
+# These results were obtained using the following versions.
+
+using InteractiveUtils
+versioninfo()
+
+using Pkg
+Pkg.status(["Trixi", "OrdinaryDiffEq", "Plots", "ForwardDiff"],
+ mode=PKGMODE_MANIFEST)
diff --git a/docs/literate/src/files/hohqmesh_tutorial.jl b/docs/literate/src/files/hohqmesh_tutorial.jl
index 87076108d91..b19d363c4bf 100644
--- a/docs/literate/src/files/hohqmesh_tutorial.jl
+++ b/docs/literate/src/files/hohqmesh_tutorial.jl
@@ -566,3 +566,15 @@ mesh = UnstructuredMesh2D(mesh_file);
# for details.
# ![simulation_straight_sides_p4est_amr](https://user-images.githubusercontent.com/74359358/168049930-8abce6ac-cd47-4d04-b40b-0fa459bbd98d.png)
+
+
+# ## Package versions
+
+# These results were obtained using the following versions.
+
+using InteractiveUtils
+versioninfo()
+
+using Pkg
+Pkg.status(["Trixi", "OrdinaryDiffEq", "Plots", "Trixi2Vtk", "HOHQMesh"],
+ mode=PKGMODE_MANIFEST)
diff --git a/docs/literate/src/files/non_periodic_boundaries.jl b/docs/literate/src/files/non_periodic_boundaries.jl
index 54da88a64aa..7ed6324ff99 100644
--- a/docs/literate/src/files/non_periodic_boundaries.jl
+++ b/docs/literate/src/files/non_periodic_boundaries.jl
@@ -155,3 +155,15 @@ end
#
# ```
# Source: [`Video`](https://www.youtube.com/watch?v=w0A9X38cSe4) on Trixi.jl's YouTube channel [`Trixi Framework`](https://www.youtube.com/watch?v=WElqqdMhY4A)
+
+
+# ## Package versions
+
+# These results were obtained using the following versions.
+
+using InteractiveUtils
+versioninfo()
+
+using Pkg
+Pkg.status(["Trixi", "OrdinaryDiffEq", "Plots"],
+ mode=PKGMODE_MANIFEST)
diff --git a/docs/literate/src/files/parabolic_terms.jl b/docs/literate/src/files/parabolic_terms.jl
index bac0098f8e9..d0a355bbc19 100644
--- a/docs/literate/src/files/parabolic_terms.jl
+++ b/docs/literate/src/files/parabolic_terms.jl
@@ -86,3 +86,14 @@ sol = solve(ode, RDPK3SpFSAL49(); abstol=time_int_tol, reltol=time_int_tol,
using Plots
plot(sol)
+
+# ## Package versions
+
+# These results were obtained using the following versions.
+
+using InteractiveUtils
+versioninfo()
+
+using Pkg
+Pkg.status(["Trixi", "OrdinaryDiffEq", "Plots"],
+ mode=PKGMODE_MANIFEST)
diff --git a/docs/literate/src/files/scalar_linear_advection_1d.jl b/docs/literate/src/files/scalar_linear_advection_1d.jl
index 42c831c98ba..77ba7b087cc 100644
--- a/docs/literate/src/files/scalar_linear_advection_1d.jl
+++ b/docs/literate/src/files/scalar_linear_advection_1d.jl
@@ -511,3 +511,15 @@ sol_trixi = solve(ode_trixi, RDPK3SpFSAL49(); abstol=1.0e-6, reltol=1.0e-6, ode
plot!(sol_trixi, label="solution at t=$(tspan[2]) with Trixi.jl", legend=:topleft, linestyle=:dash, lw=2)
@test maximum(abs.(vec(u0) - sol_trixi.u[end])) ≈ maximum(abs.(u0 - sol.u[end])) #src
+
+
+# ## Package versions
+
+# These results were obtained using the following versions.
+
+using InteractiveUtils
+versioninfo()
+
+using Pkg
+Pkg.status(["Trixi", "OrdinaryDiffEq", "Plots"],
+ mode=PKGMODE_MANIFEST)
diff --git a/docs/literate/src/files/shock_capturing.jl b/docs/literate/src/files/shock_capturing.jl
index afa34cbf06a..dd6698c2a86 100644
--- a/docs/literate/src/files/shock_capturing.jl
+++ b/docs/literate/src/files/shock_capturing.jl
@@ -224,3 +224,15 @@ sol = solve(ode, CarpenterKennedy2N54(stage_limiter!, williamson_condition=false
using Plots
plot(sol)
+
+
+# ## Package versions
+
+# These results were obtained using the following versions.
+
+using InteractiveUtils
+versioninfo()
+
+using Pkg
+Pkg.status(["Trixi", "OrdinaryDiffEq", "Plots"],
+ mode=PKGMODE_MANIFEST)
diff --git a/docs/literate/src/files/structured_mesh_mapping.jl b/docs/literate/src/files/structured_mesh_mapping.jl
index 0ae9cf723f8..c8da30bc2bf 100644
--- a/docs/literate/src/files/structured_mesh_mapping.jl
+++ b/docs/literate/src/files/structured_mesh_mapping.jl
@@ -201,3 +201,15 @@ plot!(getmesh(pd))
# unstructured mesh type [`UnstructuredMesh2D`] and its use of the
# [High-Order Hex-Quad Mesh (HOHQMesh) generator](https://github.com/trixi-framework/HOHQMesh),
# created and developed by David Kopriva.
+
+
+# ## Package versions
+
+# These results were obtained using the following versions.
+
+using InteractiveUtils
+versioninfo()
+
+using Pkg
+Pkg.status(["Trixi", "OrdinaryDiffEq", "Plots"],
+ mode=PKGMODE_MANIFEST)
diff --git a/docs/literate/src/files/time_stepping.jl b/docs/literate/src/files/time_stepping.jl
index d400c4a94be..de7a2a83a41 100644
--- a/docs/literate/src/files/time_stepping.jl
+++ b/docs/literate/src/files/time_stepping.jl
@@ -49,7 +49,7 @@
# ```math
# \Delta t_n = \text{CFL} * \min_i \frac{\Delta x_i}{\lambda_{\max}(u_i^n)}
# ```
-# We compute $\Delta x_i$ by scaling the element size by a factor of $1/(N+1)$, cf.
+# We compute $\Delta x_i$ by scaling the element size by a factor of $1/(N+1)$, cf.
# [Gassner and Kopriva (2011)](https://doi.org/10.1137/100807211), Section 5.
# Trixi.jl provides such a CFL-based step size control. It is implemented as the callback
@@ -73,3 +73,15 @@
# You can find simple examples with a CFL-based step size control for instance in the elixirs
# [`elixir_advection_basic.jl`](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_dgsem/elixir_advection_basic.jl)
# or [`elixir_euler_source_terms.jl`](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_dgsem/elixir_euler_source_terms.jl).
+
+
+# ## Package versions
+
+# These results were obtained using the following versions.
+
+using InteractiveUtils
+versioninfo()
+
+using Pkg
+Pkg.status(["Trixi", "OrdinaryDiffEq"],
+ mode=PKGMODE_MANIFEST)
diff --git a/docs/literate/src/files/upwind_fdsbp.jl b/docs/literate/src/files/upwind_fdsbp.jl
index 36ca1b57404..6d3379fa30d 100644
--- a/docs/literate/src/files/upwind_fdsbp.jl
+++ b/docs/literate/src/files/upwind_fdsbp.jl
@@ -62,3 +62,15 @@ Matrix(D_upw.plus)
# flux vector splitting, e.g.,
# - [`elixir_euler_vortex.jl`](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_fdsbp/elixir_euler_vortex.jl)
# - [`elixir_euler_taylor_green_vortex.jl`](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_3d_fdsbp/elixir_euler_taylor_green_vortex.jl)
+
+
+# ## Package versions
+
+# These results were obtained using the following versions.
+
+using InteractiveUtils
+versioninfo()
+
+using Pkg
+Pkg.status(["Trixi", "SummationByPartsOperators"],
+ mode=PKGMODE_MANIFEST)
From 15543f28b7070dbc403857a047d5dc2ca2d0c1c3 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Thu, 14 Sep 2023 06:45:19 +0200
Subject: [PATCH 32/36] set version to v0.5.43
---
Project.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Project.toml b/Project.toml
index 06fd29ba590..943d0d48005 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "Trixi"
uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb"
authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "]
-version = "0.5.43-pre"
+version = "0.5.43"
[deps]
CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
From 92dedde81e12f8ac2259785a9eab40f475d82251 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Thu, 14 Sep 2023 06:45:34 +0200
Subject: [PATCH 33/36] set development version to v0.5.44-pre
---
Project.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Project.toml b/Project.toml
index 943d0d48005..d134a8e548b 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "Trixi"
uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb"
authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "]
-version = "0.5.43"
+version = "0.5.44-pre"
[deps]
CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
From 6069149fef0590c824ddee54b6d12d7531b6c790 Mon Sep 17 00:00:00 2001
From: ArseniyKholod <119304909+ArseniyKholod@users.noreply.github.com>
Date: Fri, 15 Sep 2023 08:10:03 +0200
Subject: [PATCH 34/36] Handle restarting of adaptive time integration
 methods (#1565)
* Create test.jl
* Delete test.jl
* loadcallback
* adding_parallel_support
* formatting
* minimize dependencies
* combine loadrestart and saverestart
* fix
* Update test_threaded.jl
* fix
* test fix
* fix
* MODULE add
* fix
* runtime macros
* Update test_threaded.jl
* handle MPI issues
* enable PIDController test
* Update test_mpi_tree.jl
* fix
* add asserts
* Update save_restart.jl
* add IController tests
* enable HDF5 parallel
* fix shot
* fix shot 2
* fix shot 3
* fix shot 4
* fix shot 5
* fix shot 6
* fix shot 7
* fix shot 8
* fix shot 9
* fix shot 10
* fix shot 11
* fix shot 12
* fix shot 13
* fix shot 14
* fix shot 15
* fix shot 16
* fix shot 17
* fix shot 18
* enable additional configuration only in mpi test on linux
* enable environment
* test coverage issue
* disable MPI macOS CI because of failure
* disable new configurations to test coverage
* disable PID and I test to test coverage issue
* enable old coverage all
* undo last commit and enable coverage on windows
* enable new tests, MPI on macOS, and HDF5 parallel
* fix
* enable coverage on threads
* test HDF5 parallel
* test HDF5 parallel 2
* test HDF5 parallel 3
* fix
* Update save_restart_dg.jl
* test HDF5 parallel 4
* test HDF5 parallel 5
* Update configure_packages.jl
* delete unnecessary changes
* Update save_restart_dg.jl
* Update save_restart_dg.jl
* remove dependency on OrdinaryDiffEq
* format
* discard unrelated changes
* delete barrier
* delete eval()
* comments & delete mpi_parallel
* format
* Update runtests.jl
* Update runtests.jl
* simplify tests
* test failing MPI on Windows and macOS
* test with RDPK3SpFSAL49
* test with RDPK3SpFSAL35
* change tests
* fix and new test
* Update test_tree_2d_euler.jl
* fix and delete unnecessary test
* add printing format
* add docstrings
* Update src/callbacks_step/save_restart.jl
Co-authored-by: Michael Schlottke-Lakemper
* fix
* formatting
* Update src/callbacks_step/save_restart_dg.jl
Co-authored-by: Michael Schlottke-Lakemper
* Update src/callbacks_step/save_restart_dg.jl
Co-authored-by: Michael Schlottke-Lakemper
* Update src/callbacks_step/save_restart.jl
Co-authored-by: Michael Schlottke-Lakemper
* Update src/callbacks_step/save_restart.jl
Co-authored-by: Michael Schlottke-Lakemper
* Update src/callbacks_step/save_restart.jl
Co-authored-by: Michael Schlottke-Lakemper
* suggested changes
* new test
* fix
* fix
* Update test_tree_2d_advection.jl
* fix
* fix error mpi on windows
* rerun
* Update src/callbacks_step/save_restart.jl
Co-authored-by: Hendrik Ranocha
* Add comments
* format
---------
Co-authored-by: Michael Schlottke-Lakemper
Co-authored-by: Hendrik Ranocha
---
.../elixir_advection_extended.jl | 7 ++--
.../tree_2d_dgsem/elixir_advection_restart.jl | 19 +++++----
src/Trixi.jl | 3 +-
src/callbacks_step/save_restart.jl | 36 +++++++++++++++++
src/callbacks_step/save_restart_dg.jl | 24 ++++++++++++
test/test_mpi_tree.jl | 20 ++++++++--
test/test_threaded.jl | 39 ++++++++++++-------
test/test_tree_2d_advection.jl | 20 ++++++++--
8 files changed, 135 insertions(+), 33 deletions(-)
diff --git a/examples/tree_2d_dgsem/elixir_advection_extended.jl b/examples/tree_2d_dgsem/elixir_advection_extended.jl
index 8c837957ffd..278dc85386d 100644
--- a/examples/tree_2d_dgsem/elixir_advection_extended.jl
+++ b/examples/tree_2d_dgsem/elixir_advection_extended.jl
@@ -54,7 +54,7 @@ analysis_callback = AnalysisCallback(semi, interval=analysis_interval,
alive_callback = AliveCallback(analysis_interval=analysis_interval)
# The SaveRestartCallback allows to save a file from which a Trixi.jl simulation can be restarted
-save_restart = SaveRestartCallback(interval=100,
+save_restart = SaveRestartCallback(interval=40,
save_final_restart=true)
# The SaveSolutionCallback allows to save the solution to a file in regular intervals
@@ -77,9 +77,10 @@ callbacks = CallbackSet(summary_callback,
# run the simulation
# OrdinaryDiffEq's `solve` method evolves the solution in time and executes the passed callbacks
-sol = solve(ode, CarpenterKennedy2N54(williamson_condition=false),
+alg = CarpenterKennedy2N54(williamson_condition=false)
+sol = solve(ode, alg,
dt=1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
- save_everystep=false, callback=callbacks);
+ save_everystep=false, callback=callbacks; ode_default_options()...);
# Print the timer summary
summary_callback()
diff --git a/examples/tree_2d_dgsem/elixir_advection_restart.jl b/examples/tree_2d_dgsem/elixir_advection_restart.jl
index 771ec5aefe7..b63a8d1f7bc 100644
--- a/examples/tree_2d_dgsem/elixir_advection_restart.jl
+++ b/examples/tree_2d_dgsem/elixir_advection_restart.jl
@@ -3,9 +3,10 @@ using OrdinaryDiffEq
using Trixi
###############################################################################
-# create a restart file
-
-trixi_include(@__MODULE__, joinpath(@__DIR__, "elixir_advection_extended.jl"))
+# Define time integration algorithm
+alg = CarpenterKennedy2N54(williamson_condition=false)
+# Create a restart file
+trixi_include(@__MODULE__, joinpath(@__DIR__, "elixir_advection_extended.jl"), alg = alg, tspan = (0.0, 10.0))
###############################################################################
@@ -14,22 +15,26 @@ trixi_include(@__MODULE__, joinpath(@__DIR__, "elixir_advection_extended.jl"))
# Note: If you get a restart file from somewhere else, you need to provide
# appropriate setups in the elixir loading a restart file
-restart_filename = joinpath("out", "restart_000018.h5")
+restart_filename = joinpath("out", "restart_000040.h5")
mesh = load_mesh(restart_filename)
semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver)
-tspan = (load_time(restart_filename), 2.0)
+tspan = (load_time(restart_filename), 10.0)
dt = load_dt(restart_filename)
ode = semidiscretize(semi, tspan, restart_filename);
# Do not overwrite the initial snapshot written by elixir_advection_extended.jl.
save_solution.condition.save_initial_solution = false
-alg = CarpenterKennedy2N54(williamson_condition=false)
integrator = init(ode, alg,
dt=dt, # solve needs some value here but it will be overwritten by the stepsize_callback
- save_everystep=false, callback=callbacks)
+ save_everystep=false, callback=callbacks; ode_default_options()...)
+
+# Load saved context for adaptive time integrator
+if integrator.opts.adaptive
+ load_adaptive_time_integrator!(integrator, restart_filename)
+end
# Get the last time index and work with that.
load_timestep!(integrator, restart_filename)
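Put together, the restart workflow for an error-based (adaptive) integrator looks roughly like the sketch below. This is not part of the patch: RDPK3SpFSAL49 is simply an adaptive method from OrdinaryDiffEq (the same one used in the updated MPI tests further down), and `equations`, `initial_condition`, `solver`, and `callbacks` are assumed to be defined as in elixir_advection_extended.jl.

using OrdinaryDiffEq
using Trixi

restart_filename = joinpath("out", "restart_000040.h5")
mesh = load_mesh(restart_filename)
semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver)

tspan = (load_time(restart_filename), 10.0)
dt = load_dt(restart_filename)
ode = semidiscretize(semi, tspan, restart_filename)

# An integrator with error-based step size control
alg = RDPK3SpFSAL49()
integrator = init(ode, alg,
                  dt = dt, save_everystep = false, callback = callbacks;
                  ode_default_options()...)

# Restore qold, dtpropose (and the PID error history, if any) saved at the restart point
if integrator.opts.adaptive
    load_adaptive_time_integrator!(integrator, restart_filename)
end
load_timestep!(integrator, restart_filename)

# Advance to the final time
sol = solve!(integrator)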
diff --git a/src/Trixi.jl b/src/Trixi.jl
index c883c3bf19f..b65d03e7975 100644
--- a/src/Trixi.jl
+++ b/src/Trixi.jl
@@ -255,7 +255,8 @@ export SummaryCallback, SteadyStateCallback, AnalysisCallback, AliveCallback,
GlmSpeedCallback, LBMCollisionCallback, EulerAcousticsCouplingCallback,
TrivialCallback, AnalysisCallbackCoupled
-export load_mesh, load_time, load_timestep, load_timestep!, load_dt
+export load_mesh, load_time, load_timestep, load_timestep!, load_dt,
+ load_adaptive_time_integrator!
export ControllerThreeLevel, ControllerThreeLevelCombined,
IndicatorLöhner, IndicatorLoehner, IndicatorMax,
diff --git a/src/callbacks_step/save_restart.jl b/src/callbacks_step/save_restart.jl
index 06817a9b730..0d174d85805 100644
--- a/src/callbacks_step/save_restart.jl
+++ b/src/callbacks_step/save_restart.jl
@@ -105,6 +105,11 @@ function (restart_callback::SaveRestartCallback)(integrator)
end
save_restart_file(u_ode, t, dt, iter, semi, restart_callback)
+ # If using an adaptive time stepping scheme, store controller values for restart
+ if integrator.opts.adaptive
+ save_adaptive_time_integrator(integrator, integrator.opts.controller,
+ restart_callback)
+ end
end
# avoid re-evaluating possible FSAL stages
@@ -168,5 +173,36 @@ function load_restart_file(semi::AbstractSemidiscretization, restart_file)
load_restart_file(mesh_equations_solver_cache(semi)..., restart_file)
end
+"""
+ load_adaptive_time_integrator!(integrator, restart_file::AbstractString)
+
+Load the context information for time integrators with error-based step size control
+saved in a `restart_file`.
+"""
+function load_adaptive_time_integrator!(integrator, restart_file::AbstractString)
+ controller = integrator.opts.controller
+ # Read context information for controller
+ h5open(restart_file, "r") do file
+ # Ensure that the necessary information was saved
+ if !("time_integrator_qold" in keys(attributes(file))) ||
+ !("time_integrator_dtpropose" in keys(attributes(file))) ||
+ (hasproperty(controller, :err) &&
+ !("time_integrator_controller_err" in keys(attributes(file))))
+            error("Missing data in restart file: check the consistency of the adaptive time controller with the initial setup!")
+ end
+ # Load data that is required both for PIController and PIDController
+ integrator.qold = read(attributes(file)["time_integrator_qold"])
+ integrator.dtpropose = read(attributes(file)["time_integrator_dtpropose"])
+ # Accept step to use dtpropose already in the first step
+ integrator.accept_step = true
+ # Reevaluate integrator.fsal_first on the first step
+ integrator.reeval_fsal = true
+ # Load additional parameters for PIDController
+ if hasproperty(controller, :err) # Distinguish PIDController from PIController
+ controller.err[:] = read(attributes(file)["time_integrator_controller_err"])
+ end
+ end
+end
+
include("save_restart_dg.jl")
end # @muladd
diff --git a/src/callbacks_step/save_restart_dg.jl b/src/callbacks_step/save_restart_dg.jl
index 8db6db2d2b8..cddeef77bb2 100644
--- a/src/callbacks_step/save_restart_dg.jl
+++ b/src/callbacks_step/save_restart_dg.jl
@@ -327,4 +327,28 @@ function load_restart_file_on_root(mesh::Union{ParallelTreeMesh, ParallelP4estMe
return u_ode
end
+
+# Store controller values for an adaptive time stepping scheme
+function save_adaptive_time_integrator(integrator,
+ controller, restart_callback)
+ # Save only on root
+ if mpi_isroot()
+ @unpack output_directory = restart_callback
+ timestep = integrator.stats.naccept
+
+ # Filename based on current time step
+ filename = joinpath(output_directory, @sprintf("restart_%06d.h5", timestep))
+
+ # Open file (preserve existing content)
+ h5open(filename, "r+") do file
+ # Add context information as attributes both for PIController and PIDController
+ attributes(file)["time_integrator_qold"] = integrator.qold
+ attributes(file)["time_integrator_dtpropose"] = integrator.dtpropose
+            # For PIDController it is necessary to save additional parameters
+ if hasproperty(controller, :err) # Distinguish PIDController from PIController
+ attributes(file)["time_integrator_controller_err"] = controller.err
+ end
+ end
+ end
+end
end # @muladd
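With the two helpers above, a restart file written by an adaptive run carries the controller state as plain HDF5 attributes. A minimal sketch for inspecting them directly, assuming HDF5.jl and a restart file produced by an adaptive integrator (the attribute names are the ones used in the code above):

using HDF5

restart_file = joinpath("out", "restart_000040.h5")
h5open(restart_file, "r") do file
    attrs = attributes(file)
    # Written for both PIController and PIDController
    @show read(attrs["time_integrator_qold"])
    @show read(attrs["time_integrator_dtpropose"])
    # Written only for controllers with an error history (PIDController)
    if "time_integrator_controller_err" in keys(attrs)
        @show read(attrs["time_integrator_controller_err"])
    end
end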
diff --git a/test/test_mpi_tree.jl b/test/test_mpi_tree.jl
index 8403fcf1b04..8f08a9d72e7 100644
--- a/test/test_mpi_tree.jl
+++ b/test/test_mpi_tree.jl
@@ -23,10 +23,22 @@ CI_ON_WINDOWS = (get(ENV, "GITHUB_ACTIONS", false) == "true") && Sys.iswindows()
end
@trixi_testset "elixir_advection_restart.jl" begin
- @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_restart.jl"),
- # Expected errors are exactly the same as in the serial test!
- l2 = [7.81674284320524e-6],
- linf = [6.314906965243505e-5])
+ using OrdinaryDiffEq: RDPK3SpFSAL49
+ Trixi.mpi_isroot() && println("═"^100)
+ Trixi.mpi_isroot() && println(joinpath(EXAMPLES_DIR, "elixir_advection_extended.jl"))
+ trixi_include(@__MODULE__, joinpath(EXAMPLES_DIR, "elixir_advection_extended.jl"),
+ alg = RDPK3SpFSAL49(), tspan = (0.0, 10.0))
+ l2_expected, linf_expected = analysis_callback(sol)
+
+ Trixi.mpi_isroot() && println("═"^100)
+ Trixi.mpi_isroot() && println(joinpath(EXAMPLES_DIR, "elixir_advection_restart.jl"))
+    # Errors are exactly the same as in elixir_advection_extended.jl
+ trixi_include(@__MODULE__, joinpath(EXAMPLES_DIR, "elixir_advection_restart.jl"),
+ alg = RDPK3SpFSAL49())
+ l2_actual, linf_actual = analysis_callback(sol)
+
+ Trixi.mpi_isroot() && @test l2_actual == l2_expected
+ Trixi.mpi_isroot() && @test linf_actual == linf_expected
end
@trixi_testset "elixir_advection_mortar.jl" begin
diff --git a/test/test_threaded.jl b/test/test_threaded.jl
index 9b30836d0ed..2337d73f30a 100644
--- a/test/test_threaded.jl
+++ b/test/test_threaded.jl
@@ -12,27 +12,38 @@ Trixi.mpi_isroot() && isdir(outdir) && rm(outdir, recursive=true)
@testset "Threaded tests" begin
@testset "TreeMesh" begin
@trixi_testset "elixir_advection_restart.jl" begin
- @test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", "elixir_advection_restart.jl"),
- # Expected errors are exactly the same as in the serial test!
- l2 = [7.81674284320524e-6],
- linf = [6.314906965243505e-5])
+ elixir = joinpath(examples_dir(), "tree_2d_dgsem", "elixir_advection_extended.jl")
+ Trixi.mpi_isroot() && println("═"^100)
+ Trixi.mpi_isroot() && println(elixir)
+ trixi_include(@__MODULE__, elixir, tspan = (0.0, 10.0))
+ l2_expected, linf_expected = analysis_callback(sol)
+
+ elixir = joinpath(examples_dir(), "tree_2d_dgsem", "elixir_advection_restart.jl")
+ Trixi.mpi_isroot() && println("═"^100)
+ Trixi.mpi_isroot() && println(elixir)
+      # Errors are exactly the same as in elixir_advection_extended.jl
+ trixi_include(@__MODULE__, elixir)
+ l2_actual, linf_actual = analysis_callback(sol)
+
+ Trixi.mpi_isroot() && @test l2_actual == l2_expected
+ Trixi.mpi_isroot() && @test linf_actual == linf_expected
- # Ensure that we do not have excessive memory allocations
- # (e.g., from type instabilities)
- let
- t = sol.t[end]
- u_ode = sol.u[end]
- du_ode = similar(u_ode)
- @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 5000
- end
+ # Ensure that we do not have excessive memory allocations
+ # (e.g., from type instabilities)
+ let
+ t = sol.t[end]
+ u_ode = sol.u[end]
+ du_ode = similar(u_ode)
+ @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 5000
+ end
end
@trixi_testset "elixir_advection_restart.jl with threaded time integration" begin
@test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", "elixir_advection_restart.jl"),
alg = CarpenterKennedy2N54(williamson_condition = false, thread = OrdinaryDiffEq.True()),
# Expected errors are exactly the same as in the serial test!
- l2 = [7.81674284320524e-6],
- linf = [6.314906965243505e-5])
+ l2 = [8.005068880114254e-6],
+ linf = [6.39093577996519e-5])
end
@trixi_testset "elixir_advection_amr_refine_twice.jl" begin
diff --git a/test/test_tree_2d_advection.jl b/test/test_tree_2d_advection.jl
index 973d0caf88b..36cb1e882cc 100644
--- a/test/test_tree_2d_advection.jl
+++ b/test/test_tree_2d_advection.jl
@@ -25,10 +25,22 @@ EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_2d_dgsem")
end
@trixi_testset "elixir_advection_restart.jl" begin
- @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_restart.jl"),
- # Expected errors are exactly the same as in the parallel test!
- l2 = [7.81674284320524e-6],
- linf = [6.314906965243505e-5])
+ using OrdinaryDiffEq: SSPRK43
+ println("═"^100)
+ println(joinpath(EXAMPLES_DIR, "elixir_advection_extended.jl"))
+ trixi_include(@__MODULE__, joinpath(EXAMPLES_DIR, "elixir_advection_extended.jl"),
+ alg = SSPRK43(), tspan = (0.0, 10.0))
+ l2_expected, linf_expected = analysis_callback(sol)
+
+ println("═"^100)
+ println(joinpath(EXAMPLES_DIR, "elixir_advection_restart.jl"))
+    # Errors are exactly the same as in elixir_advection_extended.jl
+ trixi_include(@__MODULE__, joinpath(EXAMPLES_DIR, "elixir_advection_restart.jl"),
+ alg = SSPRK43())
+ l2_actual, linf_actual = analysis_callback(sol)
+
+ @test l2_actual == l2_expected
+ @test linf_actual == linf_expected
end
@trixi_testset "elixir_advection_mortar.jl" begin
From 73384acbf45cf10710cfc817bc91a1812a0db1fd Mon Sep 17 00:00:00 2001
From: Benjamin Bolm <74359358+bennibolm@users.noreply.github.com>
Date: Sat, 16 Sep 2023 16:16:12 +0200
Subject: [PATCH 35/36] Ensure conservation for SSP scheme (#1640)
* Add denominator variable for SSP scheme
* Fix format
* Implement suggestions
---
src/time_integration/methods_SSP.jl | 17 +++++++++++------
1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/src/time_integration/methods_SSP.jl b/src/time_integration/methods_SSP.jl
index 8ecad69748b..a0ed889968a 100644
--- a/src/time_integration/methods_SSP.jl
+++ b/src/time_integration/methods_SSP.jl
@@ -24,14 +24,16 @@ The third-order SSP Runge-Kutta method of Shu and Osher.
This is an experimental feature and may change in future releases.
"""
struct SimpleSSPRK33{StageCallbacks} <: SimpleAlgorithmSSP
- a::SVector{3, Float64}
- b::SVector{3, Float64}
+ numerator_a::SVector{3, Float64}
+ numerator_b::SVector{3, Float64}
+ denominator::SVector{3, Float64}
c::SVector{3, Float64}
stage_callbacks::StageCallbacks
function SimpleSSPRK33(; stage_callbacks = ())
- a = SVector(0.0, 3 / 4, 1 / 3)
- b = SVector(1.0, 1 / 4, 2 / 3)
+ numerator_a = SVector(0.0, 3.0, 1.0) # a = numerator_a / denominator
+ numerator_b = SVector(1.0, 1.0, 2.0) # b = numerator_b / denominator
+ denominator = SVector(1.0, 4.0, 3.0)
c = SVector(0.0, 1.0, 1 / 2)
# Butcher tableau
@@ -42,7 +44,8 @@ struct SimpleSSPRK33{StageCallbacks} <: SimpleAlgorithmSSP
# --------------------
# b | 1/6 1/6 2/3
- new{typeof(stage_callbacks)}(a, b, c, stage_callbacks)
+ new{typeof(stage_callbacks)}(numerator_a, numerator_b, denominator, c,
+ stage_callbacks)
end
end
@@ -166,7 +169,9 @@ function solve!(integrator::SimpleIntegratorSSP)
end
# perform convex combination
- @. integrator.u = alg.a[stage] * integrator.r0 + alg.b[stage] * integrator.u
+ @. integrator.u = (alg.numerator_a[stage] * integrator.r0 +
+ alg.numerator_b[stage] * integrator.u) /
+ alg.denominator[stage]
end
integrator.iter += 1
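As a quick standalone check (not part of the patch), the rational split reproduces the original Shu-Osher coefficients, and the convex-combination weights sum to one by construction because numerator_a + numerator_b equals denominator entrywise:

using StaticArrays: SVector

numerator_a = SVector(0.0, 3.0, 1.0)
numerator_b = SVector(1.0, 1.0, 2.0)
denominator = SVector(1.0, 4.0, 3.0)

# Recover the previous coefficients a = (0, 3/4, 1/3) and b = (1, 1/4, 2/3)
@show numerator_a ./ denominator
@show numerator_b ./ denominator

# Weights of the convex combination sum to one in each stage
@assert numerator_a + numerator_b == denominator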
From a64004d98c7a1b0c4894663c176f21c2178a3630 Mon Sep 17 00:00:00 2001
From: "github-actions[bot]"
<41898282+github-actions[bot]@users.noreply.github.com>
Date: Tue, 19 Sep 2023 11:49:30 +0200
Subject: [PATCH 36/36] CompatHelper: bump compat for Documenter to 1 for
package docs, (keep existing compat) (#1641)
* CompatHelper: bump compat for Documenter to 1 for package docs, (keep existing compat)
* remove strict since it has been removed and is active by default
* allow only v1 of Documenter.jl
* ignore size threshold for API reference of Trixi.jl
* try to fix size_threshold_ignore
---------
Co-authored-by: CompatHelper Julia
Co-authored-by: Hendrik Ranocha
Co-authored-by: Hendrik Ranocha
---
docs/Project.toml | 2 +-
docs/make.jl | 10 +++++-----
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/docs/Project.toml b/docs/Project.toml
index 9fc974d6f38..ffa86e0b9f7 100644
--- a/docs/Project.toml
+++ b/docs/Project.toml
@@ -13,7 +13,7 @@ Trixi2Vtk = "bc1476a1-1ca6-4cc3-950b-c312b255ff95"
[compat]
CairoMakie = "0.6, 0.7, 0.8, 0.9, 0.10"
-Documenter = "0.27"
+Documenter = "1"
ForwardDiff = "0.10"
HOHQMesh = "0.1, 0.2"
LaTeXStrings = "1.2"
diff --git a/docs/make.jl b/docs/make.jl
index f882fcf1219..df8ac04be12 100644
--- a/docs/make.jl
+++ b/docs/make.jl
@@ -77,7 +77,7 @@ makedocs(
# Specify modules for which docstrings should be shown
modules = [Trixi, Trixi2Vtk],
# Set sitename to Trixi.jl
- sitename="Trixi.jl",
+ sitename = "Trixi.jl",
# Provide additional formatting options
format = Documenter.HTML(
# Disable pretty URLs during manual testing
@@ -85,7 +85,8 @@ makedocs(
# Explicitly add favicon as asset
assets = ["assets/favicon.ico"],
# Set canonical URL to GitHub pages URL
- canonical = "https://trixi-framework.github.io/Trixi.jl/stable"
+ canonical = "https://trixi-framework.github.io/Trixi.jl/stable",
+ size_threshold_ignore = ["reference-trixi.md"]
),
# Explicitly specify documentation structure
pages = [
@@ -124,9 +125,8 @@ makedocs(
"Authors" => "authors.md",
"Contributing" => "contributing.md",
"Code of Conduct" => "code_of_conduct.md",
- "License" => "license.md"
- ],
- strict = true # to make the GitHub action fail when doctests fail, see https://github.com/neuropsychology/Psycho.jl/issues/34
+ "License" => "license.md",
+ ]
)
deploydocs(