From 13b26a3d5c27c6c8040b56a56cb35b66f0e3bb42 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 4 Jul 2023 03:25:15 +0200
Subject: [PATCH 01/40] Bump crate-ci/typos from 1.15.6 to 1.15.10 (#1559)
Bumps [crate-ci/typos](https://github.com/crate-ci/typos) from 1.15.6 to 1.15.10.
- [Release notes](https://github.com/crate-ci/typos/releases)
- [Changelog](https://github.com/crate-ci/typos/blob/master/CHANGELOG.md)
- [Commits](https://github.com/crate-ci/typos/compare/v1.15.6...v1.15.10)
---
updated-dependencies:
- dependency-name: crate-ci/typos
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
.github/workflows/SpellCheck.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/SpellCheck.yml b/.github/workflows/SpellCheck.yml
index 93bee1ce4fc..90bd9366b50 100644
--- a/.github/workflows/SpellCheck.yml
+++ b/.github/workflows/SpellCheck.yml
@@ -10,4 +10,4 @@ jobs:
- name: Checkout Actions Repository
uses: actions/checkout@v3
- name: Check spelling
- uses: crate-ci/typos@v1.15.6
+ uses: crate-ci/typos@v1.15.10
From 58fbab4df7bdfaf161fff16214f629a10426532b Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Wed, 5 Jul 2023 16:06:48 +0200
Subject: [PATCH 02/40] fix typo in docs (#1560)
---
docs/literate/src/files/shock_capturing.jl | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/literate/src/files/shock_capturing.jl b/docs/literate/src/files/shock_capturing.jl
index b165f7ec8bd..afa34cbf06a 100644
--- a/docs/literate/src/files/shock_capturing.jl
+++ b/docs/literate/src/files/shock_capturing.jl
@@ -48,7 +48,7 @@
# with the total energy $\mathbb{E}=\max\big(\frac{m_N^2}{\sum_{j=0}^N m_j^2}, \frac{m_{N-1}^2}{\sum_{j=0}^{N-1} m_j^2}\big)$,
# threshold $\mathbb{T}= 0.5 * 10^{-1.8*(N+1)^{1/4}}$ and parameter $s=ln\big(\frac{1-0.0001}{0.0001}\big)\approx 9.21024$.
-# For computational efficiency, $\alpha_{min}$ is introduced und used for
+# For computational efficiency, $\alpha_{min}$ is introduced and used for
# ```math
# \tilde{\alpha} = \begin{cases}
# 0, & \text{if } \alpha<\alpha_{min}\\
From 3bd55515a03dac926446cbb8ee41edd21d9baec0 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Sat, 8 Jul 2023 06:16:24 +0200
Subject: [PATCH 03/40] set version to v0.5.31
---
Project.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Project.toml b/Project.toml
index 0edba6b681c..81657e868db 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "Trixi"
uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb"
authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "]
-version = "0.5.31-pre"
+version = "0.5.31"
[deps]
CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
From dc364ebc665c7f0ab74601ba0041618dddbabc18 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Sat, 8 Jul 2023 06:16:45 +0200
Subject: [PATCH 04/40] set development version to v0.5.32-pre
---
Project.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Project.toml b/Project.toml
index 81657e868db..828f4778f74 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "Trixi"
uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb"
authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "]
-version = "0.5.31"
+version = "0.5.32-pre"
[deps]
CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
From 766e3f94465f48608c92d1fe91cd46db4c31c362 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 11 Jul 2023 07:14:30 +0200
Subject: [PATCH 05/40] Bump crate-ci/typos from 1.15.10 to 1.16.0 (#1563)
Bumps [crate-ci/typos](https://github.com/crate-ci/typos) from 1.15.10 to 1.16.0.
- [Release notes](https://github.com/crate-ci/typos/releases)
- [Changelog](https://github.com/crate-ci/typos/blob/master/CHANGELOG.md)
- [Commits](https://github.com/crate-ci/typos/compare/v1.15.10...v1.16.0)
---
updated-dependencies:
- dependency-name: crate-ci/typos
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
.github/workflows/SpellCheck.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/SpellCheck.yml b/.github/workflows/SpellCheck.yml
index 90bd9366b50..bb5a32f72ee 100644
--- a/.github/workflows/SpellCheck.yml
+++ b/.github/workflows/SpellCheck.yml
@@ -10,4 +10,4 @@ jobs:
- name: Checkout Actions Repository
uses: actions/checkout@v3
- name: Check spelling
- uses: crate-ci/typos@v1.15.10
+ uses: crate-ci/typos@v1.16.0
From 5ff677c1d246e7a500ab845201bc328d1d3bde92 Mon Sep 17 00:00:00 2001
From: Lars Christmann
Date: Tue, 11 Jul 2023 18:53:25 +0200
Subject: [PATCH 06/40] Implement upwind flux for linearized Euler equations
(#1557)
* Enable input checks for LEE keyword constructor
* Extend LEE implementation to curved meshes
* Implement upwind flux for linearized Euler equations
* Add upwind flux examples and tests
* Fix comments in linearized Euler elixirs
* Clarify LEE Gaussian source elixir
* Rename `flux_upwind` to `flux_godunov`
* Add parentheses around multiline expressions
* Add consistency checks for LEE Godunov flux
* Explain odd mean values in more detail
* Use normalized normal vector to simplify flux
* Add docstring for LEE upwind flux
* Update examples/p4est_2d_dgsem/elixir_linearizedeuler_gaussian_source.jl
Co-authored-by: Michael Schlottke-Lakemper
---------
Co-authored-by: Michael Schlottke-Lakemper
---
.../elixir_linearizedeuler_gaussian_source.jl | 89 ++++++++
.../elixir_linearizedeuler_gauss_wall.jl | 68 ++++++
src/equations/linearized_euler_2d.jl | 212 +++++++++++++++++-
test/test_p4est_2d.jl | 5 +
test/test_tree_2d_linearizedeuler.jl | 6 +
test/test_unit.jl | 20 ++
6 files changed, 399 insertions(+), 1 deletion(-)
create mode 100644 examples/p4est_2d_dgsem/elixir_linearizedeuler_gaussian_source.jl
create mode 100644 examples/tree_2d_dgsem/elixir_linearizedeuler_gauss_wall.jl
diff --git a/examples/p4est_2d_dgsem/elixir_linearizedeuler_gaussian_source.jl b/examples/p4est_2d_dgsem/elixir_linearizedeuler_gaussian_source.jl
new file mode 100644
index 00000000000..ba2ec827778
--- /dev/null
+++ b/examples/p4est_2d_dgsem/elixir_linearizedeuler_gaussian_source.jl
@@ -0,0 +1,89 @@
+
+using OrdinaryDiffEq
+using Trixi
+
+# Based on the TreeMesh example `elixir_acoustics_gaussian_source.jl`.
+# The acoustic perturbation equations have been replaced with the linearized Euler
+# equations, and instead of the Cartesian `TreeMesh`, a rotated `P4estMesh` is used.
+
+# Oscillating Gaussian-shaped source terms
+function source_terms_gauss(u, x, t, equations::LinearizedEulerEquations2D)
+ r = 0.1
+ A = 1.0
+ f = 2.0
+
+ # Velocity sources
+ s2 = 0.0
+ s3 = 0.0
+ # Density and pressure source
+ s1 = s4 = exp(-(x[1]^2 + x[2]^2) / (2 * r^2)) * A * sin(2 * pi * f * t)
+
+ return SVector(s1, s2, s3, s4)
+end
+
+initial_condition_zero(x, t, equations::LinearizedEulerEquations2D) = SVector(0.0, 0.0, 0.0, 0.0)
+
+###############################################################################
+# semidiscretization of the linearized Euler equations
+
+# Create a domain that is a 30° rotated version of [-3, 3]^2
+c = cospi(2 * 30.0 / 360.0)
+s = sinpi(2 * 30.0 / 360.0)
+rot_mat = Trixi.SMatrix{2, 2}([c -s; s c])
+mapping(xi, eta) = rot_mat * SVector(3.0*xi, 3.0*eta)
+
+# Mean density and speed of sound are slightly off from 1.0 to allow proper verification of
+# the curved LEE implementation with this elixir (some terms in the LEE cancel if both are 1.0)
+equations = LinearizedEulerEquations2D(v_mean_global=Tuple(rot_mat * SVector(-0.5, 0.25)),
+ c_mean_global=1.02, rho_mean_global=1.01)
+
+initial_condition = initial_condition_zero
+
+# Create DG solver with polynomial degree = 3 and upwind flux as surface flux
+solver = DGSEM(polydeg=3, surface_flux=flux_godunov)
+
+# Create a uniformly refined mesh with periodic boundaries
+trees_per_dimension = (4, 4)
+mesh = P4estMesh(trees_per_dimension, polydeg=1,
+ mapping=mapping,
+ periodicity=true, initial_refinement_level=2)
+
+# A semidiscretization collects data structures and functions for the spatial discretization
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver,
+ source_terms=source_terms_gauss)
+
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+# Create ODE problem with time span from 0.0 to 2.0
+tspan = (0.0, 2.0)
+ode = semidiscretize(semi, tspan)
+
+# At the beginning of the main loop, the SummaryCallback prints a summary of the simulation setup
+# and resets the timers
+summary_callback = SummaryCallback()
+
+# The AnalysisCallback allows analysing the solution at regular intervals and prints the results
+analysis_callback = AnalysisCallback(semi, interval=100)
+
+# The SaveSolutionCallback allows saving the solution to a file at regular intervals
+save_solution = SaveSolutionCallback(interval=100)
+
+# The StepsizeCallback handles the re-calculation of the maximum Δt after each time step
+stepsize_callback = StepsizeCallback(cfl=0.5)
+
+# Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver
+callbacks = CallbackSet(summary_callback, analysis_callback, save_solution, stepsize_callback)
+
+
+###############################################################################
+# run the simulation
+
+# OrdinaryDiffEq's `solve` method evolves the solution in time and executes the passed callbacks
+sol = solve(ode, CarpenterKennedy2N54(williamson_condition=false),
+ dt=1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
+ save_everystep=false, callback=callbacks);
+
+# Print the timer summary
+summary_callback()
diff --git a/examples/tree_2d_dgsem/elixir_linearizedeuler_gauss_wall.jl b/examples/tree_2d_dgsem/elixir_linearizedeuler_gauss_wall.jl
new file mode 100644
index 00000000000..14fe201a291
--- /dev/null
+++ b/examples/tree_2d_dgsem/elixir_linearizedeuler_gauss_wall.jl
@@ -0,0 +1,68 @@
+
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# semidiscretization of the linearized Euler equations
+
+equations = LinearizedEulerEquations2D(v_mean_global=(0.5, 0.0), c_mean_global=1.0,
+ rho_mean_global=1.0)
+
+# Create DG solver with polynomial degree = 5 and upwind flux as surface flux
+solver = DGSEM(polydeg=5, surface_flux=flux_godunov)
+
+coordinates_min = (-100.0, 0.0) # minimum coordinates (min(x), min(y))
+coordinates_max = (100.0, 200.0) # maximum coordinates (max(x), max(y))
+
+# Create a uniformly refined mesh (non-periodic; wall boundary conditions are set below)
+mesh = TreeMesh(coordinates_min, coordinates_max,
+ initial_refinement_level=4,
+ n_cells_max=100_000,
+ periodicity=false)
+
+function initial_condition_gauss_wall(x, t, equations::LinearizedEulerEquations2D)
+ v1_prime = 0.0
+ v2_prime = 0.0
+ rho_prime = p_prime = exp(-log(2) * (x[1]^2 + (x[2] - 25)^2) / 25)
+ return SVector(rho_prime, v1_prime, v2_prime, p_prime)
+end
+initial_condition = initial_condition_gauss_wall
+
+# A semidiscretization collects data structures and functions for the spatial discretization
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver,
+ boundary_conditions=boundary_condition_wall)
+
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+# Create ODE problem with time span from 0.0 to 30.0
+tspan = (0.0, 30.0)
+ode = semidiscretize(semi, tspan)
+
+# At the beginning of the main loop, the SummaryCallback prints a summary of the simulation setup
+# and resets the timers
+summary_callback = SummaryCallback()
+
+# The AnalysisCallback allows analysing the solution at regular intervals and prints the results
+analysis_callback = AnalysisCallback(semi, interval=100)
+
+# The SaveSolutionCallback allows saving the solution to a file at regular intervals
+save_solution = SaveSolutionCallback(interval=100)
+
+# The StepsizeCallback handles the re-calculation of the maximum Δt after each time step
+stepsize_callback = StepsizeCallback(cfl=0.7)
+
+# Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver
+callbacks = CallbackSet(summary_callback, analysis_callback, save_solution, stepsize_callback)
+
+###############################################################################
+# run the simulation
+
+# OrdinaryDiffEq's `solve` method evolves the solution in time and executes the passed callbacks
+sol = solve(ode, CarpenterKennedy2N54(williamson_condition=false),
+ dt=1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
+ save_everystep=false, callback=callbacks)
+
+# Print the timer summary
+summary_callback()
diff --git a/src/equations/linearized_euler_2d.jl b/src/equations/linearized_euler_2d.jl
index cd681365cae..e478c32bd29 100644
--- a/src/equations/linearized_euler_2d.jl
+++ b/src/equations/linearized_euler_2d.jl
@@ -53,7 +53,7 @@ end
function LinearizedEulerEquations2D(; v_mean_global::NTuple{2, <:Real},
c_mean_global::Real, rho_mean_global::Real)
- return LinearizedEulerEquations2D(SVector(v_mean_global), c_mean_global,
+ return LinearizedEulerEquations2D(v_mean_global, c_mean_global,
rho_mean_global)
end
@@ -126,6 +126,24 @@ end
return SVector(f1, f2, f3, f4)
end
+# Calculate 1D flux for a single point
+@inline function flux(u, normal_direction::AbstractVector,
+ equations::LinearizedEulerEquations2D)
+ @unpack v_mean_global, c_mean_global, rho_mean_global = equations
+ rho_prime, v1_prime, v2_prime, p_prime = u
+
+ v_mean_normal = v_mean_global[1] * normal_direction[1] +
+ v_mean_global[2] * normal_direction[2]
+ v_prime_normal = v1_prime * normal_direction[1] + v2_prime * normal_direction[2]
+
+ f1 = v_mean_normal * rho_prime + rho_mean_global * v_prime_normal
+ f2 = v_mean_normal * v1_prime + normal_direction[1] * p_prime / rho_mean_global
+ f3 = v_mean_normal * v2_prime + normal_direction[2] * p_prime / rho_mean_global
+ f4 = v_mean_normal * p_prime + c_mean_global^2 * rho_mean_global * v_prime_normal
+
+ return SVector(f1, f2, f3, f4)
+end
+
@inline have_constant_speed(::LinearizedEulerEquations2D) = True()
@inline function max_abs_speeds(equations::LinearizedEulerEquations2D)
@@ -143,6 +161,198 @@ end
end
end
+@inline function max_abs_speed_naive(u_ll, u_rr, normal_direction::AbstractVector,
+ equations::LinearizedEulerEquations2D)
+ @unpack v_mean_global, c_mean_global = equations
+ v_mean_normal = normal_direction[1] * v_mean_global[1] +
+ normal_direction[2] * v_mean_global[2]
+ return abs(v_mean_normal) + c_mean_global * norm(normal_direction)
+end
+
+@doc raw"""
+ flux_godunov(u_ll, u_rr, orientation_or_normal_direction,
+ equations::LinearizedEulerEquations2D)
+
+An upwind flux for the linearized Euler equations based on diagonalization of the physical
+flux matrix. Given the physical flux ``Au``, ``A=T \Lambda T^{-1}`` with
+``\Lambda`` being a diagonal matrix that holds the eigenvalues of ``A``, decompose
+``\Lambda = \Lambda^+ + \Lambda^-`` where ``\Lambda^+`` and ``\Lambda^-`` are diagonal
+matrices holding the positive and negative eigenvalues of ``A``, respectively. Then for
+left and right states ``u_L, u_R``, the numerical flux calculated by this function is given
+by ``A^+ u_L + A^- u_R`` where ``A^{\pm} = T \Lambda^{\pm} T^{-1}``.
+
+The diagonalization of the flux matrix can be found in
+- R. F. Warming, Richard M. Beam and B. J. Hyett (1975)
+ Diagonalization and simultaneous symmetrization of the gas-dynamic matrices
+ [DOI: 10.1090/S0025-5718-1975-0388967-5](https://doi.org/10.1090/S0025-5718-1975-0388967-5)
+"""
+@inline function flux_godunov(u_ll, u_rr, orientation::Integer,
+ equations::LinearizedEulerEquations2D)
+ @unpack v_mean_global, rho_mean_global, c_mean_global = equations
+ v1_mean = v_mean_global[1]
+ v2_mean = v_mean_global[2]
+
+ rho_prime_ll, v1_prime_ll, v2_prime_ll, p_prime_ll = u_ll
+ rho_prime_rr, v1_prime_rr, v2_prime_rr, p_prime_rr = u_rr
+
+ if orientation == 1
+ # Eigenvalues of the flux matrix
+ lambda1 = v1_mean
+ lambda2 = v1_mean - c_mean_global
+ lambda3 = v1_mean + c_mean_global
+
+ lambda1_p = positive_part(lambda1)
+ lambda2_p = positive_part(lambda2)
+ lambda3_p = positive_part(lambda3)
+ lambda2p3_half_p = 0.5 * (lambda2_p + lambda3_p)
+ lambda3m2_half_p = 0.5 * (lambda3_p - lambda2_p)
+
+ lambda1_m = negative_part(lambda1)
+ lambda2_m = negative_part(lambda2)
+ lambda3_m = negative_part(lambda3)
+ lambda2p3_half_m = 0.5 * (lambda2_m + lambda3_m)
+ lambda3m2_half_m = 0.5 * (lambda3_m - lambda2_m)
+
+ f1p = (lambda1_p * rho_prime_ll +
+ lambda3m2_half_p / c_mean_global * rho_mean_global * v1_prime_ll +
+ (lambda2p3_half_p - lambda1_p) / c_mean_global^2 * p_prime_ll)
+ f2p = (lambda2p3_half_p * v1_prime_ll +
+ lambda3m2_half_p / c_mean_global * p_prime_ll / rho_mean_global)
+ f3p = lambda1_p * v2_prime_ll
+ f4p = (lambda3m2_half_p * c_mean_global * rho_mean_global * v1_prime_ll +
+ lambda2p3_half_p * p_prime_ll)
+
+ f1m = (lambda1_m * rho_prime_rr +
+ lambda3m2_half_m / c_mean_global * rho_mean_global * v1_prime_rr +
+ (lambda2p3_half_m - lambda1_m) / c_mean_global^2 * p_prime_rr)
+ f2m = (lambda2p3_half_m * v1_prime_rr +
+ lambda3m2_half_m / c_mean_global * p_prime_rr / rho_mean_global)
+ f3m = lambda1_m * v2_prime_rr
+ f4m = (lambda3m2_half_m * c_mean_global * rho_mean_global * v1_prime_rr +
+ lambda2p3_half_m * p_prime_rr)
+
+ f1 = f1p + f1m
+ f2 = f2p + f2m
+ f3 = f3p + f3m
+ f4 = f4p + f4m
+ else # orientation == 2
+ # Eigenvalues of the flux matrix
+ lambda1 = v2_mean
+ lambda2 = v2_mean - c_mean_global
+ lambda3 = v2_mean + c_mean_global
+
+ lambda1_p = positive_part(lambda1)
+ lambda2_p = positive_part(lambda2)
+ lambda3_p = positive_part(lambda3)
+ lambda2p3_half_p = 0.5 * (lambda2_p + lambda3_p)
+ lambda3m2_half_p = 0.5 * (lambda3_p - lambda2_p)
+
+ lambda1_m = negative_part(lambda1)
+ lambda2_m = negative_part(lambda2)
+ lambda3_m = negative_part(lambda3)
+ lambda2p3_half_m = 0.5 * (lambda2_m + lambda3_m)
+ lambda3m2_half_m = 0.5 * (lambda3_m - lambda2_m)
+
+ f1p = (lambda1_p * rho_prime_ll +
+ lambda3m2_half_p / c_mean_global * rho_mean_global * v2_prime_ll +
+ (lambda2p3_half_p - lambda1_p) / c_mean_global^2 * p_prime_ll)
+ f2p = lambda1_p * v1_prime_ll
+ f3p = (lambda2p3_half_p * v2_prime_ll +
+ lambda3m2_half_p / c_mean_global * p_prime_ll / rho_mean_global)
+ f4p = (lambda3m2_half_p * c_mean_global * rho_mean_global * v2_prime_ll +
+ lambda2p3_half_p * p_prime_ll)
+
+ f1m = (lambda1_m * rho_prime_rr +
+ lambda3m2_half_m / c_mean_global * rho_mean_global * v2_prime_rr +
+ (lambda2p3_half_m - lambda1_m) / c_mean_global^2 * p_prime_rr)
+ f2m = lambda1_m * v1_prime_rr
+ f3m = (lambda2p3_half_m * v2_prime_rr +
+ lambda3m2_half_m / c_mean_global * p_prime_rr / rho_mean_global)
+ f4m = (lambda3m2_half_m * c_mean_global * rho_mean_global * v2_prime_rr +
+ lambda2p3_half_m * p_prime_rr)
+
+ f1 = f1p + f1m
+ f2 = f2p + f2m
+ f3 = f3p + f3m
+ f4 = f4p + f4m
+ end
+
+ return SVector(f1, f2, f3, f4)
+end
+
+@inline function flux_godunov(u_ll, u_rr, normal_direction::AbstractVector,
+ equations::LinearizedEulerEquations2D)
+ @unpack v_mean_global, rho_mean_global, c_mean_global = equations
+ rho_prime_ll, v1_prime_ll, v2_prime_ll, p_prime_ll = u_ll
+ rho_prime_rr, v1_prime_rr, v2_prime_rr, p_prime_rr = u_rr
+
+ # Do not use `normalize` since we use `norm_` later to scale the eigenvalues
+ norm_ = norm(normal_direction)
+ normal_vector = normal_direction / norm_
+
+ # Use normalized vector here, scaling is applied via eigenvalues of the flux matrix
+ v_mean_normal = v_mean_global[1] * normal_vector[1] +
+ v_mean_global[2] * normal_vector[2]
+ v_prime_normal_ll = v1_prime_ll * normal_vector[1] + v2_prime_ll * normal_vector[2]
+ v_prime_normal_rr = v1_prime_rr * normal_vector[1] + v2_prime_rr * normal_vector[2]
+
+ # Eigenvalues of the flux matrix
+ lambda1 = v_mean_normal * norm_
+ lambda2 = (v_mean_normal - c_mean_global) * norm_
+ lambda3 = (v_mean_normal + c_mean_global) * norm_
+
+ lambda1_p = positive_part(lambda1)
+ lambda2_p = positive_part(lambda2)
+ lambda3_p = positive_part(lambda3)
+ lambda2p3_half_p = 0.5 * (lambda2_p + lambda3_p)
+ lambda3m2_half_p = 0.5 * (lambda3_p - lambda2_p)
+
+ lambda1_m = negative_part(lambda1)
+ lambda2_m = negative_part(lambda2)
+ lambda3_m = negative_part(lambda3)
+ lambda2p3_half_m = 0.5 * (lambda2_m + lambda3_m)
+ lambda3m2_half_m = 0.5 * (lambda3_m - lambda2_m)
+
+ f1p = (lambda1_p * rho_prime_ll +
+ lambda3m2_half_p / c_mean_global * rho_mean_global * v_prime_normal_ll +
+ (lambda2p3_half_p - lambda1_p) / c_mean_global^2 * p_prime_ll)
+ f2p = (((lambda1_p * normal_vector[2]^2 +
+ lambda2p3_half_p * normal_vector[1]^2) * v1_prime_ll +
+ (lambda2p3_half_p - lambda1_p) * prod(normal_vector) * v2_prime_ll) +
+ lambda3m2_half_p / c_mean_global * normal_vector[1] * p_prime_ll /
+ rho_mean_global)
+ f3p = (((lambda1_p * normal_vector[1]^2 +
+ lambda2p3_half_p * normal_vector[2]^2) * v2_prime_ll +
+ (lambda2p3_half_p - lambda1_p) * prod(normal_vector) * v1_prime_ll) +
+ lambda3m2_half_p / c_mean_global * normal_vector[2] * p_prime_ll /
+ rho_mean_global)
+ f4p = (lambda3m2_half_p * c_mean_global * rho_mean_global * v_prime_normal_ll +
+ lambda2p3_half_p * p_prime_ll)
+
+ f1m = (lambda1_m * rho_prime_rr +
+ lambda3m2_half_m / c_mean_global * rho_mean_global * v_prime_normal_rr +
+ (lambda2p3_half_m - lambda1_m) / c_mean_global^2 * p_prime_rr)
+ f2m = (((lambda1_m * normal_vector[2]^2 +
+ lambda2p3_half_m * normal_vector[1]^2) * v1_prime_rr +
+ (lambda2p3_half_m - lambda1_m) * prod(normal_vector) * v2_prime_rr) +
+ lambda3m2_half_m / c_mean_global * normal_vector[1] * p_prime_rr /
+ rho_mean_global)
+ f3m = (((lambda1_m * normal_vector[1]^2 +
+ lambda2p3_half_m * normal_vector[2]^2) * v2_prime_rr +
+ (lambda2p3_half_m - lambda1_m) * prod(normal_vector) * v1_prime_rr) +
+ lambda3m2_half_m / c_mean_global * normal_vector[2] * p_prime_rr /
+ rho_mean_global)
+ f4m = (lambda3m2_half_m * c_mean_global * rho_mean_global * v_prime_normal_rr +
+ lambda2p3_half_m * p_prime_rr)
+
+ f1 = f1p + f1m
+ f2 = f2p + f2m
+ f3 = f3p + f3m
+ f4 = f4p + f4m
+
+ return SVector(f1, f2, f3, f4)
+end
+
# Convert conservative variables to primitive
@inline cons2prim(u, equations::LinearizedEulerEquations2D) = u
@inline cons2entropy(u, ::LinearizedEulerEquations2D) = u
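A minimal Julia sketch of the flux splitting described in the `flux_godunov` docstring above, assuming the x-direction flux matrix of the linearized Euler equations; the mean-state values, left/right states, and variable names below are illustrative only:
```julia
using LinearAlgebra

# Illustrative global mean state
v1_mean = 0.5
rho_mean = 1.2
c_mean = 1.1

# Physical flux matrix A in the x-direction for u = (rho', v1', v2', p')
A = [v1_mean rho_mean 0.0 0.0;
     0.0 v1_mean 0.0 1/rho_mean;
     0.0 0.0 v1_mean 0.0;
     0.0 c_mean^2*rho_mean 0.0 v1_mean]

# Diagonalize A = T * Λ * T⁻¹ and split the eigenvalues into positive/negative parts
E = eigen(A)
Λ, T = E.values, E.vectors
A_plus = T * Diagonal(max.(Λ, 0)) / T   # A⁺ = T Λ⁺ T⁻¹
A_minus = T * Diagonal(min.(Λ, 0)) / T  # A⁻ = T Λ⁻ T⁻¹

# Upwind flux A⁺ u_L + A⁻ u_R for left/right states; this should agree with
# flux_godunov(u_L, u_R, 1, equations) up to floating-point roundoff
u_L = [1.0, 0.5, -0.7, 1.0]
u_R = [1.5, -0.2, 0.1, 5.0]
flux_upwind = A_plus * u_L + A_minus * u_R
```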
diff --git a/test/test_p4est_2d.jl b/test/test_p4est_2d.jl
index f66664c7a89..c4ce2619e15 100644
--- a/test/test_p4est_2d.jl
+++ b/test/test_p4est_2d.jl
@@ -164,6 +164,11 @@ isdir(outdir) && rm(outdir, recursive=true)
tspan = (0.0, 0.02))
end
+ @trixi_testset "elixir_linearizedeuler_gaussian_source.jl" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_linearizedeuler_gaussian_source.jl"),
+ l2 = [0.006047938590548741, 0.0040953286019907035, 0.004222698522497298, 0.006269492499336128],
+ linf = [0.06386175207349379, 0.0378926444850457, 0.041759728067967065, 0.06430136016259067])
+ end
end
# Clean up afterwards: delete Trixi.jl output directory
diff --git a/test/test_tree_2d_linearizedeuler.jl b/test/test_tree_2d_linearizedeuler.jl
index 540b3951212..2c5f6dc2cd1 100644
--- a/test/test_tree_2d_linearizedeuler.jl
+++ b/test/test_tree_2d_linearizedeuler.jl
@@ -13,4 +13,10 @@ EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_2d_dgsem")
linf = [0.0011006084408365924, 0.0005788678074691855, 0.0005788678074701847, 0.0011006084408365924]
)
end
+
+ @trixi_testset "elixir_linearizedeuler_gauss_wall.jl" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_linearizedeuler_gauss_wall.jl"),
+ l2 = [0.048185623945503485, 0.01941899333212175, 0.019510224816991825, 0.048185623945503485],
+ linf = [1.0392165942153189, 0.18188777290819994, 0.1877028372108587, 1.0392165942153189])
+ end
end
diff --git a/test/test_unit.jl b/test/test_unit.jl
index 2156e9bac32..b0c3e4205e5 100644
--- a/test/test_unit.jl
+++ b/test/test_unit.jl
@@ -670,6 +670,26 @@ isdir(outdir) && rm(outdir, recursive=true)
for normal_direction in normal_directions
@test flux_godunov(u, u, normal_direction, equation) ≈ flux(u, normal_direction, equation)
end
+
+ # Linearized Euler 2D
+ equation = LinearizedEulerEquations2D(v_mean_global=(0.5, -0.7), c_mean_global=1.1,
+ rho_mean_global=1.2)
+ u_values = [SVector(1.0, 0.5, -0.7, 1.0),
+ SVector(1.5, -0.2, 0.1, 5.0),]
+
+ orientations = [1, 2]
+ for orientation in orientations, u in u_values
+ @test flux_godunov(u, u, orientation, equation) ≈ flux(u, orientation, equation)
+ end
+
+ normal_directions = [SVector(1.0, 0.0),
+ SVector(0.0, 1.0),
+ SVector(0.5, -0.5),
+ SVector(-1.2, 0.3)]
+
+ for normal_direction in normal_directions, u in u_values
+ @test flux_godunov(u, u, normal_direction, equation) ≈ flux(u, normal_direction, equation)
+ end
end
@timed_testset "Consistency check for Engquist-Osher flux" begin
From 42732dbd09b21c2e0237ba3f004469b94f3d5600 Mon Sep 17 00:00:00 2001
From: Simon Candelaresi <10759273+SimonCan@users.noreply.github.com>
Date: Tue, 11 Jul 2023 22:44:39 +0100
Subject: [PATCH 07/40] Added load_timestep function. (#1528)
* Added load_timestep function.
Corrected time index in restart simulations.
* Changed doc string for load_timestep to clarify that we read the iteration number.
* Added reading function for dt.
Changed restart example elixirs such that they use dt from the previous simulation.
* Get attribute 'current_filename' when loading an existing mesh.
This fixes issues with converting from hdf5 into vtk
when rerunning a simulation.
* format
* Update make.jl to include restart simulation documentation.
* Create restart.md.
* Added unformatted docs on how to restart a simulation from an old snapshot.
* Completed restart tutorial.
* Fixed a few typos in the docs for restarting a simulation.
* Minor typo.
* Added myself to the contributor list.
* Update docs/src/restart.md
Co-authored-by: Hendrik Ranocha
* Update docs/src/restart.md
Co-authored-by: Hendrik Ranocha
* Update docs/src/restart.md
Co-authored-by: Hendrik Ranocha
* Update docs/src/restart.md
Co-authored-by: Hendrik Ranocha
* Update docs/src/restart.md
Co-authored-by: Michael Schlottke-Lakemper
* Update docs/src/restart.md
Co-authored-by: Michael Schlottke-Lakemper
* Update restart.md
Added a few links to the restart documentation.
* Update docs/src/restart.md
Co-authored-by: Hendrik Ranocha
* Corrected reference file name.
* Added reference to save solution callback.
---------
Co-authored-by: Hendrik Ranocha
Co-authored-by: Michael Schlottke-Lakemper
Co-authored-by: Hendrik Ranocha
---
AUTHORS.md | 1 +
docs/make.jl | 1 +
docs/src/restart.md | 89 +++++++++++++++++++
.../elixir_advection_restart.jl | 16 +++-
.../elixir_advection_restart.jl | 16 +++-
.../elixir_advection_restart.jl | 15 +++-
.../elixir_advection_restart.jl | 16 +++-
.../tree_2d_dgsem/elixir_advection_restart.jl | 16 +++-
.../tree_3d_dgsem/elixir_advection_restart.jl | 16 +++-
.../elixir_euler_restart.jl | 16 +++-
src/Trixi.jl | 2 +-
src/callbacks_step/save_restart.jl | 22 +++++
src/meshes/mesh_io.jl | 1 +
13 files changed, 205 insertions(+), 22 deletions(-)
create mode 100644 docs/src/restart.md
diff --git a/AUTHORS.md b/AUTHORS.md
index 973e311920b..abaa3e7e037 100644
--- a/AUTHORS.md
+++ b/AUTHORS.md
@@ -24,6 +24,7 @@ are listed in alphabetical order:
* Maximilian D. Bertrand
* Benjamin Bolm
+* Simon Candelaresi
* Jesse Chan
* Lars Christmann
* Christof Czernik
diff --git a/docs/make.jl b/docs/make.jl
index 5069e4dc49a..57629577ddb 100644
--- a/docs/make.jl
+++ b/docs/make.jl
@@ -92,6 +92,7 @@ makedocs(
"Getting started" => [
"Overview" => "overview.md",
"Visualization" => "visualization.md",
+ "Restart simulation" => "restart.md",
],
"Tutorials" => tutorials,
"Basic building blocks" => [
diff --git a/docs/src/restart.md b/docs/src/restart.md
new file mode 100644
index 00000000000..d24d93cb297
--- /dev/null
+++ b/docs/src/restart.md
@@ -0,0 +1,89 @@
+# [Restart simulation](@id restart)
+
+You can continue running an already finished simulation by first
+preparing the simulation for the restart and then performing the restart.
+Here we suppose that in the first run your simulation stops at time 1.0
+and then you want it to run further to time 2.0.
+
+## [Prepare the simulation for a restart](@id restart_preparation)
+In your original elixir, you need to specify that restart files should be written out.
+Those will later be read to restart your simulation.
+This is done in almost the same way as writing snapshots with the
+[`SaveSolutionCallback`](@ref) callback.
+For restart files, the corresponding callback is the [`SaveRestartCallback`](@ref):
+```julia
+save_restart = SaveRestartCallback(interval=100,
+ save_final_restart=true)
+```
+Make this part of your `CallbackSet`.
+
+An example is
+[```examples/structured_2d_dgsem/elixir_advection_extended.jl```](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/structured_2d_dgsem/elixir_advection_extended.jl).
+
+
+## [Perform the simulation restart](@id restart_perform)
+Since all of the information about the simulation can be obtained from the
+last snapshot, the restart can be done with relatively few lines
+in an extra elixir file.
+However, some might prefer to keep everything in one elixir and use
+conditionals like ```if restart``` with a user-defined boolean variable ```restart```.
+
+First we need to define from which file we want to restart, e.g.
+```julia
+restart_file = "restart_000021.h5"
+restart_filename = joinpath("out", restart_file)
+```
+
+Then we load the mesh file:
+```julia
+mesh = load_mesh(restart_filename)
+```
+
+This is then needed for the semidiscretization:
+```julia
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver)
+```
+
+We then define a new time span for the simulation whose starting time is
+the one from the snapshot:
+```julia
+tspan = (load_time(restart_filename), 2.0)
+```
+
+We now also take the last ```dt```, so that our solver does not need to first find
+one to fulfill the CFL condition:
+```julia
+dt = load_dt(restart_filename)
+```
+
+The ODE that we will pass to the solver is now:
+```julia
+ode = semidiscretize(semi, tspan, restart_filename)
+```
+
+You should now define a [`SaveSolutionCallback`](@ref) similar to the
+[original simulation](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/structured_2d_dgsem/elixir_advection_extended.jl),
+but with ```save_initial_solution=false```, otherwise the snapshot we restart from will be overwritten.
+If you are using a single elixir for both the original simulation and the restart,
+you can reuse your [`SaveSolutionCallback`](@ref), but you need to set
+```julia
+save_solution.condition.save_initial_solution = false
+```
+
+Before we compute the solution using
+[OrdinaryDiffEq.jl](https://github.com/SciML/OrdinaryDiffEq.jl)
+we need to initialize the integrator
+and set its time step number, e.g.:
+```julia
+integrator = init(ode, CarpenterKennedy2N54(williamson_condition=false),
+ dt=dt, save_everystep=false, callback=callbacks);
+integrator.iter = load_timestep(restart_filename)
+integrator.stats.naccept = integrator.iter
+```
+
+Now we can compute the solution:
+```julia
+sol = solve!(integrator)
+```
+
+An example is [```examples/structured_2d_dgsem/elixir_advection_restart.jl```](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/structured_2d_dgsem/elixir_advection_restart.jl).
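Putting these steps together, a minimal restart elixir could look like the following sketch. It assumes that `equations`, `solver`, `initial_condition`, `save_solution`, and `callbacks` are already defined as in the original elixir; the restart file name is only an example:
```julia
using OrdinaryDiffEq
using Trixi

restart_filename = joinpath("out", "restart_000021.h5")  # example file name

mesh = load_mesh(restart_filename)
semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver)

tspan = (load_time(restart_filename), 2.0)
dt = load_dt(restart_filename)
ode = semidiscretize(semi, tspan, restart_filename)

# Do not overwrite the snapshot we restart from
save_solution.condition.save_initial_solution = false

integrator = init(ode, CarpenterKennedy2N54(williamson_condition=false),
                  dt=dt, save_everystep=false, callback=callbacks)
integrator.iter = load_timestep(restart_filename)
integrator.stats.naccept = integrator.iter

sol = solve!(integrator)
```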
diff --git a/examples/p4est_2d_dgsem/elixir_advection_restart.jl b/examples/p4est_2d_dgsem/elixir_advection_restart.jl
index 1906fb2896e..79a35199b83 100644
--- a/examples/p4est_2d_dgsem/elixir_advection_restart.jl
+++ b/examples/p4est_2d_dgsem/elixir_advection_restart.jl
@@ -24,13 +24,23 @@ semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver,
boundary_conditions=boundary_conditions)
tspan = (load_time(restart_filename), 2.0)
+dt = load_dt(restart_filename)
ode = semidiscretize(semi, tspan, restart_filename);
+# Do not overwrite the initial snapshot written by elixir_advection_extended.jl.
+save_solution.condition.save_initial_solution = false
+
+integrator = init(ode, CarpenterKennedy2N54(williamson_condition=false),
+ dt=dt, # solve needs some value here but it will be overwritten by the stepsize_callback
+ save_everystep=false, callback=callbacks);
+
+# Get the last time index and work with that.
+integrator.iter = load_timestep(restart_filename)
+integrator.stats.naccept = integrator.iter
+
###############################################################################
# run the simulation
-sol = solve(ode, CarpenterKennedy2N54(williamson_condition=false),
- dt=1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
- save_everystep=false, callback=callbacks);
+sol = solve!(integrator)
summary_callback() # print the timer summary
diff --git a/examples/p4est_3d_dgsem/elixir_advection_restart.jl b/examples/p4est_3d_dgsem/elixir_advection_restart.jl
index 71b37e9f39b..b27eaab62e2 100644
--- a/examples/p4est_3d_dgsem/elixir_advection_restart.jl
+++ b/examples/p4est_3d_dgsem/elixir_advection_restart.jl
@@ -21,13 +21,23 @@ mesh = load_mesh(restart_filename)
semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition_convergence_test, solver)
tspan = (load_time(restart_filename), 2.0)
+dt = load_dt(restart_filename)
ode = semidiscretize(semi, tspan, restart_filename);
+# Do not overwrite the initial snapshot written by elixir_advection_extended.jl.
+save_solution.condition.save_initial_solution = false
+
+integrator = init(ode, CarpenterKennedy2N54(williamson_condition=false),
+ dt=dt, # solve needs some value here but it will be overwritten by the stepsize_callback
+ save_everystep=false, callback=callbacks);
+
+# Get the last time index and work with that.
+integrator.iter = load_timestep(restart_filename)
+integrator.stats.naccept = integrator.iter
+
###############################################################################
# run the simulation
-sol = solve(ode, CarpenterKennedy2N54(williamson_condition=false),
- dt=1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
- save_everystep=false, callback=callbacks);
+sol = solve!(integrator)
summary_callback() # print the timer summary
diff --git a/examples/structured_2d_dgsem/elixir_advection_restart.jl b/examples/structured_2d_dgsem/elixir_advection_restart.jl
index 2c2a0ef8f51..98c44fac71a 100644
--- a/examples/structured_2d_dgsem/elixir_advection_restart.jl
+++ b/examples/structured_2d_dgsem/elixir_advection_restart.jl
@@ -23,13 +23,22 @@ mesh = load_mesh(restart_filename)
semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver)
tspan = (load_time(restart_filename), 2.0)
+dt = load_dt(restart_filename)
ode = semidiscretize(semi, tspan, restart_filename);
+# Do not overwrite the initial snapshot written by elixir_advection_extended.jl.
+save_solution.condition.save_initial_solution = false
+
+integrator = init(ode, CarpenterKennedy2N54(williamson_condition=false),
+ dt=dt, # solve needs some value here but it will be overwritten by the stepsize_callback
+ save_everystep=false, callback=callbacks);
+
+# Get the last time index and work with that.
+integrator.iter = load_timestep(restart_filename)
+integrator.stats.naccept = integrator.iter
###############################################################################
# run the simulation
-sol = solve(ode, CarpenterKennedy2N54(williamson_condition=false),
- dt=1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
- save_everystep=false, callback=callbacks);
+sol = solve!(integrator)
summary_callback() # print the timer summary
diff --git a/examples/structured_3d_dgsem/elixir_advection_restart.jl b/examples/structured_3d_dgsem/elixir_advection_restart.jl
index 39e1a675167..39d28848c77 100644
--- a/examples/structured_3d_dgsem/elixir_advection_restart.jl
+++ b/examples/structured_3d_dgsem/elixir_advection_restart.jl
@@ -21,13 +21,23 @@ mesh = load_mesh(restart_filename)
semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition_convergence_test, solver)
tspan = (load_time(restart_filename), 2.0)
+dt = load_dt(restart_filename)
ode = semidiscretize(semi, tspan, restart_filename);
+# Do not overwrite the initial snapshot written by elixir_advection_extended.jl.
+save_solution.condition.save_initial_solution = false
+
+integrator = init(ode, CarpenterKennedy2N54(williamson_condition=false),
+ dt=dt, # solve needs some value here but it will be overwritten by the stepsize_callback
+ save_everystep=false, callback=callbacks);
+
+# Get the last time index and work with that.
+integrator.iter = load_timestep(restart_filename)
+integrator.stats.naccept = integrator.iter
+
###############################################################################
# run the simulation
-sol = solve(ode, CarpenterKennedy2N54(williamson_condition=false),
- dt=1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
- save_everystep=false, callback=callbacks);
+sol = solve!(integrator)
summary_callback() # print the timer summary
diff --git a/examples/tree_2d_dgsem/elixir_advection_restart.jl b/examples/tree_2d_dgsem/elixir_advection_restart.jl
index 2cb45c0b47e..4ceb5932573 100644
--- a/examples/tree_2d_dgsem/elixir_advection_restart.jl
+++ b/examples/tree_2d_dgsem/elixir_advection_restart.jl
@@ -20,13 +20,23 @@ mesh = load_mesh(restart_filename)
semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver)
tspan = (load_time(restart_filename), 2.0)
+dt = load_dt(restart_filename)
ode = semidiscretize(semi, tspan, restart_filename);
+# Do not overwrite the initial snapshot written by elixir_advection_extended.jl.
+save_solution.condition.save_initial_solution = false
+
+integrator = init(ode, CarpenterKennedy2N54(williamson_condition=false),
+ dt=dt, # solve needs some value here but it will be overwritten by the stepsize_callback
+ save_everystep=false, callback=callbacks)
+
+# Get the last time index and work with that.
+integrator.iter = load_timestep(restart_filename)
+integrator.stats.naccept = integrator.iter
###############################################################################
# run the simulation
-sol = solve(ode, CarpenterKennedy2N54(williamson_condition=false),
- dt=1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
- save_everystep=false, callback=callbacks);
+sol = solve!(integrator)
+
summary_callback() # print the timer summary
diff --git a/examples/tree_3d_dgsem/elixir_advection_restart.jl b/examples/tree_3d_dgsem/elixir_advection_restart.jl
index 83bf4418b98..3061f165874 100644
--- a/examples/tree_3d_dgsem/elixir_advection_restart.jl
+++ b/examples/tree_3d_dgsem/elixir_advection_restart.jl
@@ -20,13 +20,23 @@ mesh = load_mesh(restart_filename)
semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver)
tspan = (load_time(restart_filename), 2.0)
+dt = load_dt(restart_filename)
ode = semidiscretize(semi, tspan, restart_filename);
+# Do not overwrite the initial snapshot written by elixir_advection_extended.jl.
+save_solution.condition.save_initial_solution = false
+
+integrator = init(ode, CarpenterKennedy2N54(williamson_condition=false),
+ dt=dt, # solve needs some value here but it will be overwritten by the stepsize_callback
+ save_everystep=false, callback=callbacks);
+
+# Get the last time index and work with that.
+integrator.iter = load_timestep(restart_filename)
+integrator.stats.naccept = integrator.iter
+
###############################################################################
# run the simulation
-sol = solve(ode, CarpenterKennedy2N54(williamson_condition=false),
- dt=1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
- save_everystep=false, callback=callbacks);
+sol = solve!(integrator)
summary_callback() # print the timer summary
diff --git a/examples/unstructured_2d_dgsem/elixir_euler_restart.jl b/examples/unstructured_2d_dgsem/elixir_euler_restart.jl
index 2ac67652023..b85cc2c6d70 100644
--- a/examples/unstructured_2d_dgsem/elixir_euler_restart.jl
+++ b/examples/unstructured_2d_dgsem/elixir_euler_restart.jl
@@ -22,14 +22,24 @@ semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver,
boundary_conditions=boundary_conditions)
tspan = (load_time(restart_filename), 1.0)
+dt = load_dt(restart_filename)
ode = semidiscretize(semi, tspan, restart_filename);
+# Do not overwrite the initial snapshot written by elixir_advection_extended.jl.
+save_solution.condition.save_initial_solution = false
+
+integrator = init(ode, CarpenterKennedy2N54(williamson_condition=false),
+ dt=dt, # solve needs some value here but it will be overwritten by the stepsize_callback
+ save_everystep=false, callback=callbacks);
+
+# Get the last time index and work with that.
+integrator.iter = load_timestep(restart_filename)
+integrator.stats.naccept = integrator.iter
+
###############################################################################
# run the simulation
-sol = solve(ode, CarpenterKennedy2N54(williamson_condition=false),
- dt=1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
- save_everystep=false, callback=callbacks);
+sol = solve!(integrator)
summary_callback() # print the timer summary
diff --git a/src/Trixi.jl b/src/Trixi.jl
index 66878f4b459..6fc62f50520 100644
--- a/src/Trixi.jl
+++ b/src/Trixi.jl
@@ -241,7 +241,7 @@ export SummaryCallback, SteadyStateCallback, AnalysisCallback, AliveCallback,
GlmSpeedCallback, LBMCollisionCallback, EulerAcousticsCouplingCallback,
TrivialCallback, AnalysisCallbackCoupled
-export load_mesh, load_time
+export load_mesh, load_time, load_timestep, load_dt
export ControllerThreeLevel, ControllerThreeLevelCombined,
IndicatorLöhner, IndicatorLoehner, IndicatorMax,
diff --git a/src/callbacks_step/save_restart.jl b/src/callbacks_step/save_restart.jl
index e23f58f26ea..f567a5c7fda 100644
--- a/src/callbacks_step/save_restart.jl
+++ b/src/callbacks_step/save_restart.jl
@@ -130,6 +130,28 @@ function load_time(restart_file::AbstractString)
end
end
+"""
+ load_timestep(restart_file::AbstractString)
+
+Load the time step number (`iter` in OrdinaryDiffEq.jl) saved in a `restart_file`.
+"""
+function load_timestep(restart_file::AbstractString)
+ h5open(restart_file, "r") do file
+ read(attributes(file)["timestep"])
+ end
+end
+
+"""
+ load_dt(restart_file::AbstractString)
+
+Load the time step size (`dt` in OrdinaryDiffEq.jl) saved in a `restart_file`.
+"""
+function load_dt(restart_file::AbstractString)
+ h5open(restart_file, "r") do file
+ read(attributes(file)["dt"])
+ end
+end
+
function load_restart_file(semi::AbstractSemidiscretization, restart_file)
load_restart_file(mesh_equations_solver_cache(semi)..., restart_file)
end
diff --git a/src/meshes/mesh_io.jl b/src/meshes/mesh_io.jl
index ede85d80106..da67fe23e0e 100644
--- a/src/meshes/mesh_io.jl
+++ b/src/meshes/mesh_io.jl
@@ -286,6 +286,7 @@ function load_mesh_serial(mesh_file::AbstractString; n_cells_max, RealT)
mesh = StructuredMesh(size, mapping; RealT = RealT, unsaved_changes = false,
mapping_as_string = mapping_as_string)
+ mesh.current_filename = mesh_file
elseif mesh_type == "UnstructuredMesh2D"
mesh_filename, periodicity_ = h5open(mesh_file, "r") do file
return read(attributes(file)["mesh_filename"]),
From a191e39b0d6bc75616aa2113b446b68e3b65ee41 Mon Sep 17 00:00:00 2001
From: Daniel Doehring
Date: Thu, 13 Jul 2023 20:28:55 +0200
Subject: [PATCH 08/40] Hll 2 wave improvements non breaking (#1561)
* Add Classical and Naive HLL 2 Wave solver to classic Hyperbolic PDEs
* Format Code
* HLLE wave speeds for SWE
* Fix typos
* Update tests for HLL
* Unit test 1D MHD HLL, HLLE
* Add example for classical HLL 2 wave
* remove plots
* Use lowercase for flux
* Use einfeldt for mhd
* Use hlle for mhd tests
* Missing comma causes failing tests
* Correct bug in SWE 2D Roe eigval comp, unit tests
* format
* Revert "format"
This reverts commit 047a5e75b4a5ee4a0f58a7979d58b26f15f24334.
* format equations
* Add unit tests for HLL naive
* Revert default hll flux
* Rename min_max_speed to min_max_speed_davis and reduce documentation
* Update src/equations/shallow_water_1d.jl: Comments
Co-authored-by: Hendrik Ranocha
* Add published resource for Roe averages for SWE
* Add tests for rotation
* Remove breaking portion from PR
* fix copy paste error
* Lowercase davis
* Update src/equations/numerical_fluxes.jl
Co-authored-by: Hendrik Ranocha
* Update src/equations/numerical_fluxes.jl
Co-authored-by: Hendrik Ranocha
* Update src/equations/numerical_fluxes.jl
Co-authored-by: Hendrik Ranocha
* Update src/equations/numerical_fluxes.jl
Co-authored-by: Hendrik Ranocha
* Update src/equations/numerical_fluxes.jl
Co-authored-by: Hendrik Ranocha
* Update src/equations/numerical_fluxes.jl
Co-authored-by: Hendrik Ranocha
* Update test/test_tree_2d_mhd.jl
Co-authored-by: Hendrik Ranocha
* Update src/equations/ideal_glm_mhd_1d.jl
Co-authored-by: Hendrik Ranocha
* Update src/equations/ideal_glm_mhd_2d.jl
Co-authored-by: Hendrik Ranocha
* Update src/equations/ideal_glm_mhd_3d.jl
Co-authored-by: Hendrik Ranocha
* Update test/test_tree_3d_mhd.jl
Co-authored-by: Hendrik Ranocha
* Remove hll_davis test
* Split consistency checks
* Try to resolve conflict with 5ff677c
* Add tests
* More tests
---------
Co-authored-by: Hendrik Ranocha
---
examples/dgmulti_2d/elixir_euler_bilinear.jl | 2 +-
examples/dgmulti_2d/elixir_euler_curved.jl | 2 +-
.../elixir_euler_triangulate_pkg_mesh.jl | 2 +-
examples/dgmulti_2d/elixir_euler_weakform.jl | 2 +-
.../elixir_euler_weakform_periodic.jl | 2 +-
examples/dgmulti_3d/elixir_euler_curved.jl | 2 +-
examples/dgmulti_3d/elixir_euler_weakform.jl | 2 +-
.../elixir_euler_weakform_periodic.jl | 2 +-
src/Trixi.jl | 2 +-
src/equations/compressible_euler_1d.jl | 19 +-
src/equations/compressible_euler_2d.jl | 43 +-
src/equations/compressible_euler_3d.jl | 50 ++-
src/equations/ideal_glm_mhd_1d.jl | 24 +-
src/equations/ideal_glm_mhd_2d.jl | 67 ++-
src/equations/ideal_glm_mhd_3d.jl | 71 ++++
src/equations/linearized_euler_2d.jl | 38 ++
src/equations/numerical_fluxes.jl | 45 ++-
src/equations/shallow_water_1d.jl | 66 ++-
src/equations/shallow_water_2d.jl | 147 ++++++-
test/test_structured_1d.jl | 8 +
test/test_unit.jl | 381 +++++++++++++++++-
21 files changed, 946 insertions(+), 31 deletions(-)
diff --git a/examples/dgmulti_2d/elixir_euler_bilinear.jl b/examples/dgmulti_2d/elixir_euler_bilinear.jl
index beb5c863971..bdd582610ea 100644
--- a/examples/dgmulti_2d/elixir_euler_bilinear.jl
+++ b/examples/dgmulti_2d/elixir_euler_bilinear.jl
@@ -2,7 +2,7 @@
using Trixi, OrdinaryDiffEq
dg = DGMulti(polydeg = 3, element_type = Quad(), approximation_type = SBP(),
- surface_integral = SurfaceIntegralWeakForm(FluxHLL()),
+ surface_integral = SurfaceIntegralWeakForm(flux_hll),
volume_integral = VolumeIntegralFluxDifferencing(flux_ranocha))
equations = CompressibleEulerEquations2D(1.4)
diff --git a/examples/dgmulti_2d/elixir_euler_curved.jl b/examples/dgmulti_2d/elixir_euler_curved.jl
index 4f1d613b247..a3ba62f1cfb 100644
--- a/examples/dgmulti_2d/elixir_euler_curved.jl
+++ b/examples/dgmulti_2d/elixir_euler_curved.jl
@@ -2,7 +2,7 @@
using Trixi, OrdinaryDiffEq
dg = DGMulti(polydeg = 3, element_type = Quad(), approximation_type = SBP(),
- surface_integral = SurfaceIntegralWeakForm(FluxHLL()),
+ surface_integral = SurfaceIntegralWeakForm(flux_hll),
volume_integral = VolumeIntegralFluxDifferencing(flux_ranocha))
equations = CompressibleEulerEquations2D(1.4)
diff --git a/examples/dgmulti_2d/elixir_euler_triangulate_pkg_mesh.jl b/examples/dgmulti_2d/elixir_euler_triangulate_pkg_mesh.jl
index 1f35a11bf8e..c10b5e46a14 100644
--- a/examples/dgmulti_2d/elixir_euler_triangulate_pkg_mesh.jl
+++ b/examples/dgmulti_2d/elixir_euler_triangulate_pkg_mesh.jl
@@ -1,7 +1,7 @@
using Trixi, OrdinaryDiffEq
dg = DGMulti(polydeg = 3, element_type = Tri(),
- surface_integral = SurfaceIntegralWeakForm(FluxHLL()),
+ surface_integral = SurfaceIntegralWeakForm(flux_hll),
volume_integral = VolumeIntegralWeakForm())
equations = CompressibleEulerEquations2D(1.4)
diff --git a/examples/dgmulti_2d/elixir_euler_weakform.jl b/examples/dgmulti_2d/elixir_euler_weakform.jl
index 1ecc666c8db..486a30b37f1 100644
--- a/examples/dgmulti_2d/elixir_euler_weakform.jl
+++ b/examples/dgmulti_2d/elixir_euler_weakform.jl
@@ -2,7 +2,7 @@
using Trixi, OrdinaryDiffEq
dg = DGMulti(polydeg = 3, element_type = Tri(), approximation_type = Polynomial(),
- surface_integral = SurfaceIntegralWeakForm(FluxHLL()),
+ surface_integral = SurfaceIntegralWeakForm(flux_hll),
volume_integral = VolumeIntegralWeakForm())
equations = CompressibleEulerEquations2D(1.4)
diff --git a/examples/dgmulti_2d/elixir_euler_weakform_periodic.jl b/examples/dgmulti_2d/elixir_euler_weakform_periodic.jl
index 48cc8070857..c4c83fff642 100644
--- a/examples/dgmulti_2d/elixir_euler_weakform_periodic.jl
+++ b/examples/dgmulti_2d/elixir_euler_weakform_periodic.jl
@@ -2,7 +2,7 @@
using Trixi, OrdinaryDiffEq
dg = DGMulti(polydeg = 3, element_type = Tri(), approximation_type = Polynomial(),
- surface_integral = SurfaceIntegralWeakForm(FluxHLL()),
+ surface_integral = SurfaceIntegralWeakForm(flux_hll),
volume_integral = VolumeIntegralWeakForm())
equations = CompressibleEulerEquations2D(1.4)
diff --git a/examples/dgmulti_3d/elixir_euler_curved.jl b/examples/dgmulti_3d/elixir_euler_curved.jl
index 339d6ce0186..d8c4df5dd64 100644
--- a/examples/dgmulti_3d/elixir_euler_curved.jl
+++ b/examples/dgmulti_3d/elixir_euler_curved.jl
@@ -2,7 +2,7 @@
using Trixi, OrdinaryDiffEq
dg = DGMulti(polydeg = 3, element_type = Hex(), approximation_type=SBP(),
- surface_integral = SurfaceIntegralWeakForm(FluxHLL()),
+ surface_integral = SurfaceIntegralWeakForm(flux_hll),
volume_integral = VolumeIntegralFluxDifferencing(flux_ranocha))
equations = CompressibleEulerEquations3D(1.4)
diff --git a/examples/dgmulti_3d/elixir_euler_weakform.jl b/examples/dgmulti_3d/elixir_euler_weakform.jl
index 4ad9f045eb6..b167377af51 100644
--- a/examples/dgmulti_3d/elixir_euler_weakform.jl
+++ b/examples/dgmulti_3d/elixir_euler_weakform.jl
@@ -2,7 +2,7 @@
using Trixi, OrdinaryDiffEq
dg = DGMulti(polydeg = 3, element_type = Tet(),
- surface_integral = SurfaceIntegralWeakForm(FluxHLL()),
+ surface_integral = SurfaceIntegralWeakForm(flux_hll),
volume_integral = VolumeIntegralWeakForm())
equations = CompressibleEulerEquations3D(1.4)
diff --git a/examples/dgmulti_3d/elixir_euler_weakform_periodic.jl b/examples/dgmulti_3d/elixir_euler_weakform_periodic.jl
index f554167df90..6b17d4bba65 100644
--- a/examples/dgmulti_3d/elixir_euler_weakform_periodic.jl
+++ b/examples/dgmulti_3d/elixir_euler_weakform_periodic.jl
@@ -2,7 +2,7 @@
using Trixi, OrdinaryDiffEq
dg = DGMulti(polydeg = 3, element_type = Tet(), approximation_type = Polynomial(),
- surface_integral = SurfaceIntegralWeakForm(FluxHLL()),
+ surface_integral = SurfaceIntegralWeakForm(flux_hll),
volume_integral = VolumeIntegralWeakForm())
equations = CompressibleEulerEquations3D(1.4)
diff --git a/src/Trixi.jl b/src/Trixi.jl
index 6fc62f50520..34a1977d4f5 100644
--- a/src/Trixi.jl
+++ b/src/Trixi.jl
@@ -164,7 +164,7 @@ export flux, flux_central, flux_lax_friedrichs, flux_hll, flux_hllc, flux_hlle,
hydrostatic_reconstruction_audusse_etal, flux_nonconservative_audusse_etal,
FluxPlusDissipation, DissipationGlobalLaxFriedrichs, DissipationLocalLaxFriedrichs,
FluxLaxFriedrichs, max_abs_speed_naive,
- FluxHLL, min_max_speed_naive,
+ FluxHLL, min_max_speed_naive, min_max_speed_davis, min_max_speed_einfeldt,
FluxLMARS,
FluxRotated,
flux_shima_etal_turbo, flux_ranocha_turbo,
diff --git a/src/equations/compressible_euler_1d.jl b/src/equations/compressible_euler_1d.jl
index 15f7a2cb4c4..e4fd0997eae 100644
--- a/src/equations/compressible_euler_1d.jl
+++ b/src/equations/compressible_euler_1d.jl
@@ -628,7 +628,7 @@ end
return SVector(f1m, f2m, f3m)
end
-# Calculate maximum wave speed for local Lax-Friedrichs-type dissipation as the
+# Calculate estimates for maximum wave speed for local Lax-Friedrichs-type dissipation as the
# maximum velocity magnitude plus the maximum speed of sound
@inline function max_abs_speed_naive(u_ll, u_rr, orientation::Integer,
equations::CompressibleEulerEquations1D)
@@ -648,7 +648,7 @@ end
λ_max = max(v_mag_ll, v_mag_rr) + max(c_ll, c_rr)
end
-# Calculate minimum and maximum wave speeds for HLL-type fluxes
+# Calculate estimates for minimum and maximum wave speeds for HLL-type fluxes
@inline function min_max_speed_naive(u_ll, u_rr, orientation::Integer,
equations::CompressibleEulerEquations1D)
rho_ll, v1_ll, p_ll = cons2prim(u_ll, equations)
@@ -660,6 +660,21 @@ end
return λ_min, λ_max
end
+# More refined estimates for minimum and maximum wave speeds for HLL-type fluxes
+@inline function min_max_speed_davis(u_ll, u_rr, orientation::Integer,
+ equations::CompressibleEulerEquations1D)
+ rho_ll, v1_ll, p_ll = cons2prim(u_ll, equations)
+ rho_rr, v1_rr, p_rr = cons2prim(u_rr, equations)
+
+ c_ll = sqrt(equations.gamma * p_ll / rho_ll)
+ c_rr = sqrt(equations.gamma * p_rr / rho_rr)
+
+ λ_min = min(v1_ll - c_ll, v1_rr - c_rr)
+ λ_max = max(v1_ll + c_ll, v1_rr + c_rr)
+
+ return λ_min, λ_max
+end
+
"""
flux_hllc(u_ll, u_rr, orientation, equations::CompressibleEulerEquations1D)
diff --git a/src/equations/compressible_euler_2d.jl b/src/equations/compressible_euler_2d.jl
index 05987c510b8..27b92f41953 100644
--- a/src/equations/compressible_euler_2d.jl
+++ b/src/equations/compressible_euler_2d.jl
@@ -1032,7 +1032,7 @@ end
return max(abs(v_ll), abs(v_rr)) + max(c_ll, c_rr) * norm(normal_direction)
end
-# Calculate minimum and maximum wave speeds for HLL-type fluxes
+# Calculate estimates for minimum and maximum wave speeds for HLL-type fluxes
@inline function min_max_speed_naive(u_ll, u_rr, orientation::Integer,
equations::CompressibleEulerEquations2D)
rho_ll, v1_ll, v2_ll, p_ll = cons2prim(u_ll, equations)
@@ -1065,6 +1065,47 @@ end
return λ_min, λ_max
end
+# More refined estimates for minimum and maximum wave speeds for HLL-type fluxes
+@inline function min_max_speed_davis(u_ll, u_rr, orientation::Integer,
+ equations::CompressibleEulerEquations2D)
+ rho_ll, v1_ll, v2_ll, p_ll = cons2prim(u_ll, equations)
+ rho_rr, v1_rr, v2_rr, p_rr = cons2prim(u_rr, equations)
+
+ c_ll = sqrt(equations.gamma * p_ll / rho_ll)
+ c_rr = sqrt(equations.gamma * p_rr / rho_rr)
+
+ if orientation == 1 # x-direction
+ λ_min = min(v1_ll - c_ll, v1_rr - c_rr)
+ λ_max = max(v1_ll + c_ll, v1_rr + c_rr)
+ else # y-direction
+ λ_min = min(v2_ll - c_ll, v2_rr - c_rr)
+ λ_max = max(v2_ll + c_ll, v2_rr + c_rr)
+ end
+
+ return λ_min, λ_max
+end
+
+# More refined estimates for minimum and maximum wave speeds for HLL-type fluxes
+@inline function min_max_speed_davis(u_ll, u_rr, normal_direction::AbstractVector,
+ equations::CompressibleEulerEquations2D)
+ rho_ll, v1_ll, v2_ll, p_ll = cons2prim(u_ll, equations)
+ rho_rr, v1_rr, v2_rr, p_rr = cons2prim(u_rr, equations)
+
+ norm_ = norm(normal_direction)
+
+ c_ll = sqrt(equations.gamma * p_ll / rho_ll) * norm_
+ c_rr = sqrt(equations.gamma * p_rr / rho_rr) * norm_
+
+ v_normal_ll = v1_ll * normal_direction[1] + v2_ll * normal_direction[2]
+ v_normal_rr = v1_rr * normal_direction[1] + v2_rr * normal_direction[2]
+
+ # The v_normals are already scaled by the norm
+ λ_min = min(v_normal_ll - c_ll, v_normal_rr - c_rr)
+ λ_max = max(v_normal_ll + c_ll, v_normal_rr + c_rr)
+
+ return λ_min, λ_max
+end
+
# Called inside `FluxRotated` in `numerical_fluxes.jl` so the direction
# has been normalized prior to this rotation of the state vector
@inline function rotate_to_x(u, normal_vector, equations::CompressibleEulerEquations2D)
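A brief, illustrative sketch of how the newly exported `min_max_speed_davis` estimate can be combined with the HLL flux when building a solver; the equation and solver setup here are assumptions mirroring the elixirs earlier in this series, not part of the patch:
```julia
using Trixi

equations = CompressibleEulerEquations2D(1.4)

# HLL surface flux using the Davis wave speed estimates
surface_flux = FluxHLL(min_max_speed_davis)
solver = DGSEM(polydeg=3, surface_flux=surface_flux)
```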
diff --git a/src/equations/compressible_euler_3d.jl b/src/equations/compressible_euler_3d.jl
index 2085811f832..7f25bde31fd 100644
--- a/src/equations/compressible_euler_3d.jl
+++ b/src/equations/compressible_euler_3d.jl
@@ -1070,7 +1070,7 @@ end
return max(abs(v_ll), abs(v_rr)) + max(c_ll, c_rr) * norm(normal_direction)
end
-# Calculate minimum and maximum wave speeds for HLL-type fluxes
+# Calculate estimates for minimum and maximum wave speeds for HLL-type fluxes
@inline function min_max_speed_naive(u_ll, u_rr, orientation::Integer,
equations::CompressibleEulerEquations3D)
rho_ll, v1_ll, v2_ll, v3_ll, p_ll = cons2prim(u_ll, equations)
@@ -1108,6 +1108,54 @@ end
return λ_min, λ_max
end
+# More refined estimates for minimum and maximum wave speeds for HLL-type fluxes
+@inline function min_max_speed_davis(u_ll, u_rr, orientation::Integer,
+ equations::CompressibleEulerEquations3D)
+ rho_ll, v1_ll, v2_ll, v3_ll, p_ll = cons2prim(u_ll, equations)
+ rho_rr, v1_rr, v2_rr, v3_rr, p_rr = cons2prim(u_rr, equations)
+
+ c_ll = sqrt(equations.gamma * p_ll / rho_ll)
+ c_rr = sqrt(equations.gamma * p_rr / rho_rr)
+
+ if orientation == 1 # x-direction
+ λ_min = min(v1_ll - c_ll, v1_rr - c_rr)
+ λ_max = max(v1_ll + c_ll, v1_rr + c_rr)
+ elseif orientation == 2 # y-direction
+ λ_min = min(v2_ll - c_ll, v2_rr - c_rr)
+ λ_max = max(v2_ll + c_ll, v2_rr + c_rr)
+ else # z-direction
+ λ_min = min(v3_ll - c_ll, v3_rr - c_rr)
+ λ_max = max(v3_ll + c_ll, v3_rr + c_rr)
+ end
+
+ return λ_min, λ_max
+end
+
+# More refined estimates for minimum and maximum wave speeds for HLL-type fluxes
+@inline function min_max_speed_davis(u_ll, u_rr, normal_direction::AbstractVector,
+ equations::CompressibleEulerEquations3D)
+ rho_ll, v1_ll, v2_ll, v3_ll, p_ll = cons2prim(u_ll, equations)
+ rho_rr, v1_rr, v2_rr, v3_rr, p_rr = cons2prim(u_rr, equations)
+
+ norm_ = norm(normal_direction)
+
+ c_ll = sqrt(equations.gamma * p_ll / rho_ll) * norm_
+ c_rr = sqrt(equations.gamma * p_rr / rho_rr) * norm_
+
+ v_normal_ll = v1_ll * normal_direction[1] +
+ v2_ll * normal_direction[2] +
+ v3_ll * normal_direction[3]
+ v_normal_rr = v1_rr * normal_direction[1] +
+ v2_rr * normal_direction[2] +
+ v3_rr * normal_direction[3]
+
+ # The v_normals are already scaled by the norm
+ λ_min = min(v_normal_ll - c_ll, v_normal_rr - c_rr)
+ λ_max = max(v_normal_ll + c_ll, v_normal_rr + c_rr)
+
+ return λ_min, λ_max
+end
+
# Rotate normal vector to x-axis; normal, tangent1 and tangent2 need to be orthonormal
# Called inside `FluxRotated` in `numerical_fluxes.jl` so the directions
# has been normalized prior to this rotation of the state vector
diff --git a/src/equations/ideal_glm_mhd_1d.jl b/src/equations/ideal_glm_mhd_1d.jl
index 4ef593cda53..7e5c94c7bc3 100644
--- a/src/equations/ideal_glm_mhd_1d.jl
+++ b/src/equations/ideal_glm_mhd_1d.jl
@@ -277,13 +277,33 @@ end
λ_max = max(abs(v_ll), abs(v_rr)) + max(cf_ll, cf_rr)
end
+# More refined estimates for minimum and maximum wave speeds for HLL-type fluxes
+@inline function min_max_speed_davis(u_ll, u_rr, orientation::Integer,
+ equations::IdealGlmMhdEquations1D)
+ rho_ll, rho_v1_ll, _ = u_ll
+ rho_rr, rho_v1_rr, _ = u_rr
+
+ # Calculate primitive variables
+ v1_ll = rho_v1_ll / rho_ll
+ v1_rr = rho_v1_rr / rho_rr
+
+ # Approximate the left-most and right-most eigenvalues in the Riemann fan
+ c_f_ll = calc_fast_wavespeed(u_ll, orientation, equations)
+ c_f_rr = calc_fast_wavespeed(u_rr, orientation, equations)
+
+ λ_min = min(v1_ll - c_f_ll, v1_rr - c_f_rr)
+ λ_max = max(v1_ll + c_f_ll, v1_rr + c_f_rr)
+
+ return λ_min, λ_max
+end
+
"""
- min_max_speed_naive(u_ll, u_rr, orientation, equations::IdealGlmMhdEquations1D)
+ min_max_speed_naive(u_ll, u_rr, orientation::Integer, equations::IdealGlmMhdEquations1D)
Calculate minimum and maximum wave speeds for HLL-type fluxes as in
- Li (2005)
An HLLC Riemann solver for magneto-hydrodynamics
- [DOI: 10.1016/j.jcp.2004.08.020](https://doi.org/10.1016/j.jcp.2004.08.020)
+ [DOI: 10.1016/j.jcp.2004.08.020](https://doi.org/10.1016/j.jcp.2004.08.020).
"""
@inline function min_max_speed_naive(u_ll, u_rr, orientation::Integer,
equations::IdealGlmMhdEquations1D)
diff --git a/src/equations/ideal_glm_mhd_2d.jl b/src/equations/ideal_glm_mhd_2d.jl
index fb3048fe883..8fef1ee22c9 100644
--- a/src/equations/ideal_glm_mhd_2d.jl
+++ b/src/equations/ideal_glm_mhd_2d.jl
@@ -585,13 +585,70 @@ end
return max(abs(v_ll), abs(v_rr)) + max(cf_ll, cf_rr)
end
+# More refined estimates for minimum and maximum wave speeds for HLL-type fluxes
+@inline function min_max_speed_davis(u_ll, u_rr, orientation::Integer,
+ equations::IdealGlmMhdEquations2D)
+ rho_ll, rho_v1_ll, rho_v2_ll, _ = u_ll
+ rho_rr, rho_v1_rr, rho_v2_rr, _ = u_rr
+
+ # Calculate primitive velocity variables
+ v1_ll = rho_v1_ll / rho_ll
+ v2_ll = rho_v2_ll / rho_ll
+
+ v1_rr = rho_v1_rr / rho_rr
+ v2_rr = rho_v2_rr / rho_rr
+
+ # Approximate the left-most and right-most eigenvalues in the Riemann fan
+ if orientation == 1 # x-direction
+ c_f_ll = calc_fast_wavespeed(u_ll, orientation, equations)
+ c_f_rr = calc_fast_wavespeed(u_rr, orientation, equations)
+
+ λ_min = min(v1_ll - c_f_ll, v1_rr - c_f_rr)
+ λ_max = max(v1_ll + c_f_ll, v1_rr + c_f_rr)
+ else # y-direction
+ c_f_ll = calc_fast_wavespeed(u_ll, orientation, equations)
+ c_f_rr = calc_fast_wavespeed(u_rr, orientation, equations)
+
+ λ_min = min(v2_ll - c_f_ll, v2_rr - c_f_rr)
+ λ_max = max(v2_ll + c_f_ll, v2_rr + c_f_rr)
+ end
+
+ return λ_min, λ_max
+end
+
+# More refined estimates for minimum and maximum wave speeds for HLL-type fluxes
+@inline function min_max_speed_davis(u_ll, u_rr, normal_direction::AbstractVector,
+ equations::IdealGlmMhdEquations2D)
+ rho_ll, rho_v1_ll, rho_v2_ll, _ = u_ll
+ rho_rr, rho_v1_rr, rho_v2_rr, _ = u_rr
+
+ # Calculate primitive velocity variables
+ v1_ll = rho_v1_ll / rho_ll
+ v2_ll = rho_v2_ll / rho_ll
+
+ v1_rr = rho_v1_rr / rho_rr
+ v2_rr = rho_v2_rr / rho_rr
+
+ v_normal_ll = (v1_ll * normal_direction[1] + v2_ll * normal_direction[2])
+ v_normal_rr = (v1_rr * normal_direction[1] + v2_rr * normal_direction[2])
+
+ c_f_ll = calc_fast_wavespeed(u_ll, normal_direction, equations)
+ c_f_rr = calc_fast_wavespeed(u_rr, normal_direction, equations)
+
+ # Estimate the min/max eigenvalues in the normal direction
+ λ_min = min(v_normal_ll - c_f_ll, v_normal_rr - c_f_rr)
+ λ_max = max(v_normal_ll + c_f_ll, v_normal_rr + c_f_rr)
+
+ return λ_min, λ_max
+end
+
"""
- min_max_speed_naive(u_ll, u_rr, orientation, equations::IdealGlmMhdEquations2D)
+ min_max_speed_naive(u_ll, u_rr, orientation::Integer, equations::IdealGlmMhdEquations2D)
Calculate minimum and maximum wave speeds for HLL-type fluxes as in
- Li (2005)
An HLLC Riemann solver for magneto-hydrodynamics
- [DOI: 10.1016/j.jcp.2004.08.020](https://doi.org/10.1016/j.jcp.2004.08.020)
+ [DOI: 10.1016/j.jcp.2004.08.020](https://doi.org/10.1016/j.jcp.2004.08.020).
"""
@inline function min_max_speed_naive(u_ll, u_rr, orientation::Integer,
equations::IdealGlmMhdEquations2D)
@@ -635,10 +692,8 @@ end
v1_rr = rho_v1_rr / rho_rr
v2_rr = rho_v2_rr / rho_rr
- v_normal_ll = (v1_ll * normal_direction[1] +
- v2_ll * normal_direction[2])
- v_normal_rr = (v1_rr * normal_direction[1] +
- v2_rr * normal_direction[2])
+ v_normal_ll = (v1_ll * normal_direction[1] + v2_ll * normal_direction[2])
+ v_normal_rr = (v1_rr * normal_direction[1] + v2_rr * normal_direction[2])
c_f_ll = calc_fast_wavespeed(u_ll, normal_direction, equations)
c_f_rr = calc_fast_wavespeed(u_rr, normal_direction, equations)
diff --git a/src/equations/ideal_glm_mhd_3d.jl b/src/equations/ideal_glm_mhd_3d.jl
index 2e149d2849f..09990837706 100644
--- a/src/equations/ideal_glm_mhd_3d.jl
+++ b/src/equations/ideal_glm_mhd_3d.jl
@@ -670,6 +670,77 @@ end
return max(abs(v_ll), abs(v_rr)) + max(cf_ll, cf_rr)
end
+# More refined estimates for minimum and maximum wave speeds for HLL-type fluxes
+@inline function min_max_speed_davis(u_ll, u_rr, orientation::Integer,
+ equations::IdealGlmMhdEquations3D)
+ rho_ll, rho_v1_ll, rho_v2_ll, rho_v3_ll, _ = u_ll
+ rho_rr, rho_v1_rr, rho_v2_rr, rho_v3_rr, _ = u_rr
+
+ # Calculate primitive variables and speed of sound
+ v1_ll = rho_v1_ll / rho_ll
+ v2_ll = rho_v2_ll / rho_ll
+ v3_ll = rho_v3_ll / rho_ll
+
+ v1_rr = rho_v1_rr / rho_rr
+ v2_rr = rho_v2_rr / rho_rr
+ v3_rr = rho_v3_rr / rho_rr
+
+ # Approximate the left-most and right-most eigenvalues in the Riemann fan
+ if orientation == 1 # x-direction
+ c_f_ll = calc_fast_wavespeed(u_ll, orientation, equations)
+ c_f_rr = calc_fast_wavespeed(u_rr, orientation, equations)
+
+ λ_min = min(v1_ll - c_f_ll, v1_rr - c_f_rr)
+ λ_max = max(v1_ll + c_f_ll, v1_rr + c_f_rr)
+ elseif orientation == 2 # y-direction
+ c_f_ll = calc_fast_wavespeed(u_ll, orientation, equations)
+ c_f_rr = calc_fast_wavespeed(u_rr, orientation, equations)
+
+ λ_min = min(v2_ll - c_f_ll, v2_rr - c_f_rr)
+ λ_max = max(v2_ll + c_f_ll, v2_rr + c_f_rr)
+ else # z-direction
+ c_f_ll = calc_fast_wavespeed(u_ll, orientation, equations)
+ c_f_rr = calc_fast_wavespeed(u_rr, orientation, equations)
+
+ λ_min = min(v3_ll - c_f_ll, v3_rr - c_f_rr)
+ λ_max = max(v3_ll + c_f_ll, v3_rr + c_f_rr)
+ end
+
+ return λ_min, λ_max
+end
+
+# More refined estimates for minimum and maximum wave speeds for HLL-type fluxes
+@inline function min_max_speed_davis(u_ll, u_rr, normal_direction::AbstractVector,
+ equations::IdealGlmMhdEquations3D)
+ rho_ll, rho_v1_ll, rho_v2_ll, rho_v3_ll, _ = u_ll
+ rho_rr, rho_v1_rr, rho_v2_rr, rho_v3_rr, _ = u_rr
+
+ # Calculate primitive velocity variables
+ v1_ll = rho_v1_ll / rho_ll
+ v2_ll = rho_v2_ll / rho_ll
+ v3_ll = rho_v3_ll / rho_ll
+
+ v1_rr = rho_v1_rr / rho_rr
+ v2_rr = rho_v2_rr / rho_rr
+ v3_rr = rho_v3_rr / rho_rr
+
+ v_normal_ll = (v1_ll * normal_direction[1] +
+ v2_ll * normal_direction[2] +
+ v3_ll * normal_direction[3])
+ v_normal_rr = (v1_rr * normal_direction[1] +
+ v2_rr * normal_direction[2] +
+ v3_rr * normal_direction[3])
+
+ c_f_ll = calc_fast_wavespeed(u_ll, normal_direction, equations)
+ c_f_rr = calc_fast_wavespeed(u_rr, normal_direction, equations)
+
+ # Estimate the min/max eigenvalues in the normal direction
+ λ_min = min(v_normal_ll - c_f_ll, v_normal_rr - c_f_rr)
+ λ_max = max(v_normal_ll + c_f_ll, v_normal_rr + c_f_rr)
+
+ return λ_min, λ_max
+end
+
"""
min_max_speed_naive(u_ll, u_rr, orientation_or_normal_direction, equations::IdealGlmMhdEquations3D)
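The MHD variants follow the same pattern but replace the sound speed by the fast magnetoacoustic wave speed via `calc_fast_wavespeed`. A small sketch with a state copied from the unit tests added below:

```julia
using Trixi, StaticArrays

equations = IdealGlmMhdEquations2D(1.4, 5.0 #= c_h =#)
u = SVector(1.0, 0.4, -0.5, 0.1, 1.0, 0.1, -0.2, 0.1, 0.0)

# Davis bounds in the y-direction, based on the fast magnetoacoustic speed c_f
λ_min, λ_max = min_max_speed_davis(u, u, 2, equations)
@assert FluxHLL(min_max_speed_davis)(u, u, 2, equations) ≈ flux(u, 2, equations)
```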
diff --git a/src/equations/linearized_euler_2d.jl b/src/equations/linearized_euler_2d.jl
index e478c32bd29..d497762bf62 100644
--- a/src/equations/linearized_euler_2d.jl
+++ b/src/equations/linearized_euler_2d.jl
@@ -353,6 +353,44 @@ end
return SVector(f1, f2, f3, f4)
end
+# Calculate estimates for minimum and maximum wave speeds for HLL-type fluxes
+@inline function min_max_speed_naive(u_ll, u_rr, orientation::Integer,
+ equations::LinearizedEulerEquations2D)
+ min_max_speed_davis(u_ll, u_rr, orientation, equations)
+end
+
+@inline function min_max_speed_naive(u_ll, u_rr, normal_direction::AbstractVector,
+ equations::LinearizedEulerEquations2D)
+ min_max_speed_davis(u_ll, u_rr, normal_direction, equations)
+end
+
+# More refined estimates for minimum and maximum wave speeds for HLL-type fluxes
+@inline function min_max_speed_davis(u_ll, u_rr, orientation::Integer,
+ equations::LinearizedEulerEquations2D)
+ @unpack v_mean_global, c_mean_global = equations
+
+ λ_min = v_mean_global[orientation] - c_mean_global
+ λ_max = v_mean_global[orientation] + c_mean_global
+
+ return λ_min, λ_max
+end
+
+@inline function min_max_speed_davis(u_ll, u_rr, normal_direction::AbstractVector,
+ equations::LinearizedEulerEquations2D)
+ @unpack v_mean_global, c_mean_global = equations
+
+ norm_ = norm(normal_direction)
+
+ v_normal = v_mean_global[1] * normal_direction[1] +
+ v_mean_global[2] * normal_direction[2]
+
+ # The v_normals are already scaled by the norm
+ λ_min = v_normal - c_mean_global * norm_
+ λ_max = v_normal + c_mean_global * norm_
+
+ return λ_min, λ_max
+end
+
# Convert conservative variables to primitive
@inline cons2prim(u, equations::LinearizedEulerEquations2D) = u
@inline cons2entropy(u, ::LinearizedEulerEquations2D) = u
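Unlike the nonlinear systems above, the estimates for the linearized Euler equations depend only on the global mean state stored in the equations object, not on `u_ll`/`u_rr`. A short sketch with the constructor arguments copied from the unit test below:

```julia
using Trixi, StaticArrays

# (v_mean_global, c_mean_global, rho_mean_global)
equations = LinearizedEulerEquations2D(SVector(1.0, 1.0), 1.0, 1.0)
u = SVector(1.1, -0.5, 2.34, 5.5)  # arbitrary perturbation state, unused by the estimate

λ_min, λ_max = min_max_speed_davis(u, u, 1, equations)
# Here λ_min == v_mean_global[1] - c_mean_global and λ_max == v_mean_global[1] + c_mean_global
```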
diff --git a/src/equations/numerical_fluxes.jl b/src/equations/numerical_fluxes.jl
index 16a83124d14..abd9d66c490 100644
--- a/src/equations/numerical_fluxes.jl
+++ b/src/equations/numerical_fluxes.jl
@@ -214,6 +214,10 @@ Create an HLL (Harten, Lax, van Leer) numerical flux where the minimum and maxim
wave speeds are estimated as
`λ_min, λ_max = min_max_speed(u_ll, u_rr, orientation_or_normal_direction, equations)`,
defaulting to [`min_max_speed_naive`](@ref).
+Original paper:
+- Amiram Harten, Peter D. Lax, Bram van Leer (1983)
+ On Upstream Differencing and Godunov-Type Schemes for Hyperbolic Conservation Laws
+ [DOI: 10.1137/1025002](https://doi.org/10.1137/1025002)
"""
struct FluxHLL{MinMaxSpeed}
min_max_speed::MinMaxSpeed
@@ -222,18 +226,55 @@ end
FluxHLL() = FluxHLL(min_max_speed_naive)
"""
- min_max_speed_naive(u_ll, u_rr, orientation::Integer, equations)
+ min_max_speed_naive(u_ll, u_rr, orientation::Integer, equations)
min_max_speed_naive(u_ll, u_rr, normal_direction::AbstractVector, equations)
-Simple and fast estimate of the minimal and maximal wave speed of the Riemann problem with
+Simple and fast estimate(!) of the minimal and maximal wave speed of the Riemann problem with
left and right states `u_ll, u_rr`, usually based only on the local wave speeds associated to
`u_ll` and `u_rr`.
- Amiram Harten, Peter D. Lax, Bram van Leer (1983)
On Upstream Differencing and Godunov-Type Schemes for Hyperbolic Conservation Laws
[DOI: 10.1137/1025002](https://doi.org/10.1137/1025002)
+
+See also [`FluxHLL`](@ref), [`min_max_speed_davis`](@ref), [`min_max_speed_einfeldt`](@ref).
"""
function min_max_speed_naive end
+"""
+ min_max_speed_davis(u_ll, u_rr, orientation::Integer, equations)
+ min_max_speed_davis(u_ll, u_rr, normal_direction::AbstractVector, equations)
+
+Simple and fast estimates of the minimal and maximal wave speed of the Riemann problem with
+left and right states `u_ll, u_rr`, usually based only on the local wave speeds associated to
+`u_ll` and `u_rr`.
+
+- S.F. Davis (1988)
+ Simplified Second-Order Godunov-Type Methods
+ [DOI: 10.1137/0909030](https://doi.org/10.1137/0909030)
+
+See also [`FluxHLL`](@ref), [`min_max_speed_naive`](@ref), [`min_max_speed_einfeldt`](@ref).
+"""
+function min_max_speed_davis end
+
+"""
+ min_max_speed_einfeldt(u_ll, u_rr, orientation::Integer, equations)
+ min_max_speed_einfeldt(u_ll, u_rr, normal_direction::AbstractVector, equations)
+
+More advanced minimal and maximal wave speed computation based on
+- Bernd Einfeldt (1988)
+ On Godunov-type methods for gas dynamics.
+ [DOI: 10.1137/0725021](https://doi.org/10.1137/0725021)
+- Bernd Einfeldt, Claus-Dieter Munz, Philip L. Roe and Björn Sjögreen (1991)
+ On Godunov-type methods near low densities.
+ [DOI: 10.1016/0021-9991(91)90211-3](https://doi.org/10.1016/0021-9991(91)90211-3)
+
+originally developed for the compressible Euler equations.
+A compact representation can be found in [these lecture notes, eq. (9.28)](https://metaphor.ethz.ch/x/2019/hs/401-4671-00L/literature/mishra_hyperbolic_pdes.pdf).
+
+See also [`FluxHLL`](@ref), [`min_max_speed_naive`](@ref), [`min_max_speed_davis`](@ref).
+"""
+function min_max_speed_einfeldt end
+
@inline function (numflux::FluxHLL)(u_ll, u_rr, orientation_or_normal_direction,
equations)
λ_min, λ_max = numflux.min_max_speed(u_ll, u_rr, orientation_or_normal_direction,
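In practice, one of the three documented estimates is selected by constructing `FluxHLL` with the corresponding function; the default `FluxHLL()` keeps the previous behavior. A hedged usage sketch (the `polydeg` value is arbitrary):

```julia
using Trixi

# flux_hll == FluxHLL() == FluxHLL(min_max_speed_naive) keeps the old default
solver_naive = DGSEM(polydeg = 3, surface_flux = flux_hll)

# Opt into the Davis or Einfeldt estimates where they are implemented
solver_davis    = DGSEM(polydeg = 3, surface_flux = FluxHLL(min_max_speed_davis))
solver_einfeldt = DGSEM(polydeg = 3, surface_flux = FluxHLL(min_max_speed_einfeldt))
```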
diff --git a/src/equations/shallow_water_1d.jl b/src/equations/shallow_water_1d.jl
index 851cbacdd57..c33b31fca81 100644
--- a/src/equations/shallow_water_1d.jl
+++ b/src/equations/shallow_water_1d.jl
@@ -460,7 +460,7 @@ end
end
end
-# Calculate minimum and maximum wave speeds for HLL-type fluxes
+# Calculate estimates for minimum and maximum wave speeds for HLL-type fluxes
@inline function min_max_speed_naive(u_ll, u_rr, orientation::Integer,
equations::ShallowWaterEquations1D)
h_ll = waterheight(u_ll, equations)
@@ -474,6 +474,41 @@ end
return λ_min, λ_max
end
+# More refined estimates for minimum and maximum wave speeds for HLL-type fluxes
+@inline function min_max_speed_davis(u_ll, u_rr, orientation::Integer,
+ equations::ShallowWaterEquations1D)
+ h_ll = waterheight(u_ll, equations)
+ v_ll = velocity(u_ll, equations)
+ h_rr = waterheight(u_rr, equations)
+ v_rr = velocity(u_rr, equations)
+
+ c_ll = sqrt(equations.gravity * h_ll)
+ c_rr = sqrt(equations.gravity * h_rr)
+
+ λ_min = min(v_ll - c_ll, v_rr - c_rr)
+ λ_max = max(v_ll + c_ll, v_rr + c_rr)
+
+ return λ_min, λ_max
+end
+
+@inline function min_max_speed_einfeldt(u_ll, u_rr, orientation::Integer,
+ equations::ShallowWaterEquations1D)
+ h_ll = waterheight(u_ll, equations)
+ v_ll = velocity(u_ll, equations)
+ h_rr = waterheight(u_rr, equations)
+ v_rr = velocity(u_rr, equations)
+
+ c_ll = sqrt(equations.gravity * h_ll)
+ c_rr = sqrt(equations.gravity * h_rr)
+
+ v_roe, c_roe = calc_wavespeed_roe(u_ll, u_rr, orientation, equations)
+
+ λ_min = min(v_ll - c_ll, v_roe - c_roe)
+ λ_max = max(v_rr + c_rr, v_roe + c_roe)
+
+ return λ_min, λ_max
+end
+
@inline function max_abs_speeds(u, equations::ShallowWaterEquations1D)
h = waterheight(u, equations)
v = velocity(u, equations)
@@ -547,6 +582,35 @@ end
return waterheight(u, equations) * pressure(u, equations)
end
+"""
+ calc_wavespeed_roe(u_ll, u_rr, direction::Integer,
+ equations::ShallowWaterEquations1D)
+
+Calculate Roe-averaged velocity `v_roe` and wavespeed `c_roe = sqrt(g * h_roe)`.
+See for instance equation (62) in
+- Paul A. Ullrich, Christiane Jablonowski, and Bram van Leer (2010)
+ High-order finite-volume methods for the shallow-water equations on the sphere
+ [DOI: 10.1016/j.jcp.2010.04.044](https://doi.org/10.1016/j.jcp.2010.04.044)
+Or equation (9.17) in [these lecture notes](https://metaphor.ethz.ch/x/2019/hs/401-4671-00L/literature/mishra_hyperbolic_pdes.pdf).
+"""
+@inline function calc_wavespeed_roe(u_ll, u_rr, direction::Integer,
+ equations::ShallowWaterEquations1D)
+ h_ll = waterheight(u_ll, equations)
+ v_ll = velocity(u_ll, equations)
+ h_rr = waterheight(u_rr, equations)
+ v_rr = velocity(u_rr, equations)
+
+ h_roe = 0.5 * (h_ll + h_rr)
+ c_roe = sqrt(equations.gravity * h_roe)
+
+ h_ll_sqrt = sqrt(h_ll)
+ h_rr_sqrt = sqrt(h_rr)
+
+ v_roe = (h_ll_sqrt * v_ll + h_rr_sqrt * v_rr) / (h_ll_sqrt + h_rr_sqrt)
+
+ return v_roe, c_roe
+end
+
# Entropy function for the shallow water equations is the total energy
@inline function entropy(cons, equations::ShallowWaterEquations1D)
energy_total(cons, equations)
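As a sanity check on the new Roe-average helper: for identical left and right states the Roe velocity and wave speed reduce to the state values, so the Einfeldt and Davis bounds coincide. A small sketch with an arbitrary state (`calc_wavespeed_roe` is not exported, hence the `Trixi.` prefix):

```julia
using Trixi, StaticArrays

equations = ShallowWaterEquations1D(gravity_constant = 9.81)
u = SVector(1.0, 0.5, 0.0)  # (h, h*v, b)

v_roe, c_roe = Trixi.calc_wavespeed_roe(u, u, 1, equations)
# v_roe ≈ v and c_roe ≈ sqrt(g * h), so both estimates agree here
@assert all(min_max_speed_einfeldt(u, u, 1, equations) .≈
            min_max_speed_davis(u, u, 1, equations))
```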
diff --git a/src/equations/shallow_water_2d.jl b/src/equations/shallow_water_2d.jl
index f9ebbd597f9..9e227cd4a77 100644
--- a/src/equations/shallow_water_2d.jl
+++ b/src/equations/shallow_water_2d.jl
@@ -725,7 +725,7 @@ end
end
end
-# Calculate minimum and maximum wave speeds for HLL-type fluxes
+# Calculate estimates for minimum and maximum wave speeds for HLL-type fluxes
@inline function min_max_speed_naive(u_ll, u_rr, orientation::Integer,
equations::ShallowWaterEquations2D)
h_ll = waterheight(u_ll, equations)
@@ -762,6 +762,94 @@ end
return λ_min, λ_max
end
+# More refined estimates for minimum and maximum wave speeds for HLL-type fluxes
+@inline function min_max_speed_davis(u_ll, u_rr, orientation::Integer,
+ equations::ShallowWaterEquations2D)
+ h_ll = waterheight(u_ll, equations)
+ v1_ll, v2_ll = velocity(u_ll, equations)
+ h_rr = waterheight(u_rr, equations)
+ v1_rr, v2_rr = velocity(u_rr, equations)
+
+ c_ll = sqrt(equations.gravity * h_ll)
+ c_rr = sqrt(equations.gravity * h_rr)
+
+ if orientation == 1 # x-direction
+ λ_min = min(v1_ll - c_ll, v1_rr - c_rr)
+ λ_max = max(v1_ll + c_ll, v1_rr + c_rr)
+ else # y-direction
+ λ_min = min(v2_ll - c_ll, v2_rr - c_rr)
+ λ_max = max(v2_ll + c_ll, v2_rr + c_rr)
+ end
+
+ return λ_min, λ_max
+end
+
+@inline function min_max_speed_davis(u_ll, u_rr, normal_direction::AbstractVector,
+ equations::ShallowWaterEquations2D)
+ h_ll = waterheight(u_ll, equations)
+ v1_ll, v2_ll = velocity(u_ll, equations)
+ h_rr = waterheight(u_rr, equations)
+ v1_rr, v2_rr = velocity(u_rr, equations)
+
+ norm_ = norm(normal_direction)
+ c_ll = sqrt(equations.gravity * h_ll) * norm_
+ c_rr = sqrt(equations.gravity * h_rr) * norm_
+
+ v_normal_ll = v1_ll * normal_direction[1] + v2_ll * normal_direction[2]
+ v_normal_rr = v1_rr * normal_direction[1] + v2_rr * normal_direction[2]
+
+ # The v_normals are already scaled by the norm
+ λ_min = min(v_normal_ll - c_ll, v_normal_rr - c_rr)
+ λ_max = max(v_normal_ll + c_ll, v_normal_rr + c_rr)
+
+ return λ_min, λ_max
+end
+
+@inline function min_max_speed_einfeldt(u_ll, u_rr, orientation::Integer,
+ equations::ShallowWaterEquations2D)
+ h_ll = waterheight(u_ll, equations)
+ v1_ll, v2_ll = velocity(u_ll, equations)
+ h_rr = waterheight(u_rr, equations)
+ v1_rr, v2_rr = velocity(u_rr, equations)
+
+ c_ll = sqrt(equations.gravity * h_ll)
+ c_rr = sqrt(equations.gravity * h_rr)
+
+ if orientation == 1 # x-direction
+ v_roe, c_roe = calc_wavespeed_roe(u_ll, u_rr, orientation, equations)
+ λ_min = min(v1_ll - c_ll, v_roe - c_roe)
+ λ_max = max(v1_rr + c_rr, v_roe + c_roe)
+ else # y-direction
+ v_roe, c_roe = calc_wavespeed_roe(u_ll, u_rr, orientation, equations)
+ λ_min = min(v2_ll - c_ll, v_roe - c_roe)
+ λ_max = max(v2_rr + c_rr, v_roe + c_roe)
+ end
+
+ return λ_min, λ_max
+end
+
+@inline function min_max_speed_einfeldt(u_ll, u_rr, normal_direction::AbstractVector,
+ equations::ShallowWaterEquations2D)
+ h_ll = waterheight(u_ll, equations)
+ v1_ll, v2_ll = velocity(u_ll, equations)
+ h_rr = waterheight(u_rr, equations)
+ v1_rr, v2_rr = velocity(u_rr, equations)
+
+ norm_ = norm(normal_direction)
+
+ c_ll = sqrt(equations.gravity * h_ll) * norm_
+ c_rr = sqrt(equations.gravity * h_rr) * norm_
+
+ v_normal_ll = (v1_ll * normal_direction[1] + v2_ll * normal_direction[2])
+ v_normal_rr = (v1_rr * normal_direction[1] + v2_rr * normal_direction[2])
+
+ v_roe, c_roe = calc_wavespeed_roe(u_ll, u_rr, normal_direction, equations)
+ λ_min = min(v_normal_ll - c_ll, v_roe - c_roe)
+ λ_max = max(v_normal_rr + c_rr, v_roe + c_roe)
+
+ return λ_min, λ_max
+end
+
@inline function max_abs_speeds(u, equations::ShallowWaterEquations2D)
h = waterheight(u, equations)
v1, v2 = velocity(u, equations)
@@ -837,6 +925,63 @@ end
return waterheight(u, equations) * pressure(u, equations)
end
+"""
+ calc_wavespeed_roe(u_ll, u_rr, direction::Integer,
+ equations::ShallowWaterEquations2D)
+
+Calculate Roe-averaged velocity `v_roe` and wavespeed `c_roe = sqrt(g * h_roe)` depending on direction.
+See for instance equation (62) in
+- Paul A. Ullrich, Christiane Jablonowski, and Bram van Leer (2010)
+ High-order finite-volume methods for the shallow-water equations on the sphere
+ [DOI: 10.1016/j.jcp.2010.04.044](https://doi.org/10.1016/j.jcp.2010.04.044)
+Or [these slides](https://faculty.washington.edu/rjl/classes/am574w2011/slides/am574lecture20nup3.pdf),
+slides 8 and 9.
+"""
+@inline function calc_wavespeed_roe(u_ll, u_rr, orientation::Integer,
+ equations::ShallowWaterEquations2D)
+ h_ll = waterheight(u_ll, equations)
+ v1_ll, v2_ll = velocity(u_ll, equations)
+ h_rr = waterheight(u_rr, equations)
+ v1_rr, v2_rr = velocity(u_rr, equations)
+
+ h_roe = 0.5 * (h_ll + h_rr)
+ c_roe = sqrt(equations.gravity * h_roe)
+
+ h_ll_sqrt = sqrt(h_ll)
+ h_rr_sqrt = sqrt(h_rr)
+
+ if orientation == 1 # x-direction
+ v_roe = (h_ll_sqrt * v1_ll + h_rr_sqrt * v1_rr) / (h_ll_sqrt + h_rr_sqrt)
+ else # y-direction
+ v_roe = (h_ll_sqrt * v2_ll + h_rr_sqrt * v2_rr) / (h_ll_sqrt + h_rr_sqrt)
+ end
+
+ return v_roe, c_roe
+end
+
+@inline function calc_wavespeed_roe(u_ll, u_rr, normal_direction::AbstractVector,
+ equations::ShallowWaterEquations2D)
+ h_ll = waterheight(u_ll, equations)
+ v1_ll, v2_ll = velocity(u_ll, equations)
+ h_rr = waterheight(u_rr, equations)
+ v1_rr, v2_rr = velocity(u_rr, equations)
+
+ norm_ = norm(normal_direction)
+
+ h_roe = 0.5 * (h_ll + h_rr)
+ c_roe = sqrt(equations.gravity * h_roe) * norm_
+
+ h_ll_sqrt = sqrt(h_ll)
+ h_rr_sqrt = sqrt(h_rr)
+
+ v1_roe = (h_ll_sqrt * v1_ll + h_rr_sqrt * v1_rr) / (h_ll_sqrt + h_rr_sqrt)
+ v2_roe = (h_ll_sqrt * v2_ll + h_rr_sqrt * v2_rr) / (h_ll_sqrt + h_rr_sqrt)
+
+ v_roe = (v1_roe * normal_direction[1] + v2_roe * normal_direction[2])
+
+ return v_roe, c_roe
+end
+
# Entropy function for the shallow water equations is the total energy
@inline function entropy(cons, equations::ShallowWaterEquations2D)
energy_total(cons, equations)
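The 2D shallow water versions follow the same pattern in a normal direction; for identical states the resulting HLL flux is consistent with the physical flux, exactly as the unit tests added later in this patch assert. A minimal sketch with an arbitrary, non-normalized normal:

```julia
using Trixi, StaticArrays

equations = ShallowWaterEquations2D(gravity_constant = 9.81)
u = SVector(1.0, 0.5, 0.5, 0.0)        # (h, h*v1, h*v2, b)
normal_direction = SVector(-1.2, 0.3)  # deliberately not normalized

λ_min, λ_max = min_max_speed_einfeldt(u, u, normal_direction, equations)
@assert FluxHLL(min_max_speed_einfeldt)(u, u, normal_direction, equations) ≈
        flux(u, normal_direction, equations)
```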
diff --git a/test/test_structured_1d.jl b/test/test_structured_1d.jl
index ec8c7a138d5..d280e2a5e01 100644
--- a/test/test_structured_1d.jl
+++ b/test/test_structured_1d.jl
@@ -39,6 +39,14 @@ isdir(outdir) && rm(outdir, recursive=true)
tspan = (0.0, 0.3))
end
+ @trixi_testset "elixir_euler_sedov_hll_davis.jl" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_sedov.jl"),
+ l2 = [1.278661029299215, 0.0663853410742763, 0.9585741943783386],
+ linf = [3.1661064228547255, 0.16256363944708607, 2.667676158812806],
+ tspan = (0.0, 12.5),
+ surface_flux = FluxHLL(min_max_speed_davis))
+ end
+
@trixi_testset "elixir_euler_source_terms.jl" begin
@test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_source_terms.jl"),
# Expected errors are exactly the same as with TreeMesh!
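The new test above does not add a separate elixir; it reuses `elixir_euler_sedov.jl` and only overrides the surface flux. The same override can be reproduced interactively with `trixi_include` (a sketch; the path assumes the standard examples layout of the repository):

```julia
using Trixi

trixi_include(joinpath(examples_dir(), "structured_1d_dgsem", "elixir_euler_sedov.jl"),
              tspan = (0.0, 12.5),
              surface_flux = FluxHLL(min_max_speed_davis))
```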
diff --git a/test/test_unit.jl b/test/test_unit.jl
index b0c3e4205e5..2ce111b2bf4 100644
--- a/test/test_unit.jl
+++ b/test/test_unit.jl
@@ -382,7 +382,7 @@ isdir(outdir) && rm(outdir, recursive=true)
@timed_testset "HLL flux with vanishing wave speed estimates (#502)" begin
equations = CompressibleEulerEquations1D(1.4)
u = SVector(1.0, 0.0, 0.0)
- @test !any(isnan, FluxHLL()(u, u, 1, equations))
+ @test !any(isnan, flux_hll(u, u, 1, equations))
end
@timed_testset "DG L2 mortar container debug output" begin
@@ -586,7 +586,265 @@ isdir(outdir) && rm(outdir, recursive=true)
@test_throws ArgumentError TimeSeriesCallback(semi, [1.0 1.0 1.0; 2.0 2.0 2.0])
end
- @timed_testset "Consistency check for HLLE flux" begin
+ @timed_testset "Consistency check for HLL flux (naive): CEE" begin
+ flux_hll = FluxHLL(min_max_speed_naive)
+
+ # Set up equations and dummy conservative variables state
+ equations = CompressibleEulerEquations1D(1.4)
+ u = SVector(1.1, 2.34, 5.5)
+
+ orientations = [1]
+ for orientation in orientations
+ @test flux_hll(u, u, orientation, equations) ≈ flux(u, orientation, equations)
+ end
+
+ equations = CompressibleEulerEquations2D(1.4)
+ u = SVector(1.1, -0.5, 2.34, 5.5)
+
+ orientations = [1, 2]
+ for orientation in orientations
+ @test flux_hll(u, u, orientation, equations) ≈ flux(u, orientation, equations)
+ end
+
+ equations = CompressibleEulerEquations3D(1.4)
+ u = SVector(1.1, -0.5, 2.34, 2.4, 5.5)
+
+ orientations = [1, 2, 3]
+ for orientation in orientations
+ @test flux_hll(u, u, orientation, equations) ≈ flux(u, orientation, equations)
+ end
+ end
+
+ @timed_testset "Consistency check for HLL flux (naive): LEE" begin
+ flux_hll = FluxHLL(min_max_speed_naive)
+
+ equations = LinearizedEulerEquations2D(SVector(1.0, 1.0), 1.0, 1.0)
+ u = SVector(1.1, -0.5, 2.34, 5.5)
+
+ orientations = [1, 2]
+ for orientation in orientations
+ @test flux_hll(u, u, orientation, equations) ≈ flux(u, orientation, equations)
+ end
+
+ normal_directions = [SVector(1.0, 0.0),
+ SVector(0.0, 1.0),
+ SVector(0.5, -0.5),
+ SVector(-1.2, 0.3)]
+
+ for normal_direction in normal_directions
+ @test flux_hll(u, u, normal_direction, equations) ≈ flux(u, normal_direction, equations)
+ end
+ end
+
+ @timed_testset "Consistency check for HLL flux (naive): SWE" begin
+ flux_hll = FluxHLL(min_max_speed_naive)
+
+ equations = ShallowWaterEquations1D(gravity_constant=9.81)
+ u = SVector(1, 0.5, 0.0)
+ @test flux_hll(u, u, 1, equations) ≈ flux(u, 1, equations)
+
+ equations = ShallowWaterEquations2D(gravity_constant=9.81)
+ normal_directions = [SVector(1.0, 0.0),
+ SVector(0.0, 1.0),
+ SVector(0.5, -0.5),
+ SVector(-1.2, 0.3)]
+ u = SVector(1, 0.5, 0.5, 0.0)
+ for normal_direction in normal_directions
+ @test flux_hll(u, u, normal_direction, equations) ≈ flux(u, normal_direction, equations)
+ end
+ end
+
+ @timed_testset "Consistency check for HLL flux (naive): MHD" begin
+ flux_hll = FluxHLL(min_max_speed_naive)
+
+ equations = IdealGlmMhdEquations1D(1.4)
+ u_values = [SVector(1.0, 0.4, -0.5, 0.1, 1.0, 0.1, -0.2, 0.1),
+ SVector(1.5, -0.2, 0.1, 0.2, 5.0, -0.1, 0.1, 0.2),]
+
+ for u in u_values
+ @test flux_hll(u, u, 1, equations) ≈ flux(u, 1, equations)
+ end
+
+ equations = IdealGlmMhdEquations2D(1.4, 5.0 #= c_h =#)
+ normal_directions = [SVector(1.0, 0.0),
+ SVector(0.0, 1.0),
+ SVector(0.5, -0.5),
+ SVector(-1.2, 0.3)]
+ orientations = [1, 2]
+
+ u_values = [SVector(1.0, 0.4, -0.5, 0.1, 1.0, 0.1, -0.2, 0.1, 0.0),
+ SVector(1.5, -0.2, 0.1, 0.2, 5.0, -0.1, 0.1, 0.2, 0.2),]
+
+ for u in u_values, orientation in orientations
+ @test flux_hll(u, u, orientation, equations) ≈ flux(u, orientation, equations)
+ end
+
+ for u in u_values, normal_direction in normal_directions
+ @test flux_hll(u, u, normal_direction, equations) ≈ flux(u, normal_direction, equations)
+ end
+
+ equations = IdealGlmMhdEquations3D(1.4, 5.0 #= c_h =#)
+ normal_directions = [SVector(1.0, 0.0, 0.0),
+ SVector(0.0, 1.0, 0.0),
+ SVector(0.0, 0.0, 1.0),
+ SVector(0.5, -0.5, 0.2),
+ SVector(-1.2, 0.3, 1.4)]
+ orientations = [1, 2, 3]
+
+ u_values = [SVector(1.0, 0.4, -0.5, 0.1, 1.0, 0.1, -0.2, 0.1, 0.0),
+ SVector(1.5, -0.2, 0.1, 0.2, 5.0, -0.1, 0.1, 0.2, 0.2),]
+
+ for u in u_values, orientation in orientations
+ @test flux_hll(u, u, orientation, equations) ≈ flux(u, orientation, equations)
+ end
+
+ for u in u_values, normal_direction in normal_directions
+ @test flux_hll(u, u, normal_direction, equations) ≈ flux(u, normal_direction, equations)
+ end
+ end
+
+ @timed_testset "Consistency check for HLL flux with Davis wave speed estimates: CEE" begin
+ flux_hll = FluxHLL(min_max_speed_davis)
+
+ # Set up equations and dummy conservative variables state
+ equations = CompressibleEulerEquations1D(1.4)
+ u = SVector(1.1, 2.34, 5.5)
+
+ orientations = [1]
+ for orientation in orientations
+ @test flux_hll(u, u, orientation, equations) ≈ flux(u, orientation, equations)
+ end
+
+ equations = CompressibleEulerEquations2D(1.4)
+ u = SVector(1.1, -0.5, 2.34, 5.5)
+
+ orientations = [1, 2]
+ for orientation in orientations
+ @test flux_hll(u, u, orientation, equations) ≈ flux(u, orientation, equations)
+ end
+
+ normal_directions = [SVector(1.0, 0.0),
+ SVector(0.0, 1.0),
+ SVector(0.5, -0.5),
+ SVector(-1.2, 0.3)]
+
+ for normal_direction in normal_directions
+ @test flux_hll(u, u, normal_direction, equations) ≈ flux(u, normal_direction, equations)
+ end
+
+ equations = CompressibleEulerEquations3D(1.4)
+ u = SVector(1.1, -0.5, 2.34, 2.4, 5.5)
+
+ orientations = [1, 2, 3]
+ for orientation in orientations
+ @test flux_hll(u, u, orientation, equations) ≈ flux(u, orientation, equations)
+ end
+
+ normal_directions = [SVector(1.0, 0.0, 0.0),
+ SVector(0.0, 1.0, 0.0),
+ SVector(0.0, 0.0, 1.0),
+ SVector(0.5, -0.5, 0.2),
+ SVector(-1.2, 0.3, 1.4)]
+
+ for normal_direction in normal_directions
+ @test flux_hll(u, u, normal_direction, equations) ≈ flux(u, normal_direction, equations)
+ end
+ end
+
+ @timed_testset "Consistency check for HLL flux with Davis wave speed estimates: LEE" begin
+ flux_hll = FluxHLL(min_max_speed_davis)
+
+ equations = LinearizedEulerEquations2D(SVector(1.0, 1.0), 1.0, 1.0)
+ u = SVector(1.1, -0.5, 2.34, 5.5)
+
+ orientations = [1, 2]
+ for orientation in orientations
+ @test flux_hll(u, u, orientation, equations) ≈ flux(u, orientation, equations)
+ end
+
+ normal_directions = [SVector(1.0, 0.0),
+ SVector(0.0, 1.0),
+ SVector(0.5, -0.5),
+ SVector(-1.2, 0.3)]
+
+ for normal_direction in normal_directions
+ @test flux_hll(u, u, normal_direction, equations) ≈ flux(u, normal_direction, equations)
+ end
+ end
+
+ @timed_testset "Consistency check for HLL flux with Davis wave speed estimates: SWE" begin
+ flux_hll = FluxHLL(min_max_speed_davis)
+
+ equations = ShallowWaterEquations1D(gravity_constant=9.81)
+ u = SVector(1, 0.5, 0.0)
+ @test flux_hll(u, u, 1, equations) ≈ flux(u, 1, equations)
+
+ equations = ShallowWaterEquations2D(gravity_constant=9.81)
+ normal_directions = [SVector(1.0, 0.0),
+ SVector(0.0, 1.0),
+ SVector(0.5, -0.5),
+ SVector(-1.2, 0.3)]
+ u = SVector(1, 0.5, 0.5, 0.0)
+ for normal_direction in normal_directions
+ @test flux_hll(u, u, normal_direction, equations) ≈ flux(u, normal_direction, equations)
+ end
+
+ orientations = [1, 2]
+ for orientation in orientations
+ @test flux_hll(u, u, orientation, equations) ≈ flux(u, orientation, equations)
+ end
+ end
+
+ @timed_testset "Consistency check for HLL flux with Davis wave speed estimates: MHD" begin
+ flux_hll = FluxHLL(min_max_speed_davis)
+
+ equations = IdealGlmMhdEquations1D(1.4)
+ u_values = [SVector(1.0, 0.4, -0.5, 0.1, 1.0, 0.1, -0.2, 0.1),
+ SVector(1.5, -0.2, 0.1, 0.2, 5.0, -0.1, 0.1, 0.2),]
+
+ for u in u_values
+ @test flux_hll(u, u, 1, equations) ≈ flux(u, 1, equations)
+ end
+
+ equations = IdealGlmMhdEquations2D(1.4, 5.0 #= c_h =#)
+ normal_directions = [SVector(1.0, 0.0),
+ SVector(0.0, 1.0),
+ SVector(0.5, -0.5),
+ SVector(-1.2, 0.3)]
+ orientations = [1, 2]
+
+ u_values = [SVector(1.0, 0.4, -0.5, 0.1, 1.0, 0.1, -0.2, 0.1, 0.0),
+ SVector(1.5, -0.2, 0.1, 0.2, 5.0, -0.1, 0.1, 0.2, 0.2),]
+
+ for u in u_values, orientation in orientations
+ @test flux_hll(u, u, orientation, equations) ≈ flux(u, orientation, equations)
+ end
+
+ for u in u_values, normal_direction in normal_directions
+ @test flux_hll(u, u, normal_direction, equations) ≈ flux(u, normal_direction, equations)
+ end
+
+ equations = IdealGlmMhdEquations3D(1.4, 5.0 #= c_h =#)
+ normal_directions = [SVector(1.0, 0.0, 0.0),
+ SVector(0.0, 1.0, 0.0),
+ SVector(0.0, 0.0, 1.0),
+ SVector(0.5, -0.5, 0.2),
+ SVector(-1.2, 0.3, 1.4)]
+ orientations = [1, 2, 3]
+
+ u_values = [SVector(1.0, 0.4, -0.5, 0.1, 1.0, 0.1, -0.2, 0.1, 0.0),
+ SVector(1.5, -0.2, 0.1, 0.2, 5.0, -0.1, 0.1, 0.2, 0.2),]
+
+ for u in u_values, orientation in orientations
+ @test flux_hll(u, u, orientation, equations) ≈ flux(u, orientation, equations)
+ end
+
+ for u in u_values, normal_direction in normal_directions
+ @test flux_hll(u, u, normal_direction, equations) ≈ flux(u, normal_direction, equations)
+ end
+ end
+
+ @timed_testset "Consistency check for HLLE flux: CEE" begin
# Set up equations and dummy conservative variables state
equations = CompressibleEulerEquations1D(1.4)
u = SVector(1.1, 2.34, 5.5)
@@ -604,6 +862,15 @@ isdir(outdir) && rm(outdir, recursive=true)
@test flux_hlle(u, u, orientation, equations) ≈ flux(u, orientation, equations)
end
+ normal_directions = [SVector(1.0, 0.0),
+ SVector(0.0, 1.0),
+ SVector(0.5, -0.5),
+ SVector(-1.2, 0.3)]
+
+ for normal_direction in normal_directions
+ @test flux_hll(u, u, normal_direction, equations) ≈ flux(u, normal_direction, equations)
+ end
+
equations = CompressibleEulerEquations3D(1.4)
u = SVector(1.1, -0.5, 2.34, 2.4, 5.5)
@@ -611,6 +878,92 @@ isdir(outdir) && rm(outdir, recursive=true)
for orientation in orientations
@test flux_hlle(u, u, orientation, equations) ≈ flux(u, orientation, equations)
end
+
+ normal_directions = [SVector(1.0, 0.0, 0.0),
+ SVector(0.0, 1.0, 0.0),
+ SVector(0.0, 0.0, 1.0),
+ SVector(0.5, -0.5, 0.2),
+ SVector(-1.2, 0.3, 1.4)]
+
+ for normal_direction in normal_directions
+ @test flux_hll(u, u, normal_direction, equations) ≈ flux(u, normal_direction, equations)
+ end
+ end
+
+ @timed_testset "Consistency check for HLLE flux: SWE" begin
+ # Test HLL flux with min_max_speed_einfeldt
+ flux_hll = FluxHLL(min_max_speed_einfeldt)
+
+ equations = ShallowWaterEquations1D(gravity_constant=9.81)
+ u = SVector(1, 0.5, 0.0)
+ @test flux_hll(u, u, 1, equations) ≈ flux(u, 1, equations)
+
+ equations = ShallowWaterEquations2D(gravity_constant=9.81)
+ normal_directions = [SVector(1.0, 0.0),
+ SVector(0.0, 1.0),
+ SVector(0.5, -0.5),
+ SVector(-1.2, 0.3)]
+ orientations = [1, 2]
+
+ u = SVector(1, 0.5, 0.5, 0.0)
+
+ for orientation in orientations
+ @test flux_hll(u, u, orientation, equations) ≈ flux(u, orientation, equations)
+ end
+
+ for normal_direction in normal_directions
+ @test flux_hll(u, u, normal_direction, equations) ≈ flux(u, normal_direction, equations)
+ end
+ end
+
+ @timed_testset "Consistency check for HLLE flux: MHD" begin
+ # Test HLL flux with min_max_speed_naive (this patch does not add an Einfeldt-type estimate for MHD)
+ flux_hll = FluxHLL(min_max_speed_naive)
+
+ equations = IdealGlmMhdEquations1D(1.4)
+ u_values = [SVector(1.0, 0.4, -0.5, 0.1, 1.0, 0.1, -0.2, 0.1),
+ SVector(1.5, -0.2, 0.1, 0.2, 5.0, -0.1, 0.1, 0.2),]
+
+ for u in u_values
+ @test flux_hll(u, u, 1, equations) ≈ flux(u, 1, equations)
+ end
+
+ equations = IdealGlmMhdEquations2D(1.4, 5.0 #= c_h =#)
+ normal_directions = [SVector(1.0, 0.0),
+ SVector(0.0, 1.0),
+ SVector(0.5, -0.5),
+ SVector(-1.2, 0.3)]
+ orientations = [1, 2]
+
+ u_values = [SVector(1.0, 0.4, -0.5, 0.1, 1.0, 0.1, -0.2, 0.1, 0.0),
+ SVector(1.5, -0.2, 0.1, 0.2, 5.0, -0.1, 0.1, 0.2, 0.2),]
+
+ for u in u_values, orientation in orientations
+ @test flux_hll(u, u, orientation, equations) ≈ flux(u, orientation, equations)
+ end
+
+ for u in u_values, normal_direction in normal_directions
+ @test flux_hll(u, u, normal_direction, equations) ≈ flux(u, normal_direction, equations)
+ end
+
+ equations = IdealGlmMhdEquations3D(1.4, 5.0 #= c_h =#)
+ normal_directions = [SVector(1.0, 0.0, 0.0),
+ SVector(0.0, 1.0, 0.0),
+ SVector(0.0, 0.0, 1.0),
+ SVector(0.5, -0.5, 0.2),
+ SVector(-1.2, 0.3, 1.4)]
+ orientations = [1, 2, 3]
+
+ u_values = [SVector(1.0, 0.4, -0.5, 0.1, 1.0, 0.1, -0.2, 0.1, 0.0),
+ SVector(1.5, -0.2, 0.1, 0.2, 5.0, -0.1, 0.1, 0.2, 0.2),]
+
+ for u in u_values, orientation in orientations
+ @test flux_hll(u, u, orientation, equations) ≈ flux(u, orientation, equations)
+ end
+
+ for u in u_values, normal_direction in normal_directions
+ @test flux_hll(u, u, normal_direction, equations) ≈ flux(u, normal_direction, equations)
+ end
end
@timed_testset "Consistency check for Godunov flux" begin
@@ -780,7 +1133,8 @@ isdir(outdir) && rm(outdir, recursive=true)
SVector(-1.2, 0.3)]
u_values = [SVector(1.0, 0.5, -0.7, 1.0),
SVector(1.5, -0.2, 0.1, 5.0),]
- fluxes = [flux_central, flux_ranocha, flux_shima_etal, flux_kennedy_gruber]
+ fluxes = [flux_central, flux_ranocha, flux_shima_etal, flux_kennedy_gruber,
+ flux_hll, FluxHLL(min_max_speed_davis)]
for f_std in fluxes
f_rot = FluxRotated(f_std)
@@ -799,7 +1153,8 @@ isdir(outdir) && rm(outdir, recursive=true)
SVector(-1.2, 0.3, 1.4)]
u_values = [SVector(1.0, 0.5, -0.7, 0.1, 1.0),
SVector(1.5, -0.2, 0.1, 0.2, 5.0),]
- fluxes = [flux_central, flux_ranocha, flux_shima_etal, flux_kennedy_gruber, FluxLMARS(340)]
+ fluxes = [flux_central, flux_ranocha, flux_shima_etal, flux_kennedy_gruber, FluxLMARS(340),
+ flux_hll, FluxHLL(min_max_speed_davis)]
for f_std in fluxes
f_rot = FluxRotated(f_std)
@@ -809,6 +1164,20 @@ isdir(outdir) && rm(outdir, recursive=true)
end
end
+ @timed_testset "ShallowWaterEquations2D" begin
+ equations = ShallowWaterEquations2D(gravity_constant=9.81)
+ normal_directions = [SVector(1.0, 0.0),
+ SVector(0.0, 1.0),
+ SVector(0.5, -0.5),
+ SVector(-1.2, 0.3)]
+
+ u = SVector(1, 0.5, 0.5, 0.0)
+
+ fluxes = [flux_central, flux_fjordholm_etal, flux_wintermeyer_etal,
+ flux_hll, FluxHLL(min_max_speed_davis), FluxHLL(min_max_speed_einfeldt)]
+
+ end
+
@timed_testset "IdealGlmMhdEquations2D" begin
equations = IdealGlmMhdEquations2D(1.4, 5.0 #= c_h =#)
normal_directions = [SVector(1.0, 0.0),
@@ -817,7 +1186,7 @@ isdir(outdir) && rm(outdir, recursive=true)
SVector(-1.2, 0.3)]
u_values = [SVector(1.0, 0.4, -0.5, 0.1, 1.0, 0.1, -0.2, 0.1, 0.0),
SVector(1.5, -0.2, 0.1, 0.2, 5.0, -0.1, 0.1, 0.2, 0.2),]
- fluxes = [flux_central, flux_hindenlang_gassner]
+ fluxes = [flux_central, flux_hindenlang_gassner, flux_hll, FluxHLL(min_max_speed_davis)]
for f_std in fluxes
f_rot = FluxRotated(f_std)
@@ -836,7 +1205,7 @@ isdir(outdir) && rm(outdir, recursive=true)
SVector(-1.2, 0.3, 1.4)]
u_values = [SVector(1.0, 0.4, -0.5, 0.1, 1.0, 0.1, -0.2, 0.1, 0.0),
SVector(1.5, -0.2, 0.1, 0.2, 5.0, -0.1, 0.1, 0.2, 0.2),]
- fluxes = [flux_central, flux_hindenlang_gassner]
+ fluxes = [flux_central, flux_hindenlang_gassner, flux_hll, FluxHLL(min_max_speed_davis)]
for f_std in fluxes
f_rot = FluxRotated(f_std)
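For context on the rotated-flux additions just above: `FluxRotated` rotates the state into the normal direction, applies the wrapped flux in the x-direction, and rotates the result back, so it must agree with the multi-dimensional flux evaluated directly. A minimal sketch with one of the test states:

```julia
using Trixi, StaticArrays

equations = CompressibleEulerEquations2D(1.4)
u = SVector(1.0, 0.5, -0.7, 1.0)
normal_direction = SVector(0.5, -0.5)

f_std = FluxHLL(min_max_speed_davis)
f_rot = FluxRotated(f_std)
@assert f_rot(u, u, normal_direction, equations) ≈ f_std(u, u, normal_direction, equations)
```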
From dd91d7ed7fe99f437d8d0261cf7f9c43eb32c95b Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Fri, 14 Jul 2023 07:33:12 +0200
Subject: [PATCH 09/40] set version to v0.5.32
---
Project.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Project.toml b/Project.toml
index 828f4778f74..f3ede1c74b4 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "Trixi"
uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb"
authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "]
-version = "0.5.32-pre"
+version = "0.5.32"
[deps]
CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
From 0816ed0b62679bcd656dc38bad68034843632ba1 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Fri, 14 Jul 2023 07:33:26 +0200
Subject: [PATCH 10/40] set development version to v0.5.33-pre
---
Project.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Project.toml b/Project.toml
index f3ede1c74b4..4a289380850 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "Trixi"
uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb"
authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "]
-version = "0.5.32"
+version = "0.5.33-pre"
[deps]
CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
From 905c8e29ef30bdb2648fa3073166ad0887cd1278 Mon Sep 17 00:00:00 2001
From: Andrew Winters
Date: Fri, 14 Jul 2023 17:47:20 +0200
Subject: [PATCH 11/40] Merge wet/dry capability to `main` (#1501)
* add dummy commit in order to open a dev to main PR
* [WIP] Wet/dry capabilities for 2D shallow water equations (#1340)
* HR of Chen and Noelle (1D) and edit SWE struct
* Overload limiter (SWE 1D) to cut off waterheight
* New indicatorHG (SWE 1D) to apply FV on dry cells
* Threshold in rhs! before calculation (SWE 1D)
* New lake_at_rest_error for SWE 1D
* New wet/dry elixirs for testing scheme for SWE 1D
* HR of Chen and Noelle (2D) and edit SWE struct
* Overload limiter (SWE 2D) to cut off waterheight
* New indicatorHG (SWE 2D) to apply FV on dry cells
* Threshold in rhs! before calculation (SWE 2D)
* New lake_at_rest_error for SWE 2D
* New wet/dry elixirs for testing scheme for SWE 2D
* Elixir SWE 2D: 3 mounds, problem with boundaries
* Fixed MethodError; apply_thresholds! too strict
* Fixed MethodError; apply_thresholds! too strict
* Move threshold on volume integral in stage_limiter
* Indentation, spacing and comments adjustment
* Renaming numerical HLL type flux (SWE 1D)
* Move threshold on volume integral in stage_limiter
* Renaming numerical HLL type flux (SWE 2D)
* Indentation, spacing and comments adjustment
* Describing docs for Chen and Noelle HR (SWE 1D)
* Edit SWE 1D elixirs, error-based solver and docs
* Including tests on new SWE 1D elixirs
* Describing docs for Chen and Noelle HR (SWE 2D)
* Edit SWE 2D elixirs, error-based solver and docs
* Including tests on new SWE 2D elixirs
* New/reorganize positivity limiter (SWE 2D)
* New/reorganize positivity limiter (SWE 1D)
* Editing docs SWE 1D
* Editing docs SWE 2D
* Rearrange cut off at interfaces, edit tests SWE 1D
* Edit docs, add Ref
* Edit docs and indenting (SWE 2D)
* Rearrange cut off at interfaces, edit tests SWE 2D
* Remove tree/structured mesh elixir from repo SWE2D
* Create unstructured mesh elixir SWE 2D
* Add 1D lake-at-rest-error logic to pass 1D tests
* Add 2D lake-at-rest-error logic to pass 2D tests
* Fixed typo. Confusing name, but correct math
* Correction of comments and docstrings
* Correction of comments and docstrings
* Rename mesh file in elixir for UnstructuredMesh
* Update test_unstructured_2d.jl
forgot an end statement for the new test
* Fixing typos
* fix dispatching error on new lake-at-rest error calculation. See if this fixes broken tests
* Editing initial condition in parabolic bowl elixir
* Delete unnecessary variable in elixir
* adjust lake-at-rest error computation strategy. move specialized version of error into the wet-dry elixir as the new functionality was only needed in this special case. update corresponding test values as the bottom is now truly discontinuous
* update structured mesh version of the wet-dry well-balancedness test
* fix typos
* update values in parabolic bowl test on StructuredMesh
* update parabolic bowl test on TreeMesh
* revert the 1D computation of the lake-at-rest error to the standard way. This will change once the 1D wet/dry merges
* Reset lake-at-rest error computation strategy.
New version of error only in wet-dry elixir (special case)
Update test values as the bottom is now truly discontinuous
* Fix typo
* Shorten test run for parabolic bowl 1D
* Choose lower resolution for parabolic bowl
and update test values
* Further reduce resolution for parabolic bowl
and update test values
* adjust special initial conditions and well-balancedness error routines to avoid the need of element IDs
* Remove MPI from well-balanced test
* simplify workaround to set discontinuous initial data
* Simplify workaround to set discontinuity
* Change structure of Chen&Noelle flux
* Fix typos and indenting
* Adjust call of solve and use ode_default_options
* Edit docstring
* Replace boolean with if, remove set_node_vars
Shorten test runs on TreeMesh and UnstructuredMesh
* Change structure of Chen&Noelle flux
* Fix typos and indenting
* Adjust call of solve and use ode_default_options
* Edit docstring
* Replace boolean with if, remove set_node_vars
Shorten test runs on TreeMesh and UnstructuredMesh
* Update comment regarding H0 for lake-at-rest error
* Add the original source to the parabolic bowl test
* Update comment regarding H0 for lake-at-rest error
* Add the original source to the parabolic bowl test
* New sc indicator especially for SWE
* Remove threshold parameter from SWE limiter call
* update some docstrings
* remove type instability in positivty limiter
* typo fix
* move safety check for dry state in the new positivity limiter into the same element loop
* more docstring updates
* remove dummy comment added in the dev initial commit
* adjust default threshold values to be precision agnostic
* update comment on the default threshold value in the new TreeMesh elixirs
* update comments for the three new TreeMesh examples
* update IC comment for three mound test
* update IC comments for new StructuredMesh2D tests
* update comment on shallow water constructor
* adjust comments in the shallow_water_2d file
* adjust comment regarding threshold_limiter in the new elixirs
* fix typos found by SpellCheck
* Edit docs
* Import Printf macros for printing wb error
* Remove type instability in Chen & Noelle HR
* Change logic for setting SC indicator to one
* Change logic for default values of SWE struct
* Outsource HG shock capturing indicator for SWE
Create different function to compute indicator
Edit comments
Change wet/dry clipping to if-else logic
* Move limiterthreshold into function & edit docs
Threshold was a passed variable in elixir before.
Now, it is taken right from the SWE struct in the limiter
Edit docs
* Move new limiter safety check in same element loop
* Adjust default threshold values
* Remove type instability
* Import Printf package for terminal output
* Edit docs
* Add Printf package to the test/Project.toml
Used for printing lake-at-rest error in well-balancedness test
* Add Printf package to the test/Project.toml
Used for printing lake-at-rest error in well-balancedness test
* Typo fix in elixir_shallowwater_well_balanced_wet_dry.jl
* Typo fix in elixir_shallowwater_well_balanced_wet_dry.jl
* unify new code with required formatting
* fix weird formatting and add 'format: noindent' where missing. fix crashing structured mesh run
* add unit test for new show routine
* apply JuliaFormatter
* simplify elixir as we can set discontinuous ICs in 1D. Also update beach test values
* dummy commit to check push access
* remove dummy comment
* typo fix
---------
Co-authored-by: Andrew Winters
Co-authored-by: Michael Schlottke-Lakemper
* adjust comments and remove duplicate code
* add TODOs for code pieces that should move to TrixiShallowWater package
* remove accidentally added file
* apply formatter to avoid errors with new comments
* move TODO comments to avoid errors in Documentation build
* Apply suggestions from code review
Co-authored-by: Hendrik Ranocha
* remove unnecessary analysis quantities from several new elixirs
* rename local threshold variable in new indicator to avoid confusion
* update NEWS.md with wetting and drying feature
* fix formatting issue from conflict resolution
---------
Co-authored-by: svengoldberg <102215246+svengoldberg@users.noreply.github.com>
Co-authored-by: Michael Schlottke-Lakemper
Co-authored-by: Hendrik Ranocha
---
NEWS.md | 1 +
.../elixir_shallowwater_conical_island.jl | 113 ++++++++
.../elixir_shallowwater_parabolic_bowl.jl | 119 ++++++++
...ixir_shallowwater_well_balanced_wet_dry.jl | 200 +++++++++++++
.../elixir_shallowwater_beach.jl | 121 ++++++++
.../elixir_shallowwater_parabolic_bowl.jl | 117 ++++++++
...ixir_shallowwater_well_balanced_wet_dry.jl | 165 +++++++++++
.../elixir_shallowwater_conical_island.jl | 116 ++++++++
.../elixir_shallowwater_parabolic_bowl.jl | 120 ++++++++
...ixir_shallowwater_well_balanced_wet_dry.jl | 198 +++++++++++++
...ixir_shallowwater_three_mound_dam_break.jl | 139 +++++++++
src/Trixi.jl | 9 +-
src/callbacks_stage/callbacks_stage.jl | 2 +
.../positivity_shallow_water.jl | 89 ++++++
.../positivity_shallow_water_dg1d.jl | 89 ++++++
.../positivity_shallow_water_dg2d.jl | 90 ++++++
src/equations/numerical_fluxes.jl | 23 ++
src/equations/shallow_water_1d.jl | 192 ++++++++++++-
src/equations/shallow_water_2d.jl | 270 +++++++++++++++++-
src/equations/shallow_water_two_layer_1d.jl | 2 +
src/equations/shallow_water_two_layer_2d.jl | 96 ++++---
src/solvers/dgsem_tree/indicators.jl | 73 ++++-
src/solvers/dgsem_tree/indicators_1d.jl | 109 +++++++
src/solvers/dgsem_tree/indicators_2d.jl | 110 +++++++
test/Project.toml | 1 +
test/test_structured_2d.jl | 25 +-
test/test_tree_1d_shallowwater.jl | 23 ++
test/test_tree_1d_shallowwater_twolayer.jl | 2 +
test/test_tree_2d_shallowwater.jl | 24 ++
test/test_tree_2d_shallowwater_twolayer.jl | 20 +-
test/test_unit.jl | 4 +
test/test_unstructured_2d.jl | 18 ++
32 files changed, 2608 insertions(+), 72 deletions(-)
create mode 100644 examples/structured_2d_dgsem/elixir_shallowwater_conical_island.jl
create mode 100644 examples/structured_2d_dgsem/elixir_shallowwater_parabolic_bowl.jl
create mode 100644 examples/structured_2d_dgsem/elixir_shallowwater_well_balanced_wet_dry.jl
create mode 100644 examples/tree_1d_dgsem/elixir_shallowwater_beach.jl
create mode 100644 examples/tree_1d_dgsem/elixir_shallowwater_parabolic_bowl.jl
create mode 100644 examples/tree_1d_dgsem/elixir_shallowwater_well_balanced_wet_dry.jl
create mode 100644 examples/tree_2d_dgsem/elixir_shallowwater_conical_island.jl
create mode 100644 examples/tree_2d_dgsem/elixir_shallowwater_parabolic_bowl.jl
create mode 100644 examples/tree_2d_dgsem/elixir_shallowwater_well_balanced_wet_dry.jl
create mode 100644 examples/unstructured_2d_dgsem/elixir_shallowwater_three_mound_dam_break.jl
create mode 100644 src/callbacks_stage/positivity_shallow_water.jl
create mode 100644 src/callbacks_stage/positivity_shallow_water_dg1d.jl
create mode 100644 src/callbacks_stage/positivity_shallow_water_dg2d.jl
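Condensed from the elixirs added below, the wet/dry capability combines three pieces: the Chen & Noelle hydrostatic reconstruction in the surface flux, a shallow-water-specific shock-capturing indicator, and a stage-wise positivity limiter. The following sketch only assembles these building blocks with the names introduced by this commit; the full mesh, semidiscretization, and time integration setup is shown in the elixirs themselves:

```julia
using Trixi

equations = ShallowWaterEquations2D(gravity_constant = 9.81, H0 = 1.4)

surface_flux = (FluxHydrostaticReconstruction(flux_hll_chen_noelle,
                                              hydrostatic_reconstruction_chen_noelle),
                flux_nonconservative_chen_noelle)
volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal)

basis = LobattoLegendreBasis(4)
indicator_sc = IndicatorHennemannGassnerShallowWater(equations, basis,
                                                     alpha_max = 0.5,
                                                     alpha_min = 0.001,
                                                     alpha_smooth = true,
                                                     variable = waterheight_pressure)
volume_integral = VolumeIntegralShockCapturingHG(indicator_sc;
                                                 volume_flux_dg = volume_flux,
                                                 volume_flux_fv = surface_flux)
solver = DGSEM(basis, surface_flux, volume_integral)

# Applied as a stage limiter in the time integrator, e.g. SSPRK43(stage_limiter!)
stage_limiter! = PositivityPreservingLimiterShallowWater(variables = (Trixi.waterheight,))
```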
diff --git a/NEWS.md b/NEWS.md
index 35c7039b2ef..8e374d9ce99 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -10,6 +10,7 @@ for human readability.
- Experimental support for 3D parabolic diffusion terms has been added.
- Capability to set truly discontinuous initial conditions in 1D.
+- Wetting and drying feature and examples for 1D and 2D shallow water equations
#### Changed
diff --git a/examples/structured_2d_dgsem/elixir_shallowwater_conical_island.jl b/examples/structured_2d_dgsem/elixir_shallowwater_conical_island.jl
new file mode 100644
index 00000000000..44bc7a12b35
--- /dev/null
+++ b/examples/structured_2d_dgsem/elixir_shallowwater_conical_island.jl
@@ -0,0 +1,113 @@
+
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# Semidiscretization of the shallow water equations
+#
+# TODO: TrixiShallowWater: wet/dry example elixir
+
+equations = ShallowWaterEquations2D(gravity_constant=9.81, H0=1.4)
+
+"""
+ initial_condition_conical_island(x, t, equations::ShallowWaterEquations2D)
+
+Initial condition for the [`ShallowWaterEquations2D`](@ref) to test the [`hydrostatic_reconstruction_chen_noelle`](@ref)
+and its handling of discontinuous water heights at the start in combination with wetting and
+drying. The bottom topography is given by a conical island in the middle of the domain. Around that
+island, there is a cylindrical water column at t=0 and the rest of the domain is dry. This
+discontinuous water height is smoothed by a logistic function. This simulation uses periodic
+boundary conditions.
+"""
+function initial_condition_conical_island(x, t, equations::ShallowWaterEquations2D)
+ # Set the background values
+
+ v1 = 0.0
+ v2 = 0.0
+
+ x1, x2 = x
+ b = max(0.1, 1.0 - 4.0 * sqrt(x1^2 + x2^2))
+
+ # use a logistic function to transfer water height value smoothly
+ L = equations.H0 # maximum of function
+ x0 = 0.3 # center point of function
+ k = -25.0 # sharpness of transfer
+
+ H = max(b, L/(1.0 + exp(-k*(sqrt(x1^2+x2^2) - x0))))
+
+ # It is mandatory to shift the water level at dry areas to make sure the water height h
+ # stays positive. The system would not be stable for h set to a hard 0 due to division by h in
+ # the computation of velocity, e.g., (h v1) / h. Therefore, a small dry state threshold
+ # with a default value of 500*eps() ≈ 1e-13 in double precision, is set in the constructor above
+ # for the ShallowWaterEquations and added to the initial condition if h = 0.
+ # This default value can be changed within the constructor call depending on the simulation setup.
+ H = max(H, b + equations.threshold_limiter)
+ return prim2cons(SVector(H, v1, v2, b), equations)
+end
+
+initial_condition = initial_condition_conical_island
+
+###############################################################################
+# Get the DG approximation space
+
+volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal)
+surface_flux = (FluxHydrostaticReconstruction(flux_hll_chen_noelle, hydrostatic_reconstruction_chen_noelle),
+ flux_nonconservative_chen_noelle)
+
+basis = LobattoLegendreBasis(4)
+
+indicator_sc = IndicatorHennemannGassnerShallowWater(equations, basis,
+ alpha_max=0.5,
+ alpha_min=0.001,
+ alpha_smooth=true,
+ variable=waterheight_pressure)
+volume_integral = VolumeIntegralShockCapturingHG(indicator_sc;
+ volume_flux_dg=volume_flux,
+ volume_flux_fv=surface_flux)
+
+solver = DGSEM(basis, surface_flux, volume_integral)
+
+###############################################################################
+# Get the StructuredMesh and setup a periodic mesh
+
+coordinates_min = (-1.0, -1.0)
+coordinates_max = (1.0, 1.0)
+
+cells_per_dimension = (16, 16)
+
+mesh = StructuredMesh(cells_per_dimension, coordinates_min, coordinates_max)
+
+# Create the semidiscretization object
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver)
+
+###############################################################################
+# ODE solver
+
+tspan = (0.0, 10.0)
+ode = semidiscretize(semi, tspan)
+
+###############################################################################
+# Callbacks
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 1000
+analysis_callback = AnalysisCallback(semi, interval=analysis_interval)
+
+alive_callback = AliveCallback(analysis_interval=analysis_interval)
+
+save_solution = SaveSolutionCallback(interval=100,
+ save_initial_solution=true,
+ save_final_solution=true)
+
+callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution)
+
+###############################################################################
+# run the simulation
+
+stage_limiter! = PositivityPreservingLimiterShallowWater(variables=(Trixi.waterheight,))
+
+sol = solve(ode, SSPRK43(stage_limiter!);
+ ode_default_options()..., callback=callbacks);
+
+summary_callback() # print the timer summary
\ No newline at end of file
diff --git a/examples/structured_2d_dgsem/elixir_shallowwater_parabolic_bowl.jl b/examples/structured_2d_dgsem/elixir_shallowwater_parabolic_bowl.jl
new file mode 100644
index 00000000000..15cfe6698fc
--- /dev/null
+++ b/examples/structured_2d_dgsem/elixir_shallowwater_parabolic_bowl.jl
@@ -0,0 +1,119 @@
+
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# Semidiscretization of the shallow water equations
+#
+# TODO: TrixiShallowWater: wet/dry example elixir
+
+equations = ShallowWaterEquations2D(gravity_constant=9.81)
+
+"""
+ initial_condition_parabolic_bowl(x, t, equations::ShallowWaterEquations2D)
+
+Well-known initial condition to test the [`hydrostatic_reconstruction_chen_noelle`](@ref) and its
+wet-dry mechanics. This test has an analytical solution. The initial condition is defined by the
+analytical solution at time t=0. The bottom topography defines a bowl and the water level is given
+by an oscillating lake.
+
+The original test and its analytical solution were first presented in
+- William C. Thacker (1981)
+ Some exact solutions to the nonlinear shallow-water wave equations
+ [DOI: 10.1017/S0022112081001882](https://doi.org/10.1017/S0022112081001882).
+
+The particular setup below is taken from Section 6.2 of
+- Niklas Wintermeyer, Andrew R. Winters, Gregor J. Gassner and Timothy Warburton (2018)
+ An entropy stable discontinuous Galerkin method for the shallow water equations on
+ curvilinear meshes with wet/dry fronts accelerated by GPUs
+ [DOI: 10.1016/j.jcp.2018.08.038](https://doi.org/10.1016/j.jcp.2018.08.038).
+"""
+function initial_condition_parabolic_bowl(x, t, equations::ShallowWaterEquations2D)
+ a = 1.0
+ h_0 = 0.1
+ sigma = 0.5
+ ω = sqrt(2 * equations.gravity * h_0) / a
+
+ v1 = -sigma * ω * sin(ω * t)
+ v2 = sigma * ω * cos(ω * t)
+
+ b = h_0 * ((x[1])^2 + (x[2])^2) / a^2
+
+ H = sigma * h_0 / a^2 * (2 * x[1] * cos(ω * t) + 2 * x[2] * sin(ω * t) - sigma) + h_0
+
+ # It is mandatory to shift the water level at dry areas to make sure the water height h
+ # stays positive. The system would not be stable for h set to a hard 0 due to division by h in
+ # the computation of velocity, e.g., (h v1) / h. Therefore, a small dry state threshold
+ # with a default value of 500*eps() ≈ 1e-13 in double precision, is set in the constructor above
+ # for the ShallowWaterEquations and added to the initial condition if h = 0.
+ # This default value can be changed within the constructor call depending on the simulation setup.
+ H = max(H, b + equations.threshold_limiter)
+ return prim2cons(SVector(H, v1, v2, b), equations)
+end
+
+initial_condition = initial_condition_parabolic_bowl
+
+
+###############################################################################
+# Get the DG approximation space
+
+volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal)
+surface_flux = (FluxHydrostaticReconstruction(flux_hll_chen_noelle, hydrostatic_reconstruction_chen_noelle),
+ flux_nonconservative_chen_noelle)
+
+basis = LobattoLegendreBasis(4)
+
+indicator_sc = IndicatorHennemannGassnerShallowWater(equations, basis,
+ alpha_max=0.6,
+ alpha_min=0.001,
+ alpha_smooth=true,
+ variable=waterheight_pressure)
+volume_integral = VolumeIntegralShockCapturingHG(indicator_sc;
+ volume_flux_dg=volume_flux,
+ volume_flux_fv=surface_flux)
+
+solver = DGSEM(basis, surface_flux, volume_integral)
+
+
+###############################################################################
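+# Create the StructuredMesh for the domain [-2, 2]^2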
+
+coordinates_min = (-2.0, -2.0)
+coordinates_max = (2.0, 2.0)
+
+cells_per_dimension = (150, 150)
+
+mesh = StructuredMesh(cells_per_dimension, coordinates_min, coordinates_max)
+
+# create the semidiscretization object
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+tspan = (0.0, 1.0)
+ode = semidiscretize(semi, tspan)
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 1000
+analysis_callback = AnalysisCallback(semi, interval=analysis_interval, save_analysis=false,
+ extra_analysis_integrals=(energy_kinetic,
+ energy_internal))
+
+alive_callback = AliveCallback(analysis_interval=analysis_interval)
+
+save_solution = SaveSolutionCallback(interval=100,
+ save_initial_solution=true,
+ save_final_solution=true)
+
+callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution)
+
+stage_limiter! = PositivityPreservingLimiterShallowWater(variables=(Trixi.waterheight,))
+
+###############################################################################
+# run the simulation
+
+sol = solve(ode, SSPRK43(stage_limiter!);
+ ode_default_options()..., callback=callbacks);
+
+summary_callback() # print the timer summary
diff --git a/examples/structured_2d_dgsem/elixir_shallowwater_well_balanced_wet_dry.jl b/examples/structured_2d_dgsem/elixir_shallowwater_well_balanced_wet_dry.jl
new file mode 100644
index 00000000000..b18b02e0b4c
--- /dev/null
+++ b/examples/structured_2d_dgsem/elixir_shallowwater_well_balanced_wet_dry.jl
@@ -0,0 +1,200 @@
+
+using OrdinaryDiffEq
+using Trixi
+using Printf: @printf, @sprintf
+
+###############################################################################
+# Semidiscretization of the shallow water equations
+#
+# TODO: TrixiShallowWater: wet/dry example elixir
+
+
+equations = ShallowWaterEquations2D(gravity_constant=9.812)
+
+"""
+    initial_condition_complex_bottom_well_balanced(x, t, equations::ShallowWaterEquations2D)
+
+Initial condition with a complex (discontinuous) bottom topography to test the well-balanced
+property for the [`hydrostatic_reconstruction_chen_noelle`](@ref) including dry areas within the
+domain. The errors from the analysis callback are not important but the error for this
+lake-at-rest test case `∑|H0-(h+b)|` should be around machine roundoff.
+
+The initial condition is taken from Section 5.2 of the paper:
+- Guoxian Chen and Sebastian Noelle (2017)
+ A new hydrostatic reconstruction scheme based on subcell reconstructions
+ [DOI:10.1137/15M1053074](https://dx.doi.org/10.1137/15M1053074)
+"""
+function initial_condition_complex_bottom_well_balanced(x, t, equations::ShallowWaterEquations2D)
+  v1 = 0.0
+  v2 = 0.0
+ b = sin(4 * pi * x[1]) + 3
+
+ if x[1] >= 0.5
+ b = sin(4 * pi * x[1]) + 1
+ end
+
+ H = max(b, 2.5)
+
+ if x[1] >= 0.5
+ H = max(b, 1.5)
+ end
+
+ # It is mandatory to shift the water level at dry areas to make sure the water height h
+ # stays positive. The system would not be stable for h set to a hard 0 due to division by h in
+ # the computation of velocity, e.g., (h v1) / h. Therefore, a small dry state threshold
+ # with a default value of 500*eps() ≈ 1e-13 in double precision, is set in the constructor above
+ # for the ShallowWaterEquations and added to the initial condition if h = 0.
+ # This default value can be changed within the constructor call depending on the simulation setup.
+ H = max(H, b + equations.threshold_limiter)
+ return prim2cons(SVector(H, v1, v2, b), equations)
+end
+
+initial_condition = initial_condition_complex_bottom_well_balanced
+
+###############################################################################
+# Get the DG approximation space
+
+volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal)
+
+surface_flux = (FluxHydrostaticReconstruction(flux_hll_chen_noelle, hydrostatic_reconstruction_chen_noelle),
+ flux_nonconservative_chen_noelle)
+
+basis = LobattoLegendreBasis(3)
+
+indicator_sc = IndicatorHennemannGassnerShallowWater(equations, basis,
+ alpha_max=0.5,
+ alpha_min=0.001,
+ alpha_smooth=true,
+ variable=waterheight_pressure)
+volume_integral = VolumeIntegralShockCapturingHG(indicator_sc;
+ volume_flux_dg=volume_flux,
+ volume_flux_fv=surface_flux)
+
+solver = DGSEM(basis, surface_flux, volume_integral)
+
+
+###############################################################################
+# Create the StructuredMesh for the domain [0, 1]^2
+
+coordinates_min = (0.0, 0.0)
+coordinates_max = (1.0, 1.0)
+
+cells_per_dimension = (16, 16)
+
+mesh = StructuredMesh(cells_per_dimension, coordinates_min, coordinates_max)
+
+
+# create the semidiscretization object
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+tspan = (0.0, 10.0)
+ode = semidiscretize(semi, tspan)
+
+###############################################################################
+# Workaround to set a discontinuous water and bottom topography for
+# debugging and testing. Essentially, this is a slight augmentation of the
+# `compute_coefficients` where the `x` node value passed here is slightly
+# perturbed to the left / right in order to set a true discontinuity that avoids
+# the doubled value of the LGL nodes at a particular element interface.
+#
+# Note! The errors from the analysis callback are not important, but the error
+# for this lake-at-rest test case `∑|H0-(h+b)|` should be near machine roundoff.
+
+# point to the data we want to augment
+u = Trixi.wrap_array(ode.u0, semi)
+# reset the initial condition
+for element in eachelement(semi.solver, semi.cache)
+ for j in eachnode(semi.solver), i in eachnode(semi.solver)
+ x_node = Trixi.get_node_coords(semi.cache.elements.node_coordinates, equations, semi.solver, i, j, element)
+ # We know that the discontinuity is a vertical line. Slightly augment the x value by a factor
+    # of unit roundoff to avoid the repeated value from the LGL nodes at the interface.
+ if i == 1
+      x_node = SVector(nextfloat(x_node[1]), x_node[2])
+    elseif i == nnodes(semi.solver)
+      x_node = SVector(prevfloat(x_node[1]), x_node[2])
+ end
+ u_node = initial_condition_complex_bottom_well_balanced(x_node, first(tspan), equations)
+ Trixi.set_node_vars!(u, u_node, equations, semi.solver, i, j, element)
+ end
+end
+
+###############################################################################
+# Callbacks
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 1000
+analysis_callback = AnalysisCallback(semi, interval=analysis_interval, save_analysis=false)
+
+alive_callback = AliveCallback(analysis_interval=analysis_interval)
+
+save_solution = SaveSolutionCallback(interval=1000,
+ save_initial_solution=true,
+ save_final_solution=true)
+
+stepsize_callback = StepsizeCallback(cfl=1.0)
+
+callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution, stepsize_callback)
+
+stage_limiter! = PositivityPreservingLimiterShallowWater(variables=(Trixi.waterheight,))
+
+###############################################################################
+# run the simulation
+
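+# Note: `dt=1.0` is only a placeholder; since `adaptive=false` is set, the actual time step
+# size at every step is determined by the CFL-based `StepsizeCallback` above.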
+sol = solve(ode, SSPRK43(stage_limiter!); dt=1.0,
+ ode_default_options()..., callback=callbacks, adaptive=false);
+
+summary_callback() # print the timer summary
+
+###############################################################################
+# Workaround to compute the well-balancedness error for this particular problem
+# that has two reference water heights. One for a lake to the left of the
+# discontinuous bottom topography `H0_upper = 2.5` and another for a lake to the
+# right of the discontinuous bottom topography `H0_lower = 1.5`.
+
+# Declare a special version of the function to compute the lake-at-rest error
+# OBS! The reference water height values are hardcoded for convenience.
+function lake_at_rest_error_two_level(u, x, equations::ShallowWaterEquations2D)
+ h, _, _, b = u
+
+  # For well-balancedness testing with possible wet/dry regions, the reference
+  # water height `H0` accounts for the possibility that the bottom topography
+  # can emerge out of the water as well as for the threshold offset that avoids
+  # division by a "hard" zero water height.
+ if x[1] < 0.5
+    H0_wet_dry = max(2.5, b + equations.threshold_limiter)
+  else
+    H0_wet_dry = max(1.5, b + equations.threshold_limiter)
+ end
+
+ return abs(H0_wet_dry - (h + b))
+end
+
+# point to the data we want to analyze
+u = Trixi.wrap_array(sol[end], semi)
+# Perform the actual integration of the well-balancedness error over the domain
+l1_well_balance_error = Trixi.integrate_via_indices(u, mesh, equations, semi.solver, semi.cache; normalize=true) do u, i, j, element, equations, solver
+ x_node = Trixi.get_node_coords(semi.cache.elements.node_coordinates, equations, solver, i, j, element)
+ # We know that the discontinuity is a vertical line. Slightly augment the x value by a factor
+  # of unit roundoff to avoid the repeated value from the LGL nodes at the interface.
+ if i == 1
+    x_node = SVector(nextfloat(x_node[1]), x_node[2])
+  elseif i == nnodes(semi.solver)
+    x_node = SVector(prevfloat(x_node[1]), x_node[2])
+ end
+ u_local = Trixi.get_node_vars(u, equations, solver, i, j, element)
+ return lake_at_rest_error_two_level(u_local, x_node, equations)
+end
+
+# report the well-balancedness lake-at-rest error to the screen
+println("─"^100)
+println(" Lake-at-rest error for '", Trixi.get_name(equations), "' with ", summary(solver),
+ " at final time " * @sprintf("%10.8e", tspan[end]))
+
+@printf(" %-12s:", Trixi.pretty_form_utf(lake_at_rest_error))
+@printf(" % 10.8e", l1_well_balance_error)
+println()
+println("─"^100)
diff --git a/examples/tree_1d_dgsem/elixir_shallowwater_beach.jl b/examples/tree_1d_dgsem/elixir_shallowwater_beach.jl
new file mode 100644
index 00000000000..1288bc5e66a
--- /dev/null
+++ b/examples/tree_1d_dgsem/elixir_shallowwater_beach.jl
@@ -0,0 +1,121 @@
+
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# Semidiscretization of the shallow water equations
+#
+# TODO: TrixiShallowWater: wet/dry example elixir
+
+equations = ShallowWaterEquations1D(gravity_constant=9.812)
+
+"""
+    initial_condition_beach(x, t, equations::ShallowWaterEquations1D)
+
+Initial condition to simulate a wave running towards a beach and crashing. This is a difficult
+test that includes both wetting and drying in the domain and uses slip wall boundary conditions.
+The bottom topography is altered to be differentiable on the domain [0, 8] and thus
+differs from the reference below.
+
+The water height and speed functions used here are adapted from the initial condition
+found in Section 5.2 of the paper:
+ - Andreas Bollermann, Sebastian Noelle, Maria Lukáčová-Medvid’ová (2011)
+ Finite volume evolution Galerkin methods for the shallow water equations with dry beds\n
+ [DOI: 10.4208/cicp.220210.020710a](https://dx.doi.org/10.4208/cicp.220210.020710a)
+"""
+function initial_condition_beach(x, t, equations::ShallowWaterEquations1D)
+ D = 1
+ delta = 0.02
+ gamma = sqrt((3 * delta) / (4 * D))
+ x_a = sqrt((4 * D) / (3 * delta)) * acosh(sqrt(20))
+
+ f = D + 40 * delta * sech(gamma * (8 * x[1] - x_a))^2
+
+ # steep curved beach
+ b = 0.01 + 99 / 409600 * 4^x[1]
+
+ if x[1] >= 6
+ H = b
+ v = 0.0
+ else
+ H = f
+ v = sqrt(equations.gravity / D) * H
+ end
+
+ # It is mandatory to shift the water level at dry areas to make sure the water height h
+ # stays positive. The system would not be stable for h set to a hard 0 due to division by h in
+ # the computation of velocity, e.g., (h v) / h. Therefore, a small dry state threshold
+ # with a default value of 500*eps() ≈ 1e-13 in double precision, is set in the constructor above
+ # for the ShallowWaterEquations and added to the initial condition if h = 0.
+ # This default value can be changed within the constructor call depending on the simulation setup.
+ H = max(H, b + equations.threshold_limiter)
+ return prim2cons(SVector(H, v, b), equations)
+end
+
+initial_condition = initial_condition_beach
+boundary_condition = boundary_condition_slip_wall
+
+###############################################################################
+# Get the DG approximation space
+
+volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal)
+surface_flux = (FluxHydrostaticReconstruction(flux_hll_chen_noelle, hydrostatic_reconstruction_chen_noelle),
+ flux_nonconservative_chen_noelle)
+
+basis = LobattoLegendreBasis(3)
+
+indicator_sc = IndicatorHennemannGassnerShallowWater(equations, basis,
+ alpha_max=0.5,
+ alpha_min=0.001,
+ alpha_smooth=true,
+ variable=waterheight_pressure)
+volume_integral = VolumeIntegralShockCapturingHG(indicator_sc;
+ volume_flux_dg=volume_flux,
+ volume_flux_fv=surface_flux)
+
+solver = DGSEM(basis, surface_flux, volume_integral)
+
+###############################################################################
+# Create the TreeMesh for the domain [0, 8]
+
+coordinates_min = 0.0
+coordinates_max = 8.0
+
+mesh = TreeMesh(coordinates_min, coordinates_max,
+ initial_refinement_level=7,
+ n_cells_max=10_000,
+ periodicity=false)
+
+# create the semidiscretization object
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver,
+ boundary_conditions=boundary_condition)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+tspan = (0.0, 10.0)
+ode = semidiscretize(semi, tspan)
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 1000
+analysis_callback = AnalysisCallback(semi, interval=analysis_interval, save_analysis=false,
+ extra_analysis_integrals=(energy_kinetic,
+ energy_internal))
+
+alive_callback = AliveCallback(analysis_interval=analysis_interval)
+
+save_solution = SaveSolutionCallback(dt=0.5,
+ save_initial_solution=true,
+ save_final_solution=true)
+
+callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution)
+
+stage_limiter! = PositivityPreservingLimiterShallowWater(variables=(Trixi.waterheight,))
+
+###############################################################################
+# run the simulation
+
+sol = solve(ode, SSPRK43(stage_limiter!);
+ ode_default_options()..., callback=callbacks);
+
+summary_callback() # print the timer summary
\ No newline at end of file
diff --git a/examples/tree_1d_dgsem/elixir_shallowwater_parabolic_bowl.jl b/examples/tree_1d_dgsem/elixir_shallowwater_parabolic_bowl.jl
new file mode 100644
index 00000000000..916bba76ece
--- /dev/null
+++ b/examples/tree_1d_dgsem/elixir_shallowwater_parabolic_bowl.jl
@@ -0,0 +1,117 @@
+
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# Semidiscretization of the shallow water equations
+#
+# TODO: TrixiShallowWater: wet/dry example elixir
+
+equations = ShallowWaterEquations1D(gravity_constant=9.81)
+
+"""
+    initial_condition_parabolic_bowl(x, t, equations::ShallowWaterEquations1D)
+
+Well-known initial condition to test the [`hydrostatic_reconstruction_chen_noelle`](@ref) and its
+wet-dry mechanics. This test has analytical solutions. The initial condition is defined by the
+analytical solution at time t=0. The bottom topography defines a bowl and the water level is given
+by an oscillating lake.
+
+The original test and its analytical solution in two dimensions were first presented in
+- William C. Thacker (1981)
+ Some exact solutions to the nonlinear shallow-water wave equations
+ [DOI: 10.1017/S0022112081001882](https://doi.org/10.1017/S0022112081001882).
+
+The particular setup below is taken from Section 6.2 of
+- Niklas Wintermeyer, Andrew R. Winters, Gregor J. Gassner and Timothy Warburton (2018)
+ An entropy stable discontinuous Galerkin method for the shallow water equations on
+ curvilinear meshes with wet/dry fronts accelerated by GPUs
+ [DOI: 10.1016/j.jcp.2018.08.038](https://doi.org/10.1016/j.jcp.2018.08.038).
+"""
+function initial_condition_parabolic_bowl(x, t, equations::ShallowWaterEquations1D)
+ a = 1
+ h_0 = 0.1
+ sigma = 0.5
+ ω = sqrt(2 * equations.gravity * h_0) / a
+
+ v = -sigma * ω * sin(ω * t)
+
+ b = h_0 * x[1]^2 / a^2
+
+ H = sigma * h_0 / a^2 * (2 * x[1] * cos(ω * t) - sigma) + h_0
+
+ # It is mandatory to shift the water level at dry areas to make sure the water height h
+ # stays positive. The system would not be stable for h set to a hard 0 due to division by h in
+ # the computation of velocity, e.g., (h v) / h. Therefore, a small dry state threshold
+ # with a default value of 500*eps() ≈ 1e-13 in double precision, is set in the constructor above
+ # for the ShallowWaterEquations and added to the initial condition if h = 0.
+ # This default value can be changed within the constructor call depending on the simulation setup.
+ H = max(H, b + equations.threshold_limiter)
+ return prim2cons(SVector(H, v, b), equations)
+end
+
+initial_condition = initial_condition_parabolic_bowl
+
+###############################################################################
+# Get the DG approximation space
+
+volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal)
+surface_flux = (FluxHydrostaticReconstruction(flux_hll_chen_noelle, hydrostatic_reconstruction_chen_noelle),
+ flux_nonconservative_chen_noelle)
+
+basis = LobattoLegendreBasis(5)
+
+indicator_sc = IndicatorHennemannGassnerShallowWater(equations, basis,
+ alpha_max=0.5,
+ alpha_min=0.001,
+ alpha_smooth=true,
+ variable=waterheight_pressure)
+volume_integral = VolumeIntegralShockCapturingHG(indicator_sc;
+ volume_flux_dg=volume_flux,
+ volume_flux_fv=surface_flux)
+
+solver = DGSEM(basis, surface_flux, volume_integral)
+
+###############################################################################
+# Create the TreeMesh for the domain [-2, 2]
+
+coordinates_min = -2.0
+coordinates_max = 2.0
+
+mesh = TreeMesh(coordinates_min, coordinates_max,
+ initial_refinement_level=6,
+ n_cells_max=10_000)
+
+# create the semidiscretization object
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+tspan = (0.0, 10.0)
+ode = semidiscretize(semi, tspan)
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 1000
+analysis_callback = AnalysisCallback(semi, interval=analysis_interval, save_analysis=false,
+ extra_analysis_integrals=(energy_kinetic,
+ energy_internal))
+
+alive_callback = AliveCallback(analysis_interval=analysis_interval)
+
+save_solution = SaveSolutionCallback(interval=1000,
+ save_initial_solution=true,
+ save_final_solution=true)
+
+callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution)
+
+stage_limiter! = PositivityPreservingLimiterShallowWater(variables=(Trixi.waterheight,))
+
+###############################################################################
+# run the simulation
+
+sol = solve(ode, SSPRK43(stage_limiter!);
+ ode_default_options()..., callback=callbacks);
+
+summary_callback() # print the timer summary
\ No newline at end of file
diff --git a/examples/tree_1d_dgsem/elixir_shallowwater_well_balanced_wet_dry.jl b/examples/tree_1d_dgsem/elixir_shallowwater_well_balanced_wet_dry.jl
new file mode 100644
index 00000000000..8de46c61794
--- /dev/null
+++ b/examples/tree_1d_dgsem/elixir_shallowwater_well_balanced_wet_dry.jl
@@ -0,0 +1,165 @@
+
+using OrdinaryDiffEq
+using Trixi
+using Printf: @printf, @sprintf
+
+###############################################################################
+# Semidiscretization of the shallow water equations
+#
+# TODO: TrixiShallowWater: wet/dry example elixir
+
+equations = ShallowWaterEquations1D(gravity_constant=9.812)
+
+"""
+    initial_condition_complex_bottom_well_balanced(x, t, equations::ShallowWaterEquations1D)
+
+Initial condition with a complex (discontinuous) bottom topography to test the well-balanced
+property for the [`hydrostatic_reconstruction_chen_noelle`](@ref) including dry areas within the
+domain. The errors from the analysis callback are not important but the error for this
+lake-at-rest test case `∑|H0-(h+b)|` should be around machine roundoff.
+
+The initial condition is taken from Section 5.2 of the paper:
+- Guoxian Chen and Sebastian Noelle (2017)
+ A new hydrostatic reconstruction scheme based on subcell reconstructions
+ [DOI:10.1137/15M1053074](https://dx.doi.org/10.1137/15M1053074)
+"""
+function initial_condition_complex_bottom_well_balanced(x, t, equations::ShallowWaterEquations1D)
+ v = 0.0
+ b = sin(4 * pi * x[1]) + 3
+
+ if x[1] >= 0.5
+ b = sin(4 * pi * x[1]) + 1
+ end
+
+ H = max(b, 2.5)
+
+ if x[1] >= 0.5
+ H = max(b, 1.5)
+ end
+
+ # It is mandatory to shift the water level at dry areas to make sure the water height h
+ # stays positive. The system would not be stable for h set to a hard 0 due to division by h in
+ # the computation of velocity, e.g., (h v) / h. Therefore, a small dry state threshold
+ # with a default value of 500*eps() ≈ 1e-13 in double precision, is set in the constructor above
+ # for the ShallowWaterEquations and added to the initial condition if h = 0.
+ # This default value can be changed within the constructor call depending on the simulation setup.
+ H = max(H, b + equations.threshold_limiter)
+ return prim2cons(SVector(H, v, b), equations)
+end
+
+initial_condition = initial_condition_complex_bottom_well_balanced
+
+###############################################################################
+# Get the DG approximation space
+
+volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal)
+surface_flux = (FluxHydrostaticReconstruction(flux_hll_chen_noelle, hydrostatic_reconstruction_chen_noelle),
+ flux_nonconservative_chen_noelle)
+
+basis = LobattoLegendreBasis(3)
+
+indicator_sc = IndicatorHennemannGassnerShallowWater(equations, basis,
+ alpha_max=0.5,
+ alpha_min=0.001,
+ alpha_smooth=true,
+ variable=waterheight_pressure)
+volume_integral = VolumeIntegralShockCapturingHG(indicator_sc;
+ volume_flux_dg=volume_flux,
+ volume_flux_fv=surface_flux)
+
+solver = DGSEM(basis, surface_flux, volume_integral)
+
+###############################################################################
+# Create the TreeMesh for the domain [0, 1]
+
+coordinates_min = 0.0
+coordinates_max = 1.0
+
+mesh = TreeMesh(coordinates_min, coordinates_max,
+ initial_refinement_level=6,
+ n_cells_max=10_000)
+
+# create the semidiscretization object
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+tspan = (0.0, 25.0)
+ode = semidiscretize(semi, tspan)
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 5000
+analysis_callback = AnalysisCallback(semi, interval=analysis_interval, save_analysis=false)
+
+alive_callback = AliveCallback(analysis_interval=analysis_interval)
+
+save_solution = SaveSolutionCallback(interval=5000,
+ save_initial_solution=true,
+ save_final_solution=true)
+
+stepsize_callback = StepsizeCallback(cfl=1.5)
+
+callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution,
+ stepsize_callback)
+
+stage_limiter! = PositivityPreservingLimiterShallowWater(variables=(Trixi.waterheight,))
+
+###############################################################################
+# run the simulation
+
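+# Note: `dt=1.0` is only a placeholder; since `adaptive=false` is set, the actual time step
+# size at every step is determined by the CFL-based `StepsizeCallback` above.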
+sol = solve(ode, SSPRK43(stage_limiter!); dt=1.0,
+ ode_default_options()..., callback=callbacks, adaptive=false);
+
+summary_callback() # print the timer summary
+
+###############################################################################
+# Workaround to compute the well-balancedness error for this particular problem
+# that has two reference water heights. One for a lake to the left of the
+# discontinuous bottom topography `H0_upper = 2.5` and another for a lake to the
+# right of the discontinuous bottom topography `H0_lower = 1.5`.
+
+# Declare a special version of the function to compute the lake-at-rest error
+# OBS! The reference water height values are hardcoded for convenience.
+function lake_at_rest_error_two_level(u, x, equations::ShallowWaterEquations1D)
+ h, _, b = u
+
+  # For well-balancedness testing with possible wet/dry regions, the reference
+  # water height `H0` accounts for the possibility that the bottom topography
+  # can emerge out of the water as well as for the threshold offset that avoids
+  # division by a "hard" zero water height.
+ if x[1] < 0.5
+    H0_wet_dry = max(2.5, b + equations.threshold_limiter)
+  else
+    H0_wet_dry = max(1.5, b + equations.threshold_limiter)
+ end
+
+ return abs(H0_wet_dry - (h + b))
+end
+
+# point to the data we want to analyze
+u = Trixi.wrap_array(sol[end], semi)
+# Perform the actual integration of the well-balancedness error over the domain
+l1_well_balance_error = Trixi.integrate_via_indices(u, mesh, equations, semi.solver, semi.cache; normalize=true) do u, i, element, equations, solver
+ x_node = Trixi.get_node_coords(semi.cache.elements.node_coordinates, equations, solver, i, element)
+ # We know that the discontinuity is a vertical line. Slightly augment the x value by a factor
+  # of unit roundoff to avoid the repeated value from the LGL nodes at the interface.
+ if i == 1
+ x_node = SVector(nextfloat(x_node[1]))
+ elseif i == nnodes(semi.solver)
+ x_node = SVector(prevfloat(x_node[1]))
+ end
+ u_local = Trixi.get_node_vars(u, equations, solver, i, element)
+ return lake_at_rest_error_two_level(u_local, x_node, equations)
+end
+
+# report the well-balancedness lake-at-rest error to the screen
+println("─"^100)
+println(" Lake-at-rest error for '", Trixi.get_name(equations), "' with ", summary(solver),
+ " at final time " * @sprintf("%10.8e", tspan[end]))
+
+@printf(" %-12s:", Trixi.pretty_form_utf(lake_at_rest_error))
+@printf(" % 10.8e", l1_well_balance_error)
+println()
+println("─"^100)
\ No newline at end of file
diff --git a/examples/tree_2d_dgsem/elixir_shallowwater_conical_island.jl b/examples/tree_2d_dgsem/elixir_shallowwater_conical_island.jl
new file mode 100644
index 00000000000..7c60e35b03e
--- /dev/null
+++ b/examples/tree_2d_dgsem/elixir_shallowwater_conical_island.jl
@@ -0,0 +1,116 @@
+
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# semidiscretization of the shallow water equations
+#
+# TODO: TrixiShallowWater: wet/dry example elixir
+
+equations = ShallowWaterEquations2D(gravity_constant=9.81, H0=1.4)
+
+"""
+ initial_condition_conical_island(x, t, equations::ShallowWaterEquations2D)
+
+Initial condition for the [`ShallowWaterEquations2D`](@ref) to test the [`hydrostatic_reconstruction_chen_noelle`](@ref)
+and its handling of discontinuous water heights at the start in combination with wetting and
+drying. The bottom topography is given by a conical island in the middle of the domain. Around that
+island, there is a cylindrical water column at t=0 and the rest of the domain is dry. This
+discontinuous water height is smoothed by a logistic function. This simulation uses a Dirichlet
+boundary condition with the initial values. Due to the dry cells at the boundary, this effectively
+acts as an outflow, which can be seen in the simulation.
+"""
+function initial_condition_conical_island(x, t, equations::ShallowWaterEquations2D)
+ # Set the background values
+
+ v1 = 0.0
+ v2 = 0.0
+
+ x1, x2 = x
+ b = max(0.1, 1.0 - 4.0 * sqrt(x1^2 + x2^2))
+
+ # use a logistic function to transfer water height value smoothly
+ L = equations.H0 # maximum of function
+ x0 = 0.3 # center point of function
+ k = -25.0 # sharpness of transfer
+
+ H = max(b, L/(1.0 + exp(-k*(sqrt(x1^2+x2^2) - x0))))
+
+ # It is mandatory to shift the water level at dry areas to make sure the water height h
+ # stays positive. The system would not be stable for h set to a hard 0 due to division by h in
+ # the computation of velocity, e.g., (h v1) / h. Therefore, a small dry state threshold
+ # with a default value of 500*eps() ≈ 1e-13 in double precision, is set in the constructor above
+ # for the ShallowWaterEquations and added to the initial condition if h = 0.
+ # This default value can be changed within the constructor call depending on the simulation setup.
+ H = max(H, b + equations.threshold_limiter)
+ return prim2cons(SVector(H, v1, v2, b), equations)
+end
+
+initial_condition = initial_condition_conical_island
+boundary_conditions = BoundaryConditionDirichlet(initial_condition)
+
+###############################################################################
+# Get the DG approximation space
+
+volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal)
+surface_flux = (FluxHydrostaticReconstruction(flux_hll_chen_noelle, hydrostatic_reconstruction_chen_noelle),
+ flux_nonconservative_chen_noelle)
+
+basis = LobattoLegendreBasis(4)
+
+indicator_sc = IndicatorHennemannGassnerShallowWater(equations, basis,
+ alpha_max=0.5,
+ alpha_min=0.001,
+ alpha_smooth=true,
+ variable=waterheight_pressure)
+volume_integral = VolumeIntegralShockCapturingHG(indicator_sc;
+ volume_flux_dg=volume_flux,
+ volume_flux_fv=surface_flux)
+
+solver = DGSEM(basis, surface_flux, volume_integral)
+
+###############################################################################
+# Get the TreeMesh and setup a mesh
+
+coordinates_min = (-1.0, -1.0)
+coordinates_max = (1.0, 1.0)
+mesh = TreeMesh(coordinates_min, coordinates_max,
+ initial_refinement_level=4,
+ n_cells_max=10_000,
+ periodicity=false)
+
+# Create the semidiscretization object
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver,
+ boundary_conditions=boundary_conditions)
+
+###############################################################################
+# ODE solver
+
+tspan = (0.0, 10.0)
+ode = semidiscretize(semi, tspan)
+
+###############################################################################
+# Callbacks
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 1000
+analysis_callback = AnalysisCallback(semi, interval=analysis_interval)
+
+alive_callback = AliveCallback(analysis_interval=analysis_interval)
+
+save_solution = SaveSolutionCallback(interval=100,
+ save_initial_solution=true,
+ save_final_solution=true)
+
+callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution)
+
+###############################################################################
+# run the simulation
+
+stage_limiter! = PositivityPreservingLimiterShallowWater(variables=(Trixi.waterheight,))
+
+sol = solve(ode, SSPRK43(stage_limiter!);
+ ode_default_options()..., callback=callbacks);
+
+summary_callback() # print the timer summary
diff --git a/examples/tree_2d_dgsem/elixir_shallowwater_parabolic_bowl.jl b/examples/tree_2d_dgsem/elixir_shallowwater_parabolic_bowl.jl
new file mode 100644
index 00000000000..03dcf017266
--- /dev/null
+++ b/examples/tree_2d_dgsem/elixir_shallowwater_parabolic_bowl.jl
@@ -0,0 +1,120 @@
+
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# Semidiscretization of the shallow water equations
+#
+# TODO: TrixiShallowWater: wet/dry example elixir
+
+equations = ShallowWaterEquations2D(gravity_constant=9.81)
+
+"""
+    initial_condition_parabolic_bowl(x, t, equations::ShallowWaterEquations2D)
+
+Well-known initial condition to test the [`hydrostatic_reconstruction_chen_noelle`](@ref) and its
+wet-dry mechanics. This test has an analytical solution. The initial condition is defined by the
+analytical solution at time t=0. The bottom topography defines a bowl and the water level is given
+by an oscillating lake.
+
+The original test and its analytical solution were first presented in
+- William C. Thacker (1981)
+ Some exact solutions to the nonlinear shallow-water wave equations
+ [DOI: 10.1017/S0022112081001882](https://doi.org/10.1017/S0022112081001882).
+
+The particular setup below is taken from Section 6.2 of
+- Niklas Wintermeyer, Andrew R. Winters, Gregor J. Gassner and Timothy Warburton (2018)
+ An entropy stable discontinuous Galerkin method for the shallow water equations on
+ curvilinear meshes with wet/dry fronts accelerated by GPUs
+ [DOI: 10.1016/j.jcp.2018.08.038](https://doi.org/10.1016/j.jcp.2018.08.038).
+"""
+function initial_condition_parabolic_bowl(x, t, equations::ShallowWaterEquations2D)
+ a = 1.0
+ h_0 = 0.1
+ sigma = 0.5
+ ω = sqrt(2 * equations.gravity * h_0) / a
+
+ v1 = -sigma * ω * sin(ω * t)
+ v2 = sigma * ω * cos(ω * t)
+
+ b = h_0 * ((x[1])^2 + (x[2])^2) / a^2
+
+ H = sigma * h_0 / a^2 * (2 * x[1] * cos(ω * t) + 2 * x[2] * sin(ω * t) - sigma) + h_0
+
+ # It is mandatory to shift the water level at dry areas to make sure the water height h
+ # stays positive. The system would not be stable for h set to a hard 0 due to division by h in
+ # the computation of velocity, e.g., (h v1) / h. Therefore, a small dry state threshold
+ # with a default value of 500*eps() ≈ 1e-13 in double precision, is set in the constructor above
+ # for the ShallowWaterEquations and added to the initial condition if h = 0.
+ # This default value can be changed within the constructor call depending on the simulation setup.
+ H = max(H, b + equations.threshold_limiter)
+ return prim2cons(SVector(H, v1, v2, b), equations)
+end
+
+initial_condition = initial_condition_parabolic_bowl
+
+###############################################################################
+# Get the DG approximation space
+
+volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal)
+surface_flux = (FluxHydrostaticReconstruction(flux_hll_chen_noelle, hydrostatic_reconstruction_chen_noelle),
+ flux_nonconservative_chen_noelle)
+
+basis = LobattoLegendreBasis(7)
+
+indicator_sc = IndicatorHennemannGassnerShallowWater(equations, basis,
+ alpha_max=0.6,
+ alpha_min=0.001,
+ alpha_smooth=true,
+ variable=waterheight_pressure)
+volume_integral = VolumeIntegralShockCapturingHG(indicator_sc;
+ volume_flux_dg=volume_flux,
+ volume_flux_fv=surface_flux)
+
+solver = DGSEM(basis, surface_flux, volume_integral)
+
+
+###############################################################################
+# Create the TreeMesh for the domain [-2, 2]^2
+
+coordinates_min = (-2.0, -2.0)
+coordinates_max = (2.0, 2.0)
+
+mesh = TreeMesh(coordinates_min, coordinates_max,
+ initial_refinement_level=5,
+ n_cells_max=10_000)
+
+# create the semidiscretization object
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+tspan = (0.0, 1.0)
+ode = semidiscretize(semi, tspan)
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 1000
+analysis_callback = AnalysisCallback(semi, interval=analysis_interval, save_analysis=false,
+ extra_analysis_integrals=(energy_kinetic,
+ energy_internal))
+
+alive_callback = AliveCallback(analysis_interval=analysis_interval)
+
+save_solution = SaveSolutionCallback(interval=100,
+ save_initial_solution=true,
+ save_final_solution=true)
+
+callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution)
+
+stage_limiter! = PositivityPreservingLimiterShallowWater(variables=(Trixi.waterheight,))
+
+###############################################################################
+# run the simulation
+
+sol = solve(ode, SSPRK43(stage_limiter!);
+ ode_default_options()..., callback=callbacks);
+
+summary_callback() # print the timer summary
diff --git a/examples/tree_2d_dgsem/elixir_shallowwater_well_balanced_wet_dry.jl b/examples/tree_2d_dgsem/elixir_shallowwater_well_balanced_wet_dry.jl
new file mode 100644
index 00000000000..6fede2fa4ea
--- /dev/null
+++ b/examples/tree_2d_dgsem/elixir_shallowwater_well_balanced_wet_dry.jl
@@ -0,0 +1,198 @@
+
+using OrdinaryDiffEq
+using Trixi
+using Printf: @printf, @sprintf
+
+###############################################################################
+# Semidiscretization of the shallow water equations
+#
+# TODO: TrixiShallowWater: wet/dry example elixir
+
+equations = ShallowWaterEquations2D(gravity_constant=9.812)
+
+"""
+    initial_condition_complex_bottom_well_balanced(x, t, equations::ShallowWaterEquations2D)
+
+Initial condition with a complex (discontinuous) bottom topography to test the well-balanced
+property for the [`hydrostatic_reconstruction_chen_noelle`](@ref) including dry areas within the
+domain. The errors from the analysis callback are not important but the error for this
+lake-at-rest test case `∑|H0-(h+b)|` should be around machine roundoff.
+
+The initial condition is taken from Section 5.2 of the paper:
+- Guoxian Chen and Sebastian Noelle (2017)
+ A new hydrostatic reconstruction scheme based on subcell reconstructions
+ [DOI:10.1137/15M1053074](https://dx.doi.org/10.1137/15M1053074)
+"""
+function initial_condition_complex_bottom_well_balanced(x, t, equations::ShallowWaterEquations2D)
+  v1 = 0.0
+  v2 = 0.0
+ b = sin(4 * pi * x[1]) + 3
+
+ if x[1] >= 0.5
+ b = sin(4 * pi * x[1]) + 1
+ end
+
+ H = max(b, 2.5)
+ if x[1] >= 0.5
+ H = max(b, 1.5)
+ end
+
+ # It is mandatory to shift the water level at dry areas to make sure the water height h
+ # stays positive. The system would not be stable for h set to a hard 0 due to division by h in
+ # the computation of velocity, e.g., (h v1) / h. Therefore, a small dry state threshold
+ # with a default value of 500*eps() ≈ 1e-13 in double precision, is set in the constructor above
+ # for the ShallowWaterEquations and added to the initial condition if h = 0.
+ # This default value can be changed within the constructor call depending on the simulation setup.
+ H = max(H, b + equations.threshold_limiter)
+ return prim2cons(SVector(H, v1, v2, b), equations)
+end
+
+initial_condition = initial_condition_complex_bottom_well_balanced
+
+###############################################################################
+# Get the DG approximation space
+
+volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal)
+surface_flux = (FluxHydrostaticReconstruction(flux_hll_chen_noelle, hydrostatic_reconstruction_chen_noelle),
+ flux_nonconservative_chen_noelle)
+
+basis = LobattoLegendreBasis(3)
+
+indicator_sc = IndicatorHennemannGassnerShallowWater(equations, basis,
+ alpha_max=0.5,
+ alpha_min=0.001,
+ alpha_smooth=true,
+ variable=waterheight_pressure)
+volume_integral = VolumeIntegralShockCapturingHG(indicator_sc;
+ volume_flux_dg=volume_flux,
+ volume_flux_fv=surface_flux)
+
+solver = DGSEM(basis, surface_flux, volume_integral)
+
+###############################################################################
+# Create the TreeMesh for the domain [0, 1]^2
+
+coordinates_min = (0.0, 0.0)
+coordinates_max = (1.0, 1.0)
+
+mesh = TreeMesh(coordinates_min, coordinates_max,
+ initial_refinement_level=3,
+ n_cells_max=10_000)
+
+# create the semidiscretization object
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+tspan = (0.0, 50.0)
+ode = semidiscretize(semi, tspan)
+
+###############################################################################
+# Workaround to set a discontinuous water and bottom topography for
+# debugging and testing. Essentially, this is a slight augmentation of the
+# `compute_coefficients` where the `x` node value passed here is slightly
+# perturbed to the left / right in order to set a true discontinuity that avoids
+# the doubled value of the LGL nodes at a particular element interface.
+#
+# Note! The errors from the analysis callback are not important, but the error
+# for this lake-at-rest test case `∑|H0-(h+b)|` should be near machine roundoff.
+
+# point to the data we want to augment
+u = Trixi.wrap_array(ode.u0, semi)
+# reset the initial condition
+for element in eachelement(semi.solver, semi.cache)
+ for j in eachnode(semi.solver), i in eachnode(semi.solver)
+ x_node = Trixi.get_node_coords(semi.cache.elements.node_coordinates, equations, semi.solver, i, j, element)
+ # We know that the discontinuity is a vertical line. Slightly augment the x value by a factor
+    # of unit roundoff to avoid the repeated value from the LGL nodes at the interface.
+ if i == 1
+      x_node = SVector(nextfloat(x_node[1]), x_node[2])
+    elseif i == nnodes(semi.solver)
+      x_node = SVector(prevfloat(x_node[1]), x_node[2])
+ end
+ u_node = initial_condition_complex_bottom_well_balanced(x_node, first(tspan), equations)
+ Trixi.set_node_vars!(u, u_node, equations, semi.solver, i, j, element)
+ end
+end
+
+###############################################################################
+# Callbacks
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 1000
+analysis_callback = AnalysisCallback(semi, interval=analysis_interval, save_analysis=false)
+
+alive_callback = AliveCallback(analysis_interval=analysis_interval)
+
+save_solution = SaveSolutionCallback(interval=1000,
+ save_initial_solution=true,
+ save_final_solution=true)
+
+stepsize_callback = StepsizeCallback(cfl=2.0)
+
+
+callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution,
+ stepsize_callback)
+
+stage_limiter! = PositivityPreservingLimiterShallowWater(variables=(Trixi.waterheight,))
+
+###############################################################################
+# run the simulation
+
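+# Note: `dt=1.0` is only a placeholder; since `adaptive=false` is set, the actual time step
+# size at every step is determined by the CFL-based `StepsizeCallback` above.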
+sol = solve(ode, SSPRK43(stage_limiter!); dt=1.0,
+ ode_default_options()..., callback=callbacks, adaptive=false);
+
+summary_callback() # print the timer summary
+
+###############################################################################
+# Workaround to compute the well-balancedness error for this particular problem
+# that has two reference water heights. One for a lake to the left of the
+# discontinuous bottom topography `H0_upper = 2.5` and another for a lake to the
+# right of the discontinuous bottom topography `H0_lower = 1.5`.
+
+# Declare a special version of the function to compute the lake-at-rest error
+# OBS! The reference water height values are hardcoded for convenience.
+function lake_at_rest_error_two_level(u, x, equations::ShallowWaterEquations2D)
+ h, _, _, b = u
+
+  # For well-balancedness testing with possible wet/dry regions, the reference
+  # water height `H0` accounts for the possibility that the bottom topography
+  # can emerge out of the water as well as for the threshold offset that avoids
+  # division by a "hard" zero water height.
+
+ if x[1] < 0.5
+    H0_wet_dry = max(2.5, b + equations.threshold_limiter)
+  else
+    H0_wet_dry = max(1.5, b + equations.threshold_limiter)
+ end
+
+ return abs(H0_wet_dry - (h + b))
+end
+
+# point to the data we want to analyze
+u = Trixi.wrap_array(sol[end], semi)
+# Perform the actual integration of the well-balancedness error over the domain
+l1_well_balance_error = Trixi.integrate_via_indices(u, mesh, equations, semi.solver, semi.cache; normalize=true) do u, i, j, element, equations, solver
+ x_node = Trixi.get_node_coords(semi.cache.elements.node_coordinates, equations, solver, i, j, element)
+ # We know that the discontinuity is a vertical line. Slightly augment the x value by a factor
+  # of unit roundoff to avoid the repeated value from the LGL nodes at the interface.
+ if i == 1
+    x_node = SVector(nextfloat(x_node[1]), x_node[2])
+  elseif i == nnodes(semi.solver)
+    x_node = SVector(prevfloat(x_node[1]), x_node[2])
+ end
+ u_local = Trixi.get_node_vars(u, equations, solver, i, j, element)
+ return lake_at_rest_error_two_level(u_local, x_node, equations)
+end
+
+# report the well-balancedness lake-at-rest error to the screen
+println("─"^100)
+println(" Lake-at-rest error for '", Trixi.get_name(equations), "' with ", summary(solver),
+ " at final time " * @sprintf("%10.8e", tspan[end]))
+
+@printf(" %-12s:", Trixi.pretty_form_utf(lake_at_rest_error))
+@printf(" % 10.8e", l1_well_balance_error)
+println()
+println("─"^100)
diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_three_mound_dam_break.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_three_mound_dam_break.jl
new file mode 100644
index 00000000000..65b0fcae462
--- /dev/null
+++ b/examples/unstructured_2d_dgsem/elixir_shallowwater_three_mound_dam_break.jl
@@ -0,0 +1,139 @@
+
+using Downloads: download
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# semidiscretization of the shallow water equations
+#
+# TODO: TrixiShallowWater: wet/dry example elixir
+
+
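+# Note: the wet/dry thresholds are set explicitly here instead of relying on the defaults.
+# `threshold_limiter` is the minimal water height enforced by the positivity limiter and by the
+# shift in the initial condition below, while `threshold_wet` controls when the wet/dry treatment
+# of the Chen and Noelle hydrostatic reconstruction considers a state dry (it is also used as the
+# outer water height in the outflow boundary condition below).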
+equations = ShallowWaterEquations2D(gravity_constant=9.81, H0=1.875,
+ threshold_limiter=1e-12, threshold_wet=1e-14)
+
+
+"""
+ initial_condition_three_mounds(x, t, equations::ShallowWaterEquations2D)
+
+Initial condition simulating a dam break. The bottom topography is given by one large and two smaller
+mounds. The mounds are flooded by the water for t > 0. To smooth the discontinuity, a logistic function
+is applied.
+
+The initial condition is taken from Section 6.3 of the paper:
+- Niklas Wintermeyer, Andrew R. Winters, Gregor J. Gassner and Timothy Warburton (2018)
+ An entropy stable discontinuous Galerkin method for the shallow water equations on
+ curvilinear meshes with wet/dry fronts accelerated by GPUs\n
+ [DOI: 10.1016/j.jcp.2018.08.038](https://doi.org/10.1016/j.jcp.2018.08.038)
+"""
+function initial_condition_three_mounds(x, t, equations::ShallowWaterEquations2D)
+
+ # Set the background values
+ v1 = 0.0
+ v2 = 0.0
+
+ x1, x2 = x
+ M_1 = 1 - 0.1 * sqrt( (x1 - 30.0)^2 + (x2 - 22.5)^2 )
+ M_2 = 1 - 0.1 * sqrt( (x1 - 30.0)^2 + (x2 - 7.5)^2 )
+ M_3 = 2.8 - 0.28 * sqrt( (x1 - 47.5)^2 + (x2 - 15.0)^2 )
+
+ b = max(0.0, M_1, M_2, M_3)
+
+ # use a logistic function to transfer water height value smoothly
+ L = equations.H0 # maximum of function
+ x0 = 8 # center point of function
+ k = -75.0 # sharpness of transfer
+
+ H = max(b, L / (1.0 + exp(-k * (x1 - x0))))
+
+ # Avoid division by zero by adjusting the initial condition with a small dry state threshold
+ # that defaults to 500*eps() ≈ 1e-13 in double precision and is set in the constructor above
+ # for the ShallowWaterEquations struct.
+ H = max(H, b + equations.threshold_limiter)
+ return prim2cons(SVector(H, v1, v2, b), equations)
+end
+
+initial_condition = initial_condition_three_mounds
+
+function boundary_condition_outflow(u_inner, normal_direction::AbstractVector, x, t,
+ surface_flux_function, equations::ShallowWaterEquations2D)
+  # Momentum and bottom topography are taken from the inner state; the water height is set to the (nearly dry) outer state
+ u_outer = SVector(equations.threshold_wet, u_inner[2], u_inner[3], u_inner[4])
+
+ # calculate the boundary flux
+ flux = surface_flux_function(u_inner, u_outer, normal_direction, equations)
+
+ return flux
+end
+
+boundary_conditions = Dict( :Bottom => boundary_condition_slip_wall,
+ :Top => boundary_condition_slip_wall,
+ :Right => boundary_condition_outflow,
+ :Left => boundary_condition_slip_wall )
+
+###############################################################################
+# Get the DG approximation space
+
+volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal)
+surface_flux = (FluxHydrostaticReconstruction(flux_hll_chen_noelle, hydrostatic_reconstruction_chen_noelle),
+ flux_nonconservative_chen_noelle)
+
+basis = LobattoLegendreBasis(4)
+
+indicator_sc = IndicatorHennemannGassnerShallowWater(equations, basis,
+ alpha_max=0.5,
+ alpha_min=0.001,
+ alpha_smooth=true,
+ variable=waterheight_pressure)
+volume_integral = VolumeIntegralShockCapturingHG(indicator_sc;
+ volume_flux_dg=volume_flux,
+ volume_flux_fv=surface_flux)
+
+solver = DGSEM(basis, surface_flux, volume_integral)
+
+###############################################################################
+# Get the unstructured quad mesh from a file (downloads the file if not available locally)
+
+default_meshfile = joinpath(@__DIR__, "mesh_three_mound.mesh")
+
+isfile(default_meshfile) || download("https://gist.githubusercontent.com/svengoldberg/c3c87fecb3fc6e46be7f0d1c7cb35f83/raw/e817ecd9e6c4686581d63c46128f9b6468d396d3/mesh_three_mound.mesh",
+ default_meshfile)
+
+meshfile = default_meshfile
+
+mesh = UnstructuredMesh2D(meshfile)
+
+# Create the semidiscretization object
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver;
+ boundary_conditions=boundary_conditions)
+
+###############################################################################
+# ODE solver
+
+tspan = (0.0, 20.0)
+ode = semidiscretize(semi, tspan)
+
+###############################################################################
+# Callbacks
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 1000
+analysis_callback = AnalysisCallback(semi, interval=analysis_interval)
+
+alive_callback = AliveCallback(analysis_interval=analysis_interval)
+
+save_solution = SaveSolutionCallback(interval=100,
+ save_initial_solution=true,
+ save_final_solution=true)
+
+callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution)
+
+###############################################################################
+# run the simulation
+
+stage_limiter! = PositivityPreservingLimiterShallowWater(variables=(Trixi.waterheight,))
+
+sol = solve(ode, SSPRK43(stage_limiter!);
+ ode_default_options()..., callback=callbacks);
+summary_callback() # print the timer summary
diff --git a/src/Trixi.jl b/src/Trixi.jl
index 34a1977d4f5..cf6158e29eb 100644
--- a/src/Trixi.jl
+++ b/src/Trixi.jl
@@ -162,9 +162,13 @@ export flux, flux_central, flux_lax_friedrichs, flux_hll, flux_hllc, flux_hlle,
flux_fjordholm_etal, flux_nonconservative_fjordholm_etal, flux_es_fjordholm_etal,
flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal,
hydrostatic_reconstruction_audusse_etal, flux_nonconservative_audusse_etal,
+# TODO: TrixiShallowWater: move anything with "chen_noelle" to new file
+ hydrostatic_reconstruction_chen_noelle, flux_nonconservative_chen_noelle,
+ flux_hll_chen_noelle,
FluxPlusDissipation, DissipationGlobalLaxFriedrichs, DissipationLocalLaxFriedrichs,
FluxLaxFriedrichs, max_abs_speed_naive,
FluxHLL, min_max_speed_naive, min_max_speed_davis, min_max_speed_einfeldt,
+ min_max_speed_chen_noelle,
FluxLMARS,
FluxRotated,
flux_shima_etal_turbo, flux_ranocha_turbo,
@@ -215,6 +219,8 @@ export DG,
VolumeIntegralFluxDifferencing,
VolumeIntegralPureLGLFiniteVolume,
VolumeIntegralShockCapturingHG, IndicatorHennemannGassner,
+# TODO: TrixiShallowWater: move new indicator
+ IndicatorHennemannGassnerShallowWater,
VolumeIntegralUpwind,
SurfaceIntegralWeakForm, SurfaceIntegralStrongForm,
SurfaceIntegralUpwind,
@@ -248,7 +254,8 @@ export ControllerThreeLevel, ControllerThreeLevelCombined,
IndicatorNeuralNetwork, NeuralNetworkPerssonPeraire, NeuralNetworkRayHesthaven,
NeuralNetworkCNN
-export PositivityPreservingLimiterZhangShu
+# TODO: TrixiShallowWater: move new limiter
+export PositivityPreservingLimiterZhangShu, PositivityPreservingLimiterShallowWater
export trixi_include, examples_dir, get_examples, default_example,
default_example_unstructured, ode_default_options
diff --git a/src/callbacks_stage/callbacks_stage.jl b/src/callbacks_stage/callbacks_stage.jl
index 7609f9b341d..ab0f34efb78 100644
--- a/src/callbacks_stage/callbacks_stage.jl
+++ b/src/callbacks_stage/callbacks_stage.jl
@@ -6,4 +6,6 @@
#! format: noindent
include("positivity_zhang_shu.jl")
+# TODO: TrixiShallowWater: move specific limiter file
+include("positivity_shallow_water.jl")
end # @muladd
diff --git a/src/callbacks_stage/positivity_shallow_water.jl b/src/callbacks_stage/positivity_shallow_water.jl
new file mode 100644
index 00000000000..36276026fe9
--- /dev/null
+++ b/src/callbacks_stage/positivity_shallow_water.jl
@@ -0,0 +1,89 @@
+# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
+# Since these FMAs can increase the performance of many numerical algorithms,
+# we need to opt-in explicitly.
+# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
+@muladd begin
+#! format: noindent
+
+# TODO: TrixiShallowWater: generic wet/dry limiter
+
+"""
+ PositivityPreservingLimiterShallowWater(; variables)
+
+The limiter is specifically designed for the shallow water equations.
+It is applied to all scalar `variables` in their given order
+using the defined `threshold_limiter` from the [`ShallowWaterEquations1D`](@ref) struct
+or the [`ShallowWaterEquations2D`](@ref) struct to determine the minimal acceptable values.
+The order of the `variables` is important and might have a strong influence
+on the robustness.
+
+As opposed to the standard version of the [`PositivityPreservingLimiterZhangShu`](@ref),
+nodes with a water height below the `threshold_limiter` are treated in a special way.
+To avoid numerical problems caused by velocities close to zero,
+the velocity is cut off such that the node can be identified as "dry". The special feature of the
+`ShallowWaterEquations` used here is that the bottom topography is stored as an additional
+quantity in the solution vector `u`. However, the value of the bottom topography
+should not be changed. That is why it is not limited.
+
+After the limiting process is applied to all degrees of freedom, for safety reasons,
+the `threshold_limiter` is applied again on all the DG nodes in order to avoid water heights below
+the threshold. This is necessary because, if the cell mean value is below the threshold before the
+limiter is applied, there could still be dry nodes afterwards due to the logic of the limiter.
+
+This fully-discrete positivity-preserving limiter is based on the work of
+- Zhang, Shu (2011)
+ Maximum-principle-satisfying and positivity-preserving high-order schemes
+ for conservation laws: survey and new developments
+ [doi: 10.1098/rspa.2011.0153](https://doi.org/10.1098/rspa.2011.0153)
+"""
+struct PositivityPreservingLimiterShallowWater{N, Variables <: NTuple{N, Any}}
+ variables::Variables
+end
+
+function PositivityPreservingLimiterShallowWater(; variables)
+ PositivityPreservingLimiterShallowWater(variables)
+end
+
+function (limiter!::PositivityPreservingLimiterShallowWater)(u_ode, integrator,
+ semi::AbstractSemidiscretization,
+ t)
+ u = wrap_array(u_ode, semi)
+ @trixi_timeit timer() "positivity-preserving limiter" limiter_shallow_water!(u,
+ limiter!.variables,
+ mesh_equations_solver_cache(semi)...)
+end
+
+# Iterate over tuples in a type-stable way using "lispy tuple programming",
+# similar to https://stackoverflow.com/a/55849398:
+# Iterating over tuples of different functions isn't type-stable in general
+# but accessing the first element of a tuple is type-stable. Hence, it's good
+# to process one element at a time and replace iteration by recursion here.
+# Note that you shouldn't use this with too many elements per tuple since the
+# compile times can increase otherwise - but a handful of elements per tuple
+# is definitely fine.
+function limiter_shallow_water!(u, variables::NTuple{N, Any},
+ mesh,
+ equations::Union{ShallowWaterEquations1D,
+ ShallowWaterEquations2D},
+ solver, cache) where {N}
+ variable = first(variables)
+ remaining_variables = Base.tail(variables)
+
+ limiter_shallow_water!(u, equations.threshold_limiter, variable, mesh, equations,
+ solver, cache)
+ limiter_shallow_water!(u, remaining_variables, mesh, equations, solver, cache)
+ return nothing
+end
+
+# terminate the type-stable iteration over tuples
+function limiter_shallow_water!(u, variables::Tuple{},
+ mesh,
+ equations::Union{ShallowWaterEquations1D,
+ ShallowWaterEquations2D},
+ solver, cache)
+ nothing
+end
+
+include("positivity_shallow_water_dg1d.jl")
+include("positivity_shallow_water_dg2d.jl")
+end # @muladd
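For orientation, here is a minimal sketch of how the new limiter is meant to be attached as a stage limiter of an SSP Runge-Kutta method in an elixir; the mesh, fluxes, and tolerances below are illustrative choices rather than values taken from this patch (the setup is fully wet, so the limiter stays inactive and only the wiring is shown).

```julia
# Hypothetical 1D setup; all numerical parameters here are illustrative choices.
using OrdinaryDiffEq, Trixi

equations = ShallowWaterEquations1D(gravity_constant = 9.81)

volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal)
solver = DGSEM(polydeg = 3,
               surface_flux = (flux_lax_friedrichs, flux_nonconservative_fjordholm_etal),
               volume_integral = VolumeIntegralFluxDifferencing(volume_flux))

mesh = TreeMesh(0.0, sqrt(2.0), initial_refinement_level = 3, n_cells_max = 10_000)

semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition_convergence_test,
                                    solver, source_terms = source_terms_convergence_test)

ode = semidiscretize(semi, (0.0, 1.0))

# Attach the new limiter as a stage limiter of an SSP Runge-Kutta method.
# (This fully wet setup does not trigger the limiter; it only shows the wiring.)
stage_limiter! = PositivityPreservingLimiterShallowWater(variables = (Trixi.waterheight,))

sol = solve(ode, SSPRK43(stage_limiter!);
            abstol = 1.0e-7, reltol = 1.0e-7, save_everystep = false)
```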
diff --git a/src/callbacks_stage/positivity_shallow_water_dg1d.jl b/src/callbacks_stage/positivity_shallow_water_dg1d.jl
new file mode 100644
index 00000000000..13c6866e895
--- /dev/null
+++ b/src/callbacks_stage/positivity_shallow_water_dg1d.jl
@@ -0,0 +1,89 @@
+# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
+# Since these FMAs can increase the performance of many numerical algorithms,
+# we need to opt-in explicitly.
+# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
+@muladd begin
+#! format: noindent
+
+# TODO: TrixiShallowWater: 1D wet/dry limiter should move
+
+function limiter_shallow_water!(u, threshold::Real, variable,
+ mesh::AbstractMesh{1},
+ equations::ShallowWaterEquations1D,
+ dg::DGSEM, cache)
+ @unpack weights = dg.basis
+
+ @threaded for element in eachelement(dg, cache)
+ # determine minimum value
+ value_min = typemax(eltype(u))
+ for i in eachnode(dg)
+ u_node = get_node_vars(u, equations, dg, i, element)
+ value_min = min(value_min, variable(u_node, equations))
+ end
+
+ # detect if limiting is necessary
+ value_min < threshold || continue
+
+ # compute mean value
+ u_mean = zero(get_node_vars(u, equations, dg, 1, element))
+ for i in eachnode(dg)
+ u_node = get_node_vars(u, equations, dg, i, element)
+ u_mean += u_node * weights[i]
+ end
+ # note that the reference element is [-1,1]^ndims(dg), thus the weights sum to 2
+ u_mean = u_mean / 2^ndims(mesh)
+
+ # We compute the value directly with the mean values, as we assume that
+ # Jensen's inequality holds (e.g. pressure for compressible Euler equations).
+ value_mean = variable(u_mean, equations)
+ theta = (value_mean - threshold) / (value_mean - value_min)
+ for i in eachnode(dg)
+ u_node = get_node_vars(u, equations, dg, i, element)
+
+            # Cut off the velocity if the water height is smaller than the threshold
+
+ h_node, h_v_node, b_node = u_node
+ h_mean, h_v_mean, _ = u_mean # b_mean is not used as b_node must not be overwritten
+
+ # Set them both to zero to apply linear combination correctly
+ if h_node <= threshold
+ h_v_node = zero(eltype(u))
+ h_v_mean = zero(eltype(u))
+ end
+
+ u_node = SVector(h_node, h_v_node, b_node)
+ u_mean = SVector(h_mean, h_v_mean, b_node)
+
+            # If the velocity was cut off, only the water height is averaged, because the
+            # velocity is set to zero and passed on unchanged.
+            # Otherwise, the velocity is averaged as well.
+ # Note that the auxiliary bottom topography variable `b` is never limited.
+ set_node_vars!(u, theta * u_node + (1 - theta) * u_mean,
+ equations, dg, i, element)
+ end
+ end
+
+ # "Safety" application of the wet/dry thresholds over all the DG nodes
+ # on the current `element` after the limiting above in order to avoid dry nodes.
+    # If `value_mean < threshold` before the limiter is applied, there
+    # could still be dry nodes afterwards due to the logic of the limiter.
+ @threaded for element in eachelement(dg, cache)
+ for i in eachnode(dg)
+ u_node = get_node_vars(u, equations, dg, i, element)
+
+ h, hv, b = u_node
+
+ if h <= threshold
+ h = threshold
+ hv = zero(eltype(u))
+ end
+
+ u_node = SVector(h, hv, b)
+
+ set_node_vars!(u, u_node, equations, dg, i, element)
+ end
+ end
+
+ return nothing
+end
+end # @muladd
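As a quick, self-contained illustration of the convex-combination step implemented above, the following plain-Julia sketch limits hypothetical nodal water heights on a three-node Gauss-Lobatto element. It only shows the water height part; in the actual routine the velocity of nearly dry nodes is zeroed beforehand.

```julia
threshold = 500 * eps()           # default `threshold_limiter` in double precision
h_nodes = [1.0e-15, 0.3, 0.7]     # made-up nodal water heights, one of them nearly dry
weights = [1 / 3, 4 / 3, 1 / 3]   # Gauss-Lobatto quadrature weights for 3 nodes on [-1, 1]

h_mean = sum(h_nodes .* weights) / 2                        # reference element has length 2
theta = (h_mean - threshold) / (h_mean - minimum(h_nodes))  # blending coefficient
h_limited = theta .* h_nodes .+ (1 - theta) .* h_mean

# minimum(h_limited) ≈ threshold, i.e. the nearly dry node is lifted onto the cutoff,
# while the element mean value is preserved by the convex combination.
```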
diff --git a/src/callbacks_stage/positivity_shallow_water_dg2d.jl b/src/callbacks_stage/positivity_shallow_water_dg2d.jl
new file mode 100644
index 00000000000..da3a25fdcf4
--- /dev/null
+++ b/src/callbacks_stage/positivity_shallow_water_dg2d.jl
@@ -0,0 +1,90 @@
+# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
+# Since these FMAs can increase the performance of many numerical algorithms,
+# we need to opt-in explicitly.
+# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
+@muladd begin
+#! format: noindent
+
+# TODO: TrixiShallowWater: 2D wet/dry limiter should move
+
+function limiter_shallow_water!(u, threshold::Real, variable,
+ mesh::AbstractMesh{2},
+ equations::ShallowWaterEquations2D, dg::DGSEM, cache)
+ @unpack weights = dg.basis
+
+ @threaded for element in eachelement(dg, cache)
+ # determine minimum value
+ value_min = typemax(eltype(u))
+ for j in eachnode(dg), i in eachnode(dg)
+ u_node = get_node_vars(u, equations, dg, i, j, element)
+ value_min = min(value_min, variable(u_node, equations))
+ end
+
+ # detect if limiting is necessary
+ value_min < threshold || continue
+
+ # compute mean value
+ u_mean = zero(get_node_vars(u, equations, dg, 1, 1, element))
+ for j in eachnode(dg), i in eachnode(dg)
+ u_node = get_node_vars(u, equations, dg, i, j, element)
+ u_mean += u_node * weights[i] * weights[j]
+ end
+ # note that the reference element is [-1,1]^ndims(dg), thus the weights sum to 2
+ u_mean = u_mean / 2^ndims(mesh)
+
+ # We compute the value directly with the mean values, as we assume that
+ # Jensen's inequality holds (e.g. pressure for compressible Euler equations).
+ value_mean = variable(u_mean, equations)
+ theta = (value_mean - threshold) / (value_mean - value_min)
+ for j in eachnode(dg), i in eachnode(dg)
+ u_node = get_node_vars(u, equations, dg, i, j, element)
+
+            # Cut off the velocities if the water height is smaller than the threshold
+
+ h_node, h_v1_node, h_v2_node, b_node = u_node
+ h_mean, h_v1_mean, h_v2_mean, _ = u_mean # b_mean is not used as it must not be overwritten
+
+ if h_node <= threshold
+ h_v1_node = zero(eltype(u))
+ h_v2_node = zero(eltype(u))
+ h_v1_mean = zero(eltype(u))
+ h_v2_mean = zero(eltype(u))
+ end
+
+ u_node = SVector(h_node, h_v1_node, h_v2_node, b_node)
+ u_mean = SVector(h_mean, h_v1_mean, h_v2_mean, b_node)
+
+            # If the velocities were cut off, only the water height is averaged, because the
+            # velocities are set to zero and passed on unchanged.
+            # Otherwise, the velocities are averaged as well.
+ # Note that the auxiliary bottom topography variable `b` is never limited.
+ set_node_vars!(u, theta * u_node + (1 - theta) * u_mean,
+ equations, dg, i, j, element)
+ end
+ end
+
+ # "Safety" application of the wet/dry thresholds over all the DG nodes
+ # on the current `element` after the limiting above in order to avoid dry nodes.
+    # If `value_mean < threshold` before the limiter is applied, there
+    # could still be dry nodes afterwards due to the logic of the limiter.
+ @threaded for element in eachelement(dg, cache)
+ for j in eachnode(dg), i in eachnode(dg)
+ u_node = get_node_vars(u, equations, dg, i, j, element)
+
+ h, h_v1, h_v2, b = u_node
+
+ if h <= threshold
+ h = threshold
+ h_v1 = zero(eltype(u))
+ h_v2 = zero(eltype(u))
+ end
+
+ u_node = SVector(h, h_v1, h_v2, b)
+
+ set_node_vars!(u, u_node, equations, dg, i, j, element)
+ end
+ end
+
+ return nothing
+end
+end # @muladd
diff --git a/src/equations/numerical_fluxes.jl b/src/equations/numerical_fluxes.jl
index abd9d66c490..87010275f2c 100644
--- a/src/equations/numerical_fluxes.jl
+++ b/src/equations/numerical_fluxes.jl
@@ -304,6 +304,29 @@ See [`FluxHLL`](@ref).
"""
const flux_hll = FluxHLL()
+# TODO: TrixiShallowWater: move the chen_noelle flux structure to the new package
+
+# An empty version of the `min_max_speed_chen_noelle` function is declared here
+# in order to create a dimension-agnostic version of `flux_hll_chen_noelle`.
+# The full description of this wave speed estimate can be found in the docstrings
+# for `min_max_speed_chen_noelle` in `shallow_water_1d.jl` or `shallow_water_2d.jl`.
+function min_max_speed_chen_noelle end
+
+"""
+ flux_hll_chen_noelle = FluxHLL(min_max_speed_chen_noelle)
+
+An instance of [`FluxHLL`](@ref) specific to the shallow water equations that
+uses the wave speed estimates from [`min_max_speed_chen_noelle`](@ref).
+This HLL flux is guaranteed to have zero numerical mass flux out of a "dry" element,
+maintain positivity of the water height, and satisfy an entropy inequality.
+
+For complete details see Section 2.4 of the following reference
+- Guoxian Chen and Sebastian Noelle (2017)
+ A new hydrostatic reconstruction scheme based on subcell reconstructions
+ [DOI: 10.1137/15M1053074](https://doi.org/10.1137/15M1053074)
+"""
+const flux_hll_chen_noelle = FluxHLL(min_max_speed_chen_noelle)
+
"""
flux_shima_etal_turbo(u_ll, u_rr, orientation_or_normal_direction, equations)
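A sketch of how this new flux might be combined with the Chen-Noelle hydrostatic reconstruction and its nonconservative counterpart into a surface flux for the shallow water equations; this assumes the new `*_chen_noelle` routines introduced by this patch are exported.

```julia
using Trixi

surface_flux = (FluxHydrostaticReconstruction(flux_hll_chen_noelle,
                                              hydrostatic_reconstruction_chen_noelle),
                flux_nonconservative_chen_noelle)

# `surface_flux` can then be passed to a solver, e.g.
# DGSEM(polydeg = 3, surface_flux = surface_flux, volume_integral = ...)
```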
diff --git a/src/equations/shallow_water_1d.jl b/src/equations/shallow_water_1d.jl
index c33b31fca81..57bcb1212e1 100644
--- a/src/equations/shallow_water_1d.jl
+++ b/src/equations/shallow_water_1d.jl
@@ -6,7 +6,7 @@
#! format: noindent
@doc raw"""
- ShallowWaterEquations1D(gravity, H0)
+    ShallowWaterEquations1D(; gravity, H0 = 0, threshold_limiter = nothing, threshold_wet = nothing)
Shallow water equations (SWE) in one space dimension. The equations are given by
```math
@@ -24,6 +24,12 @@ also defines the total water height as ``H = h + b``.
The additional quantity ``H_0`` is also available to store a reference value for the total water height that
is useful to set initial conditions or test the "lake-at-rest" well-balancedness.
+In addition, there are two thresholds which prevent numerical problems as well as instabilities. Neither of them
+has to be passed, as default values are defined within the struct. The first one, `threshold_limiter`, is
+used in [`PositivityPreservingLimiterShallowWater`](@ref) on the water height, as a (small) shift on the initial
+condition and cutoff before the next time step. The second one, `threshold_wet`, is applied to the water height to
+define when the flow is "wet" before calculating the numerical flux.
+
The bottom topography function ``b(x)`` is set inside the initial condition routine
for a particular problem setup. To test the conservative form of the SWE one can set the bottom topography
variable `b` to zero.
@@ -45,16 +51,35 @@ References for the SWE are many but a good introduction is available in Chapter
[DOI: 10.1017/CBO9780511791253](https://doi.org/10.1017/CBO9780511791253)
"""
struct ShallowWaterEquations1D{RealT <: Real} <: AbstractShallowWaterEquations{1, 3}
+ # TODO: TrixiShallowWater: where should the `threshold_limiter` and `threshold_wet` live?
+ # how to "properly" export these constants across the two packages?
gravity::RealT # gravitational constant
H0::RealT # constant "lake-at-rest" total water height
+ # `threshold_limiter` used in `PositivityPreservingLimiterShallowWater` on water height,
+ # as a (small) shift on the initial condition and cutoff before the next time step.
+ # Default is 500*eps() which in double precision is ≈1e-13.
+ threshold_limiter::RealT
+ # `threshold_wet` applied on water height to define when the flow is "wet"
+ # before calculating the numerical flux.
+ # Default is 5*eps() which in double precision is ≈1e-15.
+ threshold_wet::RealT
end
# Allow for flexibility to set the gravitational constant within an elixir depending on the
# application where `gravity_constant=1.0` or `gravity_constant=9.81` are common values.
# The reference total water height H0 defaults to 0.0 but is used for the "lake-at-rest"
-# well-balancedness test cases
-function ShallowWaterEquations1D(; gravity_constant, H0 = 0.0)
- ShallowWaterEquations1D(gravity_constant, H0)
+# well-balancedness test cases.
+# Strict default values for thresholds that performed well in many numerical experiments
+function ShallowWaterEquations1D(; gravity_constant, H0 = zero(gravity_constant),
+ threshold_limiter = nothing, threshold_wet = nothing)
+ T = promote_type(typeof(gravity_constant), typeof(H0))
+ if threshold_limiter === nothing
+ threshold_limiter = 500 * eps(T)
+ end
+ if threshold_wet === nothing
+ threshold_wet = 5 * eps(T)
+ end
+ ShallowWaterEquations1D(gravity_constant, H0, threshold_limiter, threshold_wet)
end
have_nonconservative_terms(::ShallowWaterEquations1D) = True()
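A brief usage sketch of the extended keyword constructor; the numerical values are arbitrary illustrations.

```julia
using Trixi

# Defaults: threshold_limiter = 500 * eps(), threshold_wet = 5 * eps()
equations_default = ShallowWaterEquations1D(gravity_constant = 9.81)

# Both thresholds can be overridden explicitly if a particular setup requires it
equations_custom = ShallowWaterEquations1D(gravity_constant = 9.81, H0 = 1.0,
                                           threshold_limiter = 1.0e-12,
                                           threshold_wet = 1.0e-14)
```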
@@ -307,6 +332,54 @@ Further details on the hydrostatic reconstruction and its motivation can be foun
z)
end
+# TODO: TrixiShallowWater: move wet/dry specific routine
+"""
+ flux_nonconservative_chen_noelle(u_ll, u_rr,
+ orientation::Integer,
+ equations::ShallowWaterEquations1D)
+
+Non-symmetric two-point surface flux that discretizes the nonconservative (source) term.
+The discretization uses the `hydrostatic_reconstruction_chen_noelle` on the conservative
+variables.
+
+Should be used together with [`FluxHydrostaticReconstruction`](@ref) and
+[`hydrostatic_reconstruction_chen_noelle`](@ref) in the surface flux to ensure consistency.
+
+Further details on the hydrostatic reconstruction and its motivation can be found in
+- Guoxian Chen and Sebastian Noelle (2017)
+ A new hydrostatic reconstruction scheme based on subcell reconstructions
+ [DOI:10.1137/15M1053074](https://dx.doi.org/10.1137/15M1053074)
+"""
+@inline function flux_nonconservative_chen_noelle(u_ll, u_rr,
+ orientation::Integer,
+ equations::ShallowWaterEquations1D)
+
+ # Pull the water height and bottom topography on the left
+ h_ll, _, b_ll = u_ll
+ h_rr, _, b_rr = u_rr
+
+ H_ll = h_ll + b_ll
+ H_rr = h_rr + b_rr
+
+ b_star = min(max(b_ll, b_rr), min(H_ll, H_rr))
+
+ # Create the hydrostatic reconstruction for the left solution state
+ u_ll_star, _ = hydrostatic_reconstruction_chen_noelle(u_ll, u_rr, equations)
+
+    # Copy the reconstructed water height for easier-to-read code
+ h_ll_star = u_ll_star[1]
+
+ z = zero(eltype(u_ll))
+ # Includes two parts:
+ # (i) Diagonal (consistent) term from the volume flux that uses `b_ll` to avoid
+ # cross-averaging across a discontinuous bottom topography
+ # (ii) True surface part that uses `h_ll` and `h_ll_star` to handle discontinuous bathymetry
+ return SVector(z,
+ equations.gravity * h_ll * b_ll -
+ equations.gravity * (h_ll_star + h_ll) * (b_ll - b_star),
+ z)
+end
+
"""
flux_fjordholm_etal(u_ll, u_rr, orientation,
equations::ShallowWaterEquations1D)
@@ -381,7 +454,7 @@ end
A particular type of hydrostatic reconstruction on the water height to guarantee well-balancedness
for a general bottom topography [`ShallowWaterEquations1D`](@ref). The reconstructed solution states
-`u_ll_star` and `u_rr_star` variables are used to evaluate the surface numerical flux at the interface.
+`u_ll_star` and `u_rr_star` variables are then used to evaluate the surface numerical flux at the interface.
Use in combination with the generic numerical flux routine [`FluxHydrostaticReconstruction`](@ref).
Further details on this hydrostatic reconstruction and its motivation can be found in
@@ -410,6 +483,67 @@ Further details on this hydrostatic reconstruction and its motivation can be fou
return u_ll_star, u_rr_star
end
+# TODO: TrixiShallowWater: move wet/dry specific routine
+"""
+ hydrostatic_reconstruction_chen_noelle(u_ll, u_rr, orientation::Integer,
+ equations::ShallowWaterEquations1D)
+
+A particular type of hydrostatic reconstruction of the water height to guarantee well-balancedness
+for a general bottom topography of the [`ShallowWaterEquations1D`](@ref). The reconstructed solution states
+`u_ll_star` and `u_rr_star` variables are used to evaluate the surface numerical flux at the interface.
+The key idea is a linear reconstruction of the bottom and water height at the interfaces using subcells.
+Use in combination with the generic numerical flux routine [`FluxHydrostaticReconstruction`](@ref).
+
+Further details on this hydrostatic reconstruction and its motivation can be found in
+- Guoxian Chen and Sebastian Noelle (2017)
+ A new hydrostatic reconstruction scheme based on subcell reconstructions
+ [DOI:10.1137/15M1053074](https://dx.doi.org/10.1137/15M1053074)
+"""
+@inline function hydrostatic_reconstruction_chen_noelle(u_ll, u_rr,
+ equations::ShallowWaterEquations1D)
+ # Unpack left and right water heights and bottom topographies
+ h_ll, _, b_ll = u_ll
+ h_rr, _, b_rr = u_rr
+
+ # Get the velocities on either side
+ v_ll = velocity(u_ll, equations)
+ v_rr = velocity(u_rr, equations)
+
+ H_ll = b_ll + h_ll
+ H_rr = b_rr + h_rr
+
+ b_star = min(max(b_ll, b_rr), min(H_ll, H_rr))
+
+ # Compute the reconstructed water heights
+ h_ll_star = min(H_ll - b_star, h_ll)
+ h_rr_star = min(H_rr - b_star, h_rr)
+
+ # Set the water height to be at least the value stored in the variable threshold after
+ # the hydrostatic reconstruction is applied and before the numerical flux is calculated
+    # to avoid numerical problems with arbitrarily small values. Interfaces with a water height
+    # lower than or equal to the threshold can be declared as dry.
+    # The default value for `threshold_wet`, ≈5*eps() (≈1e-15 in double precision), is set
+    # in the `ShallowWaterEquations1D` struct. This threshold value can be changed in the constructor
+    # call of this equation struct in an elixir.
+ threshold = equations.threshold_wet
+
+ if (h_ll_star <= threshold)
+ h_ll_star = threshold
+ v_ll = zero(v_ll)
+ end
+
+ if (h_rr_star <= threshold)
+ h_rr_star = threshold
+ v_rr = zero(v_rr)
+ end
+
+    # Create the conservative variables using the reconstructed water heights
+ u_ll_star = SVector(h_ll_star, h_ll_star * v_ll, b_ll)
+ u_rr_star = SVector(h_rr_star, h_rr_star * v_rr, b_rr)
+
+ return u_ll_star, u_rr_star
+end
+
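To make the effect of this reconstruction concrete, here is a small hand-worked sketch at a wet/dry interface that reproduces the formulas above in plain Julia with made-up states.

```julia
# Left state is wet, right state is dry and sits on higher bottom topography
h_ll, b_ll = 1.0, 0.0
h_rr, b_rr = 0.0, 1.5

H_ll, H_rr = h_ll + b_ll, h_rr + b_rr           # total water heights
b_star = min(max(b_ll, b_rr), min(H_ll, H_rr))  # = 1.0

h_ll_star = min(H_ll - b_star, h_ll)            # = 0.0
h_rr_star = min(H_rr - b_star, h_rr)            # = 0.0

# Both reconstructed heights fall below `threshold_wet`, so the velocities at this
# interface are zeroed and no mass is fluxed into or out of the dry element.
```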
# Calculate maximum wave speed for local Lax-Friedrichs-type dissipation as the
# maximum velocity magnitude plus the maximum speed of sound
@inline function max_abs_speed_naive(u_ll, u_rr, orientation::Integer,
@@ -474,6 +608,39 @@ end
return λ_min, λ_max
end
+# TODO: TrixiShallowWater: move wet/dry specific routine
+"""
+ min_max_speed_chen_noelle(u_ll, u_rr, orientation::Integer,
+ equations::ShallowWaterEquations1D)
+
+The approximate speeds for the HLL-type numerical flux used by Chen and Noelle for their
+hydrostatic reconstruction. As they state in the paper, these speeds are chosen for the numerical
+flux to ensure positivity and to satisfy an entropy inequality.
+
+Further details on this hydrostatic reconstruction and its motivation can be found in
+- Guoxian Chen and Sebastian Noelle (2017)
+ A new hydrostatic reconstruction scheme based on subcell reconstructions
+ [DOI:10.1137/15M1053074](https://dx.doi.org/10.1137/15M1053074)
+"""
+@inline function min_max_speed_chen_noelle(u_ll, u_rr, orientation::Integer,
+ equations::ShallowWaterEquations1D)
+ # Get the velocity quantities
+ v_ll = velocity(u_ll, equations)
+ v_rr = velocity(u_rr, equations)
+
+ # Calculate the wave celerity on the left and right
+ h_ll = waterheight(u_ll, equations)
+ h_rr = waterheight(u_rr, equations)
+
+ a_ll = sqrt(equations.gravity * h_ll)
+ a_rr = sqrt(equations.gravity * h_rr)
+
+ λ_min = min(v_ll - a_ll, v_rr - a_rr, zero(eltype(u_ll)))
+ λ_max = max(v_ll + a_ll, v_rr + a_rr, zero(eltype(u_ll)))
+
+ return λ_min, λ_max
+end
+
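For reference, the wave speed estimates implemented above can be stated compactly as (cf. Equation (2.20) in Chen and Noelle):

```math
\lambda^- = \min\big(v_{ll} - \sqrt{g \, h_{ll}},\; v_{rr} - \sqrt{g \, h_{rr}},\; 0\big), \qquad
\lambda^+ = \max\big(v_{ll} + \sqrt{g \, h_{ll}},\; v_{rr} + \sqrt{g \, h_{rr}},\; 0\big).
```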
# More refined estimates for minimum and maximum wave speeds for HLL-type fluxes
@inline function min_max_speed_davis(u_ll, u_rr, orientation::Integer,
equations::ShallowWaterEquations1D)
@@ -636,9 +803,20 @@ end
end
# Calculate the error for the "lake-at-rest" test case where H = h+b should
-# be a constant value over time
+# be a constant value over time. Note that this assumes there is a single reference
+# water height `H0` with which to compare.
+#
+# TODO: TrixiShallowWater: where should `threshold_limiter` live? May need
+# to modify or have different versions of the `lake_at_rest_error` function
@inline function lake_at_rest_error(u, equations::ShallowWaterEquations1D)
h, _, b = u
- return abs(equations.H0 - (h + b))
+
+ # For well-balancedness testing with possible wet/dry regions the reference
+ # water height `H0` accounts for the possibility that the bottom topography
+    # can emerge out of the water as well as for the threshold offset that avoids
+    # division by a "hard" zero water height.
+ H0_wet_dry = max(equations.H0, b + equations.threshold_limiter)
+
+ return abs(H0_wet_dry - (h + b))
end
end # @muladd
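A tiny plain-Julia sketch of the modified error at a single dry node on emerged topography, with made-up values.

```julia
H0 = 0.5                          # reference lake-at-rest water height
threshold_limiter = 500 * eps()
h, b = threshold_limiter, 0.8     # node where the bottom topography emerges from the water

H0_wet_dry = max(H0, b + threshold_limiter)  # the reference height follows the emerged bottom
err = abs(H0_wet_dry - (h + b))              # == 0.0, the dry node stays "at rest"
```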
diff --git a/src/equations/shallow_water_2d.jl b/src/equations/shallow_water_2d.jl
index 9e227cd4a77..a81fddeed49 100644
--- a/src/equations/shallow_water_2d.jl
+++ b/src/equations/shallow_water_2d.jl
@@ -6,7 +6,7 @@
#! format: noindent
@doc raw"""
- ShallowWaterEquations2D(gravity, H0)
+ ShallowWaterEquations2D(; gravity, H0 = 0, threshold_limiter = nothing, threshold_wet = nothing)
Shallow water equations (SWE) in two space dimensions. The equations are given by
```math
@@ -27,6 +27,12 @@ also defines the total water height as ``H = h + b``.
The additional quantity ``H_0`` is also available to store a reference value for the total water height that
is useful to set initial conditions or test the "lake-at-rest" well-balancedness.
+In addition, there are two thresholds which prevent numerical problems as well as instabilities. Neither of them
+has to be passed, as default values are defined within the struct. The first one, `threshold_limiter`, is
+used in [`PositivityPreservingLimiterShallowWater`](@ref) on the water height, as a (small) shift on the initial
+condition and cutoff before the next time step. The second one, `threshold_wet`, is applied to the water height to
+define when the flow is "wet" before calculating the numerical flux.
+
The bottom topography function ``b(x,y)`` is set inside the initial condition routine
for a particular problem setup. To test the conservative form of the SWE one can set the bottom topography
variable `b` to zero.
@@ -48,16 +54,35 @@ References for the SWE are many but a good introduction is available in Chapter
[DOI: 10.1017/CBO9780511791253](https://doi.org/10.1017/CBO9780511791253)
"""
struct ShallowWaterEquations2D{RealT <: Real} <: AbstractShallowWaterEquations{2, 4}
+ # TODO: TrixiShallowWater: where should the `threshold_limiter` and `threshold_wet` live?
+ # how to "properly" export these constants across the two packages?
gravity::RealT # gravitational constant
H0::RealT # constant "lake-at-rest" total water height
+ # `threshold_limiter` used in `PositivityPreservingLimiterShallowWater` on water height,
+ # as a (small) shift on the initial condition and cutoff before the next time step.
+ # Default is 500*eps() which in double precision is ≈1e-13.
+ threshold_limiter::RealT
+ # `threshold_wet` applied on water height to define when the flow is "wet"
+ # before calculating the numerical flux.
+ # Default is 5*eps() which in double precision is ≈1e-15.
+ threshold_wet::RealT
end
# Allow for flexibility to set the gravitational constant within an elixir depending on the
# application where `gravity_constant=1.0` or `gravity_constant=9.81` are common values.
# The reference total water height H0 defaults to 0.0 but is used for the "lake-at-rest"
-# well-balancedness test cases
-function ShallowWaterEquations2D(; gravity_constant, H0 = 0.0)
- ShallowWaterEquations2D(gravity_constant, H0)
+# well-balancedness test cases.
+# Strict default values for thresholds that performed well in many numerical experiments
+function ShallowWaterEquations2D(; gravity_constant, H0 = zero(gravity_constant),
+ threshold_limiter = nothing, threshold_wet = nothing)
+ T = promote_type(typeof(gravity_constant), typeof(H0))
+ if threshold_limiter === nothing
+ threshold_limiter = 500 * eps(T)
+ end
+ if threshold_wet === nothing
+ threshold_wet = 5 * eps(T)
+ end
+ ShallowWaterEquations2D(gravity_constant, H0, threshold_limiter, threshold_wet)
end
have_nonconservative_terms(::ShallowWaterEquations2D) = True()
@@ -431,6 +456,69 @@ Further details for the hydrostatic reconstruction and its motivation can be fou
return u_ll_star, u_rr_star
end
+# TODO: TrixiShallowWater: move wet/dry specific routine
+"""
+ hydrostatic_reconstruction_chen_noelle(u_ll, u_rr, orientation::Integer,
+ equations::ShallowWaterEquations2D)
+
+A particular type of hydrostatic reconstruction of the water height to guarantee well-balancedness
+for a general bottom topography of the [`ShallowWaterEquations2D`](@ref). The reconstructed solution states
+`u_ll_star` and `u_rr_star` variables are then used to evaluate the surface numerical flux at the interface.
+The key idea is a linear reconstruction of the bottom and water height at the interfaces using subcells.
+Use in combination with the generic numerical flux routine [`FluxHydrostaticReconstruction`](@ref).
+
+Further details on this hydrostatic reconstruction and its motivation can be found in
+- Guoxian Chen and Sebastian Noelle (2017)
+ A new hydrostatic reconstruction scheme based on subcell reconstructions
+ [DOI:10.1137/15M1053074](https://dx.doi.org/10.1137/15M1053074)
+"""
+@inline function hydrostatic_reconstruction_chen_noelle(u_ll, u_rr,
+ equations::ShallowWaterEquations2D)
+ # Unpack left and right water heights and bottom topographies
+ h_ll, _, _, b_ll = u_ll
+ h_rr, _, _, b_rr = u_rr
+
+ # Get the velocities on either side
+ v1_ll, v2_ll = velocity(u_ll, equations)
+ v1_rr, v2_rr = velocity(u_rr, equations)
+
+ H_ll = b_ll + h_ll
+ H_rr = b_rr + h_rr
+
+ b_star = min(max(b_ll, b_rr), min(H_ll, H_rr))
+
+ # Compute the reconstructed water heights
+ h_ll_star = min(H_ll - b_star, h_ll)
+ h_rr_star = min(H_rr - b_star, h_rr)
+
+ # Set the water height to be at least the value stored in the variable threshold after
+ # the hydrostatic reconstruction is applied and before the numerical flux is calculated
+    # to avoid numerical problems with arbitrarily small values. Interfaces with a water height
+    # lower than or equal to the threshold can be declared as dry.
+    # The default value for `threshold_wet`, ≈5*eps() (≈1e-15 in double precision), is set
+    # in the `ShallowWaterEquations2D` struct. This threshold value can be changed in the constructor
+    # call of this equation struct in an elixir.
+ threshold = equations.threshold_wet
+
+ if (h_ll_star <= threshold)
+ h_ll_star = threshold
+ v1_ll = zero(v1_ll)
+ v2_ll = zero(v2_ll)
+ end
+
+ if (h_rr_star <= threshold)
+ h_rr_star = threshold
+ v1_rr = zero(v1_rr)
+ v2_rr = zero(v2_rr)
+ end
+
+    # Create the conservative variables using the reconstructed water heights
+ u_ll_star = SVector(h_ll_star, h_ll_star * v1_ll, h_ll_star * v2_ll, b_ll)
+ u_rr_star = SVector(h_rr_star, h_rr_star * v1_rr, h_rr_star * v2_rr, b_rr)
+
+ return u_ll_star, u_rr_star
+end
+
"""
flux_nonconservative_audusse_etal(u_ll, u_rr, orientation::Integer,
equations::ShallowWaterEquations2D)
@@ -516,6 +604,104 @@ end
return SVector(f1, f2, f3, f4)
end
+# TODO: TrixiShallowWater: move wet/dry specific routine
+"""
+ flux_nonconservative_chen_noelle(u_ll, u_rr,
+ orientation::Integer,
+ equations::ShallowWaterEquations2D)
+ flux_nonconservative_chen_noelle(u_ll, u_rr,
+                                     normal_direction_ll::AbstractVector,
+                                     normal_direction_average::AbstractVector,
+ equations::ShallowWaterEquations2D)
+
+Non-symmetric two-point surface flux that discretizes the nonconservative (source) term.
+The discretization uses the [`hydrostatic_reconstruction_chen_noelle`](@ref) on the conservative
+variables.
+
+Should be used together with [`FluxHydrostaticReconstruction`](@ref) and
+[`hydrostatic_reconstruction_chen_noelle`](@ref) in the surface flux to ensure consistency.
+
+Further details on the hydrostatic reconstruction and its motivation can be found in
+- Guoxian Chen and Sebastian Noelle (2017)
+ A new hydrostatic reconstruction scheme based on subcell reconstructions
+ [DOI:10.1137/15M1053074](https://dx.doi.org/10.1137/15M1053074)
+"""
+@inline function flux_nonconservative_chen_noelle(u_ll, u_rr, orientation::Integer,
+ equations::ShallowWaterEquations2D)
+ # Pull the water height and bottom topography on the left
+ h_ll, _, _, b_ll = u_ll
+ h_rr, _, _, b_rr = u_rr
+
+ H_ll = h_ll + b_ll
+ H_rr = h_rr + b_rr
+
+ b_star = min(max(b_ll, b_rr), min(H_ll, H_rr))
+
+ # Create the hydrostatic reconstruction for the left solution state
+ u_ll_star, _ = hydrostatic_reconstruction_chen_noelle(u_ll, u_rr, equations)
+
+    # Copy the reconstructed water height for easier-to-read code
+ h_ll_star = u_ll_star[1]
+
+ z = zero(eltype(u_ll))
+ # Includes two parts:
+ # (i) Diagonal (consistent) term from the volume flux that uses `b_ll` to avoid
+ # cross-averaging across a discontinuous bottom topography
+ # (ii) True surface part that uses `h_ll` and `h_ll_star` to handle discontinuous bathymetry
+ g = equations.gravity
+ if orientation == 1
+ f = SVector(z,
+ g * h_ll * b_ll - g * (h_ll_star + h_ll) * (b_ll - b_star),
+ z, z)
+ else # orientation == 2
+ f = SVector(z, z,
+ g * h_ll * b_ll - g * (h_ll_star + h_ll) * (b_ll - b_star),
+ z)
+ end
+
+ return f
+end
+
+@inline function flux_nonconservative_chen_noelle(u_ll, u_rr,
+ normal_direction_ll::AbstractVector,
+ normal_direction_average::AbstractVector,
+ equations::ShallowWaterEquations2D)
+ # Pull the water height and bottom topography on the left
+ h_ll, _, _, b_ll = u_ll
+ h_rr, _, _, b_rr = u_rr
+
+ H_ll = h_ll + b_ll
+ H_rr = h_rr + b_rr
+
+ b_star = min(max(b_ll, b_rr), min(H_ll, H_rr))
+
+ # Create the hydrostatic reconstruction for the left solution state
+ u_ll_star, _ = hydrostatic_reconstruction_chen_noelle(u_ll, u_rr, equations)
+
+    # Copy the reconstructed water height for easier-to-read code
+ h_ll_star = u_ll_star[1]
+
+ # Comes in two parts:
+ # (i) Diagonal (consistent) term from the volume flux that uses `normal_direction_average`
+ # but we use `b_ll` to avoid cross-averaging across a discontinuous bottom topography
+
+ f2 = normal_direction_average[1] * equations.gravity * h_ll * b_ll
+ f3 = normal_direction_average[2] * equations.gravity * h_ll * b_ll
+
+ # (ii) True surface part that uses `normal_direction_ll`, `h_ll` and `h_ll_star`
+ # to handle discontinuous bathymetry
+
+ f2 -= normal_direction_ll[1] * equations.gravity * (h_ll_star + h_ll) *
+ (b_ll - b_star)
+ f3 -= normal_direction_ll[2] * equations.gravity * (h_ll_star + h_ll) *
+ (b_ll - b_star)
+
+ # First and last equations do not have a nonconservative flux
+ f1 = f4 = zero(eltype(u_ll))
+
+ return SVector(f1, f2, f3, f4)
+end
+
"""
flux_fjordholm_etal(u_ll, u_rr, orientation_or_normal_direction,
equations::ShallowWaterEquations2D)
@@ -762,6 +948,67 @@ end
return λ_min, λ_max
end
+# TODO: TrixiShallowWater: move wet/dry specific routine
+"""
+ min_max_speed_chen_noelle(u_ll, u_rr, orientation::Integer,
+ equations::ShallowWaterEquations2D)
+ min_max_speed_chen_noelle(u_ll, u_rr, normal_direction::AbstractVector,
+ equations::ShallowWaterEquations2D)
+
+Special estimate of the minimal and maximal wave speed of the shallow water equations for
+the left and right states `u_ll, u_rr`. These approximate speeds are used for the HLL-type
+numerical flux [`flux_hll_chen_noelle`](@ref). These wave speed estimates
+together with a particular hydrostatic reconstruction technique guarantee
+that the numerical flux is positive and satisfies an entropy inequality.
+
+Further details on this hydrostatic reconstruction and its motivation can be found in
+the reference below. The definition of the wave speeds are given in Equation (2.20).
+- Guoxian Chen and Sebastian Noelle (2017)
+ A new hydrostatic reconstruction scheme based on subcell reconstructions
+ [DOI:10.1137/15M1053074](https://dx.doi.org/10.1137/15M1053074)
+"""
+@inline function min_max_speed_chen_noelle(u_ll, u_rr, orientation::Integer,
+ equations::ShallowWaterEquations2D)
+ h_ll = waterheight(u_ll, equations)
+ v1_ll, v2_ll = velocity(u_ll, equations)
+ h_rr = waterheight(u_rr, equations)
+ v1_rr, v2_rr = velocity(u_rr, equations)
+
+ a_ll = sqrt(equations.gravity * h_ll)
+ a_rr = sqrt(equations.gravity * h_rr)
+
+ if orientation == 1 # x-direction
+ λ_min = min(v1_ll - a_ll, v1_rr - a_rr, zero(eltype(u_ll)))
+ λ_max = max(v1_ll + a_ll, v1_rr + a_rr, zero(eltype(u_ll)))
+ else # y-direction
+ λ_min = min(v2_ll - a_ll, v2_rr - a_rr, zero(eltype(u_ll)))
+ λ_max = max(v2_ll + a_ll, v2_rr + a_rr, zero(eltype(u_ll)))
+ end
+
+ return λ_min, λ_max
+end
+
+@inline function min_max_speed_chen_noelle(u_ll, u_rr, normal_direction::AbstractVector,
+ equations::ShallowWaterEquations2D)
+ h_ll = waterheight(u_ll, equations)
+ v1_ll, v2_ll = velocity(u_ll, equations)
+ h_rr = waterheight(u_rr, equations)
+ v1_rr, v2_rr = velocity(u_rr, equations)
+
+ v_normal_ll = v1_ll * normal_direction[1] + v2_ll * normal_direction[2]
+ v_normal_rr = v1_rr * normal_direction[1] + v2_rr * normal_direction[2]
+
+ norm_ = norm(normal_direction)
+
+ a_ll = sqrt(equations.gravity * h_ll) * norm_
+ a_rr = sqrt(equations.gravity * h_rr) * norm_
+
+ λ_min = min(v_normal_ll - a_ll, v_normal_rr - a_rr, zero(eltype(u_ll)))
+ λ_max = max(v_normal_ll + a_ll, v_normal_rr + a_rr, zero(eltype(u_ll)))
+
+ return λ_min, λ_max
+end
+
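As a plausibility check of the directional estimate above, the following plain-Julia sketch evaluates both formulas for the unit normal `(1.0, 0.0)` and made-up states, where they must coincide with the `orientation == 1` estimates.

```julia
g = 9.81
h_ll, v1_ll, v2_ll = 1.0, 0.5, -0.2
h_rr, v1_rr, v2_rr = 0.8, -0.3, 0.1
normal_direction = (1.0, 0.0)

# Orientation-based estimate in the x-direction
a_ll, a_rr = sqrt(g * h_ll), sqrt(g * h_rr)
λ_min_x = min(v1_ll - a_ll, v1_rr - a_rr, 0.0)
λ_max_x = max(v1_ll + a_ll, v1_rr + a_rr, 0.0)

# Normal-direction estimate: the celerities are scaled by the norm of the direction vector
norm_ = hypot(normal_direction...)
v_n_ll = v1_ll * normal_direction[1] + v2_ll * normal_direction[2]
v_n_rr = v1_rr * normal_direction[1] + v2_rr * normal_direction[2]
λ_min_n = min(v_n_ll - a_ll * norm_, v_n_rr - a_rr * norm_, 0.0)
λ_max_n = max(v_n_ll + a_ll * norm_, v_n_rr + a_rr * norm_, 0.0)

# λ_min_n == λ_min_x and λ_max_n == λ_max_x for this x-aligned unit normal
```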
# More refined estimates for minimum and maximum wave speeds for HLL-type fluxes
@inline function min_max_speed_davis(u_ll, u_rr, orientation::Integer,
equations::ShallowWaterEquations2D)
@@ -1008,9 +1255,20 @@ end
end
# Calculate the error for the "lake-at-rest" test case where H = h+b should
-# be a constant value over time
+# be a constant value over time. Note that this assumes there is a single reference
+# water height `H0` with which to compare.
+#
+# TODO: TrixiShallowWater: where should `threshold_limiter` live? May need
+# to modify or have different versions of the `lake_at_rest_error` function
@inline function lake_at_rest_error(u, equations::ShallowWaterEquations2D)
h, _, _, b = u
- return abs(equations.H0 - (h + b))
+
+ # For well-balancedness testing with possible wet/dry regions the reference
+ # water height `H0` accounts for the possibility that the bottom topography
+    # can emerge out of the water as well as for the threshold offset that avoids
+    # division by a "hard" zero water height.
+ H0_wet_dry = max(equations.H0, b + equations.threshold_limiter)
+
+ return abs(H0_wet_dry - (h + b))
end
end # @muladd
diff --git a/src/equations/shallow_water_two_layer_1d.jl b/src/equations/shallow_water_two_layer_1d.jl
index e126eec7c25..4b64481cca3 100644
--- a/src/equations/shallow_water_two_layer_1d.jl
+++ b/src/equations/shallow_water_two_layer_1d.jl
@@ -5,6 +5,8 @@
@muladd begin
#! format: noindent
+# TODO: TrixiShallowWater: 1D two layer equations should move to new package
+
@doc raw"""
ShallowWaterTwoLayerEquations1D(gravity, H0, rho_upper, rho_lower)
diff --git a/src/equations/shallow_water_two_layer_2d.jl b/src/equations/shallow_water_two_layer_2d.jl
index a54831c711f..87249e91948 100644
--- a/src/equations/shallow_water_two_layer_2d.jl
+++ b/src/equations/shallow_water_two_layer_2d.jl
@@ -5,48 +5,50 @@
@muladd begin
#! format: noindent
+# TODO: TrixiShallowWater: 2D two layer equations should move to new package
+
@doc raw"""
ShallowWaterTwoLayerEquations2D(gravity, H0, rho_upper, rho_lower)
Two-Layer Shallow water equations (2LSWE) in two space dimension. The equations are given by
```math
\begin{alignat*}{8}
-&\frac{\partial}{\partial t}h_{upper}
+&\frac{\partial}{\partial t}h_{upper}
&&+ \frac{\partial}{\partial x}\left(h_{upper} v_{1,upper}\right)
-&&+ \frac{\partial}{\partial y}\left(h_{upper} v_{2,upper}\right) \quad
+&&+ \frac{\partial}{\partial y}\left(h_{upper} v_{2,upper}\right) \quad
&&= \quad 0 \\
-&\frac{\partial}{\partial t}\left(h_{upper} v_{1,upper}\right)
-&&+ \frac{\partial}{\partial x}\left(h_{upper} v_{1,upper}^2 + \frac{gh_{upper}^2}{2}\right)
-&&+ \frac{\partial}{\partial y}\left(h_{upper} v_{1,upper} v_{2,upper}\right) \quad
+&\frac{\partial}{\partial t}\left(h_{upper} v_{1,upper}\right)
+&&+ \frac{\partial}{\partial x}\left(h_{upper} v_{1,upper}^2 + \frac{gh_{upper}^2}{2}\right)
+&&+ \frac{\partial}{\partial y}\left(h_{upper} v_{1,upper} v_{2,upper}\right) \quad
&&= -gh_{upper}\frac{\partial}{\partial x}\left(b+h_{lower}\right) \\
-&\frac{\partial}{\partial t}\left(h_{upper} v_{2,upper}\right)
-&&+ \frac{\partial}{\partial x}\left(h_{upper} v_{1,upper} v_{2,upper}\right)
-&&+ \frac{\partial}{\partial y}\left(h_{upper} v_{2,upper}^2 + \frac{gh_{upper}^2}{2}\right)
+&\frac{\partial}{\partial t}\left(h_{upper} v_{2,upper}\right)
+&&+ \frac{\partial}{\partial x}\left(h_{upper} v_{1,upper} v_{2,upper}\right)
+&&+ \frac{\partial}{\partial y}\left(h_{upper} v_{2,upper}^2 + \frac{gh_{upper}^2}{2}\right)
&&= -gh_{upper}\frac{\partial}{\partial y}\left(b+h_{lower}\right)\\
-&\frac{\partial}{\partial t}h_{lower}
-&&+ \frac{\partial}{\partial x}\left(h_{lower} v_{1,lower}\right)
-&&+ \frac{\partial}{\partial y}\left(h_{lower} v_{2,lower}\right)
+&\frac{\partial}{\partial t}h_{lower}
+&&+ \frac{\partial}{\partial x}\left(h_{lower} v_{1,lower}\right)
+&&+ \frac{\partial}{\partial y}\left(h_{lower} v_{2,lower}\right)
&&= \quad 0 \\
-&\frac{\partial}{\partial t}\left(h_{lower} v_{1,lower}\right)
-&&+ \frac{\partial}{\partial x}\left(h_{lower} v_{1,lower}^2 + \frac{gh_{lower}^2}{2}\right)
-&&+ \frac{\partial}{\partial y}\left(h_{lower} v_{1,lower} v_{2,lower}\right)
+&\frac{\partial}{\partial t}\left(h_{lower} v_{1,lower}\right)
+&&+ \frac{\partial}{\partial x}\left(h_{lower} v_{1,lower}^2 + \frac{gh_{lower}^2}{2}\right)
+&&+ \frac{\partial}{\partial y}\left(h_{lower} v_{1,lower} v_{2,lower}\right)
&&= -gh_{lower}\frac{\partial}{\partial x}\left(b+\frac{\rho_{upper}}{\rho_{lower}} h_{upper}\right)\\
-&\frac{\partial}{\partial t}\left(h_{lower} v_{2,lower}\right)
-&&+ \frac{\partial}{\partial x}\left(h_{lower} v_{1,lower} v_{2,lower}\right)
-&&+ \frac{\partial}{\partial y}\left(h_{lower} v_{2,lower}^2 + \frac{gh_{lower}^2}{2}\right)
+&\frac{\partial}{\partial t}\left(h_{lower} v_{2,lower}\right)
+&&+ \frac{\partial}{\partial x}\left(h_{lower} v_{1,lower} v_{2,lower}\right)
+&&+ \frac{\partial}{\partial y}\left(h_{lower} v_{2,lower}^2 + \frac{gh_{lower}^2}{2}\right)
&&= -gh_{lower}\frac{\partial}{\partial y}\left(b+\frac{\rho_{upper}}{\rho_{lower}} h_{upper}\right)
\end{alignat*}
```
-The unknown quantities of the 2LSWE are the water heights of the lower layer ``h_{lower}`` and the
-upper
+The unknown quantities of the 2LSWE are the water heights of the lower layer ``h_{lower}`` and the
+upper
layer ``h_{upper}`` and the respective velocities in x-direction ``v_{1,lower}`` and ``v_{1,upper}`` and in y-direction
-``v_{2,lower}`` and ``v_{2,upper}``. The gravitational constant is denoted by `g`, the layer densitites by
-``\rho_{upper}``and ``\rho_{lower}`` and the (possibly) variable bottom topography function by ``b(x)``.
-Conservative variable water height ``h_{lower}`` is measured from the bottom topography ``b`` and ``h_{upper}``
-relative to ``h_{lower}``, therefore one also defines the total water heights as ``H_{lower} = h_{lower} + b`` and
+``v_{2,lower}`` and ``v_{2,upper}``. The gravitational constant is denoted by `g`, the layer densities by
+``\rho_{upper}`` and ``\rho_{lower}`` and the (possibly) variable bottom topography function by ``b(x)``.
+Conservative variable water height ``h_{lower}`` is measured from the bottom topography ``b`` and ``h_{upper}``
+relative to ``h_{lower}``, therefore one also defines the total water heights as ``H_{lower} = h_{lower} + b`` and
``H_{upper} = h_{upper} + h_{lower} + b``.
-The densities must be chosen such that ``\rho_{upper} < \rho_{lower}``, to make sure that the heavier fluid
+The densities must be chosen such that ``\rho_{upper} < \rho_{lower}``, to make sure that the heavier fluid
``\rho_{lower}`` is in the bottom layer and the lighter fluid ``\rho_{upper}`` in the upper layer.
The additional quantity ``H_0`` is also available to store a reference value for the total water
@@ -55,13 +57,13 @@ height that is useful to set initial conditions or test the "lake-at-rest" well-
The bottom topography function ``b(x)`` is set inside the initial condition routine
for a particular problem setup.
-In addition to the unknowns, Trixi currently stores the bottom topography values at the
-approximation points despite being fixed in time. This is done for convenience of computing the
-bottom topography gradients on the fly during the approximation as well as computing auxiliary
+In addition to the unknowns, Trixi currently stores the bottom topography values at the
+approximation points despite being fixed in time. This is done for convenience of computing the
+bottom topography gradients on the fly during the approximation as well as computing auxiliary
quantities like the total water height ``H`` or the entropy variables.
This affects the implementation and use of these equations in various ways:
* The flux values corresponding to the bottom topography must be zero.
-* The bottom topography values must be included when defining initial conditions, boundary
+* The bottom topography values must be included when defining initial conditions, boundary
conditions or source terms.
* [`AnalysisCallback`](@ref) analyzes this variable.
* Trixi's visualization tools will visualize the bottom topography by default.
@@ -113,7 +115,7 @@ end
initial_condition_convergence_test(x, t, equations::ShallowWaterTwoLayerEquations2D)
A smooth initial condition used for convergence tests in combination with
-[`source_terms_convergence_test`](@ref). Constants must be set to ``rho_{upper} = 0.9``,
+[`source_terms_convergence_test`](@ref). Constants must be set to ``rho_{upper} = 0.9``,
``rho_{lower} = 1.0``, ``g = 10.0``.
"""
function initial_condition_convergence_test(x, t,
@@ -141,7 +143,7 @@ Source terms used for convergence tests in combination with
"""
@inline function source_terms_convergence_test(u, x, t,
equations::ShallowWaterTwoLayerEquations2D)
- # Same settings as in `initial_condition_convergence_test`.
+ # Same settings as in `initial_condition_convergence_test`.
# some constants are chosen such that the function is periodic on the domain [0,sqrt(2)]^2]
ω = 2.0 * pi * sqrt(2.0)
@@ -325,7 +327,7 @@ end
Non-symmetric two-point volume flux discretizing the nonconservative (source) term
that contains the gradient of the bottom topography [`ShallowWaterTwoLayerEquations2D`](@ref) and an
-additional term that couples the momentum of both layers. This is a slightly modified version
+additional term that couples the momentum of both layers. This is a slightly modified version
to account for the additional source term compared to the standard SWE described in the paper.
Further details are available in the paper:
@@ -345,7 +347,7 @@ Further details are available in the paper:
z = zero(eltype(u_ll))
# Bottom gradient nonconservative term: (0, g*h_upper*(b + h_lower)_x, g*h_upper*(b + h_lower)_y ,
- # 0, g*h_lower*(b + r*h_upper)_x,
+ # 0, g*h_lower*(b + r*h_upper)_x,
# g*h_lower*(b + r*h_upper)_y, 0)
if orientation == 1
f = SVector(z,
@@ -397,8 +399,8 @@ end
!!! warning "Experimental code"
This numerical flux is experimental and may change in any future release.
-Non-symmetric two-point surface flux discretizing the nonconservative (source) term that contains
-the gradients of the bottom topography and an additional term that couples the momentum of both
+Non-symmetric two-point surface flux discretizing the nonconservative (source) term that contains
+the gradients of the bottom topography and an additional term that couples the momentum of both
layers [`ShallowWaterTwoLayerEquations2D`](@ref).
Further details are available in the paper:
@@ -506,13 +508,13 @@ end
flux_fjordholm_etal(u_ll, u_rr, orientation,
equations::ShallowWaterTwoLayerEquations2D)
-Total energy conservative (mathematical entropy for two-layer shallow water equations). When the
-bottom topography is nonzero this should only be used as a surface flux otherwise the scheme will
+Total energy conservative (mathematical entropy for two-layer shallow water equations). When the
+bottom topography is nonzero this should only be used as a surface flux otherwise the scheme will
not be well-balanced. For well-balancedness in the volume flux use [`flux_wintermeyer_etal`](@ref).
Details are available in Eq. (4.1) in the paper:
- Ulrik S. Fjordholm, Siddhartha Mishra and Eitan Tadmor (2011)
- Well-balanced and energy stable schemes for the shallow water equations with discontinuous
+ Well-balanced and energy stable schemes for the shallow water equations with discontinuous
topography [DOI: 10.1016/j.jcp.2011.03.042](https://doi.org/10.1016/j.jcp.2011.03.042)
and the application to two layers is shown in the paper:
- Ulrik Skre Fjordholm (2012)
@@ -606,11 +608,11 @@ end
"""
flux_wintermeyer_etal(u_ll, u_rr, orientation,
equations::ShallowWaterTwoLayerEquations2D)
-
+
Total energy conservative (mathematical entropy for two-layer shallow water equations) split form.
When the bottom topography is nonzero this scheme will be well-balanced when used as a `volume_flux`.
The `surface_flux` should still use, e.g., [`flux_fjordholm_etal`](@ref). To obtain the flux for the
-two-layer shallow water equations the flux that is described in the paper for the normal shallow
+two-layer shallow water equations the flux that is described in the paper for the normal shallow
water equations is used within each layer.
Further details are available in Theorem 1 of the paper:
@@ -696,9 +698,9 @@ end
flux_es_fjordholm_etal(u_ll, u_rr, orientation_or_normal_direction,
equations::ShallowWaterTwoLayerEquations1D)
-Entropy stable surface flux for the two-layer shallow water equations. Uses the entropy conservative
+Entropy stable surface flux for the two-layer shallow water equations. Uses the entropy conservative
[`flux_fjordholm_etal`](@ref) and adds a Lax-Friedrichs type dissipation dependent on the jump of entropy
-variables.
+variables.
Further details are available in the paper:
- Ulrik Skre Fjordholm (2012)
@@ -723,7 +725,7 @@ formulation.
q_rr = cons2entropy(u_rr, equations)
q_ll = cons2entropy(u_ll, equations)
- # Average values from left and right
+ # Average values from left and right
u_avg = (u_ll + u_rr) / 2
# Introduce variables for better readability
@@ -791,10 +793,10 @@ formulation.
end
# Calculate approximation for maximum wave speed for local Lax-Friedrichs-type dissipation as the
-# maximum velocity magnitude plus the maximum speed of sound. This function uses approximate
-# eigenvalues using the speed of the barotropic mode as there is no simple way to calculate them
-# analytically.
-#
+# maximum velocity magnitude plus the maximum speed of sound. This function uses approximate
+# eigenvalues using the speed of the barotropic mode as there is no simple way to calculate them
+# analytically.
+#
# A good overview of the derivation is given in:
# - Jonas Nycander, Andrew McC. Hogg, Leela M. Frankcombe (2008)
# Open boundary conditions for nonlinear channel Flows
@@ -914,7 +916,7 @@ end
# Convert conservative variables to entropy variables
# Note, only the first four are the entropy variables, the fifth entry still just carries the bottom
-# topography values for convenience.
+# topography values for convenience.
# In contrast to general usage the entropy variables are denoted with q instead of w, because w is
# already used for velocity in y-Direction
@inline function cons2entropy(u, equations::ShallowWaterTwoLayerEquations2D)
diff --git a/src/solvers/dgsem_tree/indicators.jl b/src/solvers/dgsem_tree/indicators.jl
index b8f8a796f2b..4b83e9c1a9e 100644
--- a/src/solvers/dgsem_tree/indicators.jl
+++ b/src/solvers/dgsem_tree/indicators.jl
@@ -92,6 +92,77 @@ end
function Base.show(io::IO, ::MIME"text/plain", indicator::IndicatorHennemannGassner)
@nospecialize indicator # reduce precompilation time
+ setup = [
+ "indicator variable" => indicator.variable,
+ "max. α" => indicator.alpha_max,
+ "min. α" => indicator.alpha_min,
+ "smooth α" => (indicator.alpha_smooth ? "yes" : "no"),
+ ]
+ summary_box(io, "IndicatorHennemannGassner", setup)
+end
+
+# TODO: TrixiShallowWater: move the new indicator and all associated routines to the new package
+"""
+ IndicatorHennemannGassnerShallowWater(equations::AbstractEquations, basis;
+ alpha_max=0.5,
+ alpha_min=0.001,
+ alpha_smooth=true,
+ variable)
+
+Modified version of the [`IndicatorHennemannGassner`](@ref)
+indicator used for shock capturing with the shallow water equations. After
+the element-wise values of the blending factors are computed, an additional check
+is made to see if the element is partially wet. In that case, the element is set to use
+the pure finite volume scheme, which is guaranteed to be well-balanced
+for this wet/dry transition state of the flow regime.
+
+See also [`VolumeIntegralShockCapturingHG`](@ref).
+
+## References
+
+- Hennemann, Gassner (2020)
+ "A provably entropy stable subcell shock capturing approach for high order split form DG"
+ [arXiv: 2008.12044](https://arxiv.org/abs/2008.12044)
+"""
+struct IndicatorHennemannGassnerShallowWater{RealT <: Real, Variable, Cache} <:
+ AbstractIndicator
+ alpha_max::RealT
+ alpha_min::RealT
+ alpha_smooth::Bool
+ variable::Variable
+ cache::Cache
+end
+
+# This method is used when the indicator is constructed for shock-capturing volume integrals
+# of the shallow water equations.
+# It modifies the shock-capturing indicator to use the full FV method in dry cells.
+function IndicatorHennemannGassnerShallowWater(equations::AbstractShallowWaterEquations,
+ basis;
+ alpha_max = 0.5,
+ alpha_min = 0.001,
+ alpha_smooth = true,
+ variable)
+ alpha_max, alpha_min = promote(alpha_max, alpha_min)
+ cache = create_cache(IndicatorHennemannGassner, equations, basis)
+ IndicatorHennemannGassnerShallowWater{typeof(alpha_max), typeof(variable),
+ typeof(cache)}(alpha_max, alpha_min,
+ alpha_smooth, variable, cache)
+end
+
+function Base.show(io::IO, indicator::IndicatorHennemannGassnerShallowWater)
+ @nospecialize indicator # reduce precompilation time
+
+ print(io, "IndicatorHennemannGassnerShallowWater(")
+ print(io, indicator.variable)
+ print(io, ", alpha_max=", indicator.alpha_max)
+ print(io, ", alpha_min=", indicator.alpha_min)
+ print(io, ", alpha_smooth=", indicator.alpha_smooth)
+ print(io, ")")
+end
+
+function Base.show(io::IO, ::MIME"text/plain",
+ indicator::IndicatorHennemannGassnerShallowWater)
+ @nospecialize indicator # reduce precompilation time
if get(io, :compact, false)
show(io, indicator)
@@ -102,7 +173,7 @@ function Base.show(io::IO, ::MIME"text/plain", indicator::IndicatorHennemannGass
"min. α" => indicator.alpha_min,
"smooth α" => (indicator.alpha_smooth ? "yes" : "no"),
]
- summary_box(io, "IndicatorHennemannGassner", setup)
+ summary_box(io, "IndicatorHennemannGassnerShallowWater", setup)
end
end
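For orientation, a sketch of how the new indicator might be wired into a shock-capturing DGSEM solver for the 2D shallow water equations; the flux and parameter choices are illustrative and assume the wet/dry routines added elsewhere in this patch are exported.

```julia
using Trixi

equations = ShallowWaterEquations2D(gravity_constant = 9.81, H0 = 1.0)

volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal)
surface_flux = (FluxHydrostaticReconstruction(flux_hll_chen_noelle,
                                              hydrostatic_reconstruction_chen_noelle),
                flux_nonconservative_chen_noelle)

basis = LobattoLegendreBasis(3)
indicator_sc = IndicatorHennemannGassnerShallowWater(equations, basis,
                                                     alpha_max = 0.5,
                                                     alpha_min = 0.001,
                                                     alpha_smooth = true,
                                                     variable = waterheight_pressure)
volume_integral = VolumeIntegralShockCapturingHG(indicator_sc;
                                                 volume_flux_dg = volume_flux,
                                                 volume_flux_fv = surface_flux)

solver = DGSEM(basis, surface_flux, volume_integral)
```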
diff --git a/src/solvers/dgsem_tree/indicators_1d.jl b/src/solvers/dgsem_tree/indicators_1d.jl
index e722584bb2e..8b57348861c 100644
--- a/src/solvers/dgsem_tree/indicators_1d.jl
+++ b/src/solvers/dgsem_tree/indicators_1d.jl
@@ -24,6 +24,115 @@ function create_cache(typ::Type{IndicatorHennemannGassner}, mesh,
create_cache(typ, equations, dg.basis)
end
+# Modified indicator for ShallowWaterEquations1D to apply the full FV method on cells
+# containing some "dry" LGL nodes. That is, if an element is partially "wet", it is treated as a
+# full FV element.
+#
+# TODO: TrixiShallowWater: move new indicator type
+function (indicator_hg::IndicatorHennemannGassnerShallowWater)(u::AbstractArray{<:Any, 3
+ },
+ mesh,
+ equations::ShallowWaterEquations1D,
+ dg::DGSEM, cache;
+ kwargs...)
+ @unpack alpha_max, alpha_min, alpha_smooth, variable = indicator_hg
+ @unpack alpha, alpha_tmp, indicator_threaded, modal_threaded = indicator_hg.cache
+ # TODO: Taal refactor, when to `resize!` stuff changed possibly by AMR?
+ # Shall we implement `resize!(semi::AbstractSemidiscretization, new_size)`
+ # or just `resize!` whenever we call the relevant methods as we do now?
+ resize!(alpha, nelements(dg, cache))
+ if alpha_smooth
+ resize!(alpha_tmp, nelements(dg, cache))
+ end
+
+ # magic parameters
+ threshold = 0.5 * 10^(-1.8 * (nnodes(dg))^0.25)
+ parameter_s = log((1 - 0.0001) / 0.0001)
+
+ # If the water height `h` at one LGL node is lower than `threshold_partially_wet`
+ # the indicator sets the element-wise blending factor alpha[element] = 1
+ # via the local variable `indicator_wet`. In turn, this ensures that a pure
+ # FV method is used in partially wet cells and guarantees the well-balanced property.
+ #
+ # Hard-coded cut-off value of `threshold_partially_wet = 1e-4` was determined through many numerical experiments.
+    # The overall idea is to increase robustness when computing the velocity on (nearly) dry cells,
+    # which could be "dangerous" due to the division of conservative variables, e.g., v = hv / h.
+ # Here, the impact of the threshold on the number of cells being updated with FV is not that
+ # significant. However, its impact on the robustness is very significant.
+ # The value can be seen as a trade-off between accuracy and stability.
+ # Well-balancedness of the scheme on partially wet cells with hydrostatic reconstruction
+ # can only be proven for the FV method (see Chen and Noelle).
+ # Therefore we set alpha to one regardless of its given maximum value.
+ threshold_partially_wet = 1e-4
+
+ @threaded for element in eachelement(dg, cache)
+ indicator = indicator_threaded[Threads.threadid()]
+ modal = modal_threaded[Threads.threadid()]
+
+        # (Re-)set dummy variable `indicator_wet` used to detect dry elements
+ indicator_wet = 1
+
+ # Calculate indicator variables at Gauss-Lobatto nodes
+ for i in eachnode(dg)
+ u_local = get_node_vars(u, equations, dg, i, element)
+ h, _, _ = u_local
+
+ if h <= threshold_partially_wet
+ indicator_wet = 0
+ end
+
+ indicator[i] = indicator_hg.variable(u_local, equations)
+ end
+
+ # Convert to modal representation
+ multiply_scalar_dimensionwise!(modal, dg.basis.inverse_vandermonde_legendre,
+ indicator)
+
+ # Calculate total energies for all modes, without highest, without two highest
+ total_energy = zero(eltype(modal))
+ for i in 1:nnodes(dg)
+ total_energy += modal[i]^2
+ end
+ total_energy_clip1 = zero(eltype(modal))
+ for i in 1:(nnodes(dg) - 1)
+ total_energy_clip1 += modal[i]^2
+ end
+ total_energy_clip2 = zero(eltype(modal))
+ for i in 1:(nnodes(dg) - 2)
+ total_energy_clip2 += modal[i]^2
+ end
+
+ # Calculate energy in higher modes
+ energy = max((total_energy - total_energy_clip1) / total_energy,
+ (total_energy_clip1 - total_energy_clip2) / total_energy_clip1)
+
+ alpha_element = 1 / (1 + exp(-parameter_s / threshold * (energy - threshold)))
+
+ # Take care of the case close to pure DG
+ if alpha_element < alpha_min
+ alpha_element = zero(alpha_element)
+ end
+
+ # Take care of the case close to pure FV
+ if alpha_element > 1 - alpha_min
+ alpha_element = one(alpha_element)
+ end
+
+ # Clip the maximum amount of FV allowed or set to one depending on indicator_wet
+ if indicator_wet == 0
+ alpha[element] = 1
+        else # Element is not flagged as dry, i.e., it is wet
+ alpha[element] = min(alpha_max, alpha_element)
+ end
+ end
+
+ if alpha_smooth
+ apply_smoothing!(mesh, alpha, alpha_tmp, dg, cache)
+ end
+
+ return alpha
+end
+
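For a feel of the blending formula used above, here is a tiny plain-Julia evaluation with a made-up modal energy on a four-node element.

```julia
nnodes_dg = 4
threshold = 0.5 * 10^(-1.8 * nnodes_dg^0.25)  # ≈ 1.4e-3
parameter_s = log((1 - 0.0001) / 0.0001)      # ≈ 9.21

energy = 0.01                                 # assumed relative energy in the highest modes
alpha_element = 1 / (1 + exp(-parameter_s / threshold * (energy - threshold)))
# energy is well above the threshold here, so alpha_element is essentially 1
# (maximal FV blending before clipping with alpha_max or the wet/dry override).
```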
# Use this function barrier and unpack inside to avoid passing closures to Polyester.jl
# with @batch (@threaded).
# Otherwise, @threaded does not work here with Julia ARM on macOS.
diff --git a/src/solvers/dgsem_tree/indicators_2d.jl b/src/solvers/dgsem_tree/indicators_2d.jl
index 085cb71ad0c..f7c78547174 100644
--- a/src/solvers/dgsem_tree/indicators_2d.jl
+++ b/src/solvers/dgsem_tree/indicators_2d.jl
@@ -28,6 +28,116 @@ function create_cache(typ::Type{IndicatorHennemannGassner}, mesh,
create_cache(typ, equations, dg.basis)
end
+# Modified indicator for ShallowWaterEquations2D to apply the full FV method on cells
+# containing some "dry" LGL nodes. That is, if an element is partially "wet", it is treated as a
+# full FV element.
+#
+# TODO: TrixiShallowWater: move new indicator type
+function (indicator_hg::IndicatorHennemannGassnerShallowWater)(u::AbstractArray{<:Any, 4
+ },
+ mesh,
+ equations::ShallowWaterEquations2D,
+ dg::DGSEM, cache;
+ kwargs...)
+ @unpack alpha_max, alpha_min, alpha_smooth, variable = indicator_hg
+ @unpack alpha, alpha_tmp, indicator_threaded, modal_threaded, modal_tmp1_threaded = indicator_hg.cache
+ # TODO: Taal refactor, when should we `resize!` stuff that is possibly changed by AMR?
+ # Shall we implement `resize!(semi::AbstractSemidiscretization, new_size)`
+ # or just `resize!` whenever we call the relevant methods as we do now?
+ resize!(alpha, nelements(dg, cache))
+ if alpha_smooth
+ resize!(alpha_tmp, nelements(dg, cache))
+ end
+
+ # magic parameters
+ threshold = 0.5 * 10^(-1.8 * (nnodes(dg))^0.25)
+ parameter_s = log((1 - 0.0001) / 0.0001)
+
+ # If the water height `h` at one LGL node is lower than `threshold_partially_wet`
+ # the indicator sets the element-wise blending factor alpha[element] = 1
+ # via the local variable `indicator_wet`. In turn, this ensures that a pure
+ # FV method is used in partially wet cells and guarantees the well-balanced property.
+ #
+ # The hard-coded cut-off value of `threshold_partially_wet = 1e-4` was determined through many numerical experiments.
+ # The overall idea is to increase the robustness when computing the velocity on (nearly) dry cells, which
+ # could be "dangerous" due to dividing conservative variables by a (nearly) vanishing water height, e.g., v1 = hv1 / h.
+ # Here, the impact of the threshold on the number of cells being updated with FV is not that
+ # significant. However, its impact on the robustness is very significant.
+ # The value can be seen as a trade-off between accuracy and stability.
+ # Well-balancedness of the scheme on partially wet cells with hydrostatic reconstruction
+ # can only be proven for the FV method (see Chen and Noelle).
+ # Therefore, we set alpha to one on such elements, regardless of the value returned by the modal indicator.
+ threshold_partially_wet = 1e-4
+
+ @threaded for element in eachelement(dg, cache)
+ indicator = indicator_threaded[Threads.threadid()]
+ modal = modal_threaded[Threads.threadid()]
+ modal_tmp1 = modal_tmp1_threaded[Threads.threadid()]
+
+ # (Re-)set the dummy variable `indicator_wet` used to flag elements with dry LGL nodes
+ indicator_wet = 1
+
+ # Calculate indicator variables at Gauss-Lobatto nodes
+ for j in eachnode(dg), i in eachnode(dg)
+ u_local = get_node_vars(u, equations, dg, i, j, element)
+ h, _, _, _ = u_local
+
+ if h <= threshold_partially_wet
+ indicator_wet = 0
+ end
+
+ indicator[i, j] = indicator_hg.variable(u_local, equations)
+ end
+
+ # Convert to modal representation
+ multiply_scalar_dimensionwise!(modal, dg.basis.inverse_vandermonde_legendre,
+ indicator, modal_tmp1)
+
+ # Calculate total energies for all modes, without highest, without two highest
+ total_energy = zero(eltype(modal))
+ for j in 1:nnodes(dg), i in 1:nnodes(dg)
+ total_energy += modal[i, j]^2
+ end
+ total_energy_clip1 = zero(eltype(modal))
+ for j in 1:(nnodes(dg) - 1), i in 1:(nnodes(dg) - 1)
+ total_energy_clip1 += modal[i, j]^2
+ end
+ total_energy_clip2 = zero(eltype(modal))
+ for j in 1:(nnodes(dg) - 2), i in 1:(nnodes(dg) - 2)
+ total_energy_clip2 += modal[i, j]^2
+ end
+
+ # Calculate energy in higher modes
+ energy = max((total_energy - total_energy_clip1) / total_energy,
+ (total_energy_clip1 - total_energy_clip2) / total_energy_clip1)
+
+ alpha_element = 1 / (1 + exp(-parameter_s / threshold * (energy - threshold)))
+
+ # Take care of the case close to pure DG
+ if alpha_element < alpha_min
+ alpha_element = zero(alpha_element)
+ end
+
+ # Take care of the case close to pure FV
+ if alpha_element > 1 - alpha_min
+ alpha_element = one(alpha_element)
+ end
+
+ # Clip the maximum amount of FV allowed or set to 1 depending on indicator_wet
+ if indicator_wet == 0
+ alpha[element] = 1
+ else # Element is wet, i.e., no dry LGL nodes were found
+ alpha[element] = min(alpha_max, alpha_element)
+ end
+ end
+
+ if alpha_smooth
+ apply_smoothing!(mesh, alpha, alpha_tmp, dg, cache)
+ end
+
+ return alpha
+end
+
# Use this function barrier and unpack inside to avoid passing closures to Polyester.jl
# with @batch (@threaded).
# Otherwise, @threaded does not work here with Julia ARM on macOS.
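The comment above refers to a general Julia pattern rather than anything specific to this diff: unpack the cache fields once in an outer function and pass them as plain arrays to an inner kernel, so that the threaded loop body does not close over the full cache object. A minimal, Trixi-independent sketch with made-up names:

    struct ToyCache
        alpha::Vector{Float64}
        indicator::Vector{Float64}
    end

    # Outer function: unpack here and call the kernel (the "function barrier").
    function update_alpha!(cache::ToyCache)
        (; alpha, indicator) = cache   # plays the role of `@unpack alpha, indicator = cache`
        return update_alpha_kernel!(alpha, indicator)
    end

    # Inner kernel: only sees plain, concretely typed arrays, so a threaded loop
    # (e.g., Polyester's `@batch`) does not have to capture the cache struct.
    function update_alpha_kernel!(alpha, indicator)
        for i in eachindex(alpha, indicator)
            alpha[i] = min(1.0, abs(indicator[i]))
        end
        return nothing
    end

    update_alpha!(ToyCache(zeros(4), randn(4)))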
diff --git a/test/Project.toml b/test/Project.toml
index 7d386415227..cae1d4ff396 100644
--- a/test/Project.toml
+++ b/test/Project.toml
@@ -17,6 +17,7 @@ LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195"
OrdinaryDiffEq = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed"
Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80"
+Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
diff --git a/test/test_structured_2d.jl b/test/test_structured_2d.jl
index 16fc72f0a46..75937ba82ad 100644
--- a/test/test_structured_2d.jl
+++ b/test/test_structured_2d.jl
@@ -1,5 +1,7 @@
module TestExamplesStructuredMesh2D
+# TODO: TrixiShallowWater: move any wet/dry tests to new package
+
using Test
using Trixi
@@ -20,7 +22,7 @@ isdir(outdir) && rm(outdir, recursive=true)
end
@trixi_testset "elixir_advection_coupled.jl" begin
- @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_coupled.jl"),
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_coupled.jl"),
l2 = [7.816742843181738e-6, 7.816742843196112e-6],
linf = [6.314906965543265e-5, 6.314906965410039e-5],
coverage_override = (maxiters=10^5,))
@@ -270,6 +272,27 @@ isdir(outdir) && rm(outdir, recursive=true)
tspan = (0.0, 0.25))
end
+ @trixi_testset "elixir_shallowwater_well_balanced_wet_dry.jl" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_well_balanced_wet_dry.jl"),
+ l2 = [0.019731646454942086, 1.0694532773278277e-14, 1.1969913383405568e-14, 0.0771517260037954],
+ linf = [0.4999999999998892, 6.067153702623552e-14, 4.4849667259339357e-14, 1.9999999999999993],
+ tspan = (0.0, 0.25))
+ end
+
+ @trixi_testset "elixir_shallowwater_conical_island.jl" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_conical_island.jl"),
+ l2 = [0.04593154164306353, 0.1644534881916908, 0.16445348819169076, 0.0011537702354532122],
+ linf = [0.21100717610846442, 0.9501592344310412, 0.950159234431041, 0.021790250683516296],
+ tspan = (0.0, 0.025))
+ end
+
+ @trixi_testset "elixir_shallowwater_parabolic_bowl.jl" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_parabolic_bowl.jl"),
+ l2 = [0.00015285369980313484, 1.9536806395943226e-5, 9.936906607758672e-5, 5.0686313334616055e-15],
+ linf = [0.003316119030459211, 0.0005075409427972817, 0.001986721761060583, 4.701794509287538e-14],
+ tspan = (0.0, 0.025), cells_per_dimension = (40, 40))
+ end
+
@trixi_testset "elixir_mhd_ec_shockcapturing.jl" begin
@test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_mhd_ec_shockcapturing.jl"),
l2 = [0.0364192725149364, 0.0426667193422069, 0.04261673001449095, 0.025884071405646924,
diff --git a/test/test_tree_1d_shallowwater.jl b/test/test_tree_1d_shallowwater.jl
index 1c3bac1fab6..cafa17edd4c 100644
--- a/test/test_tree_1d_shallowwater.jl
+++ b/test/test_tree_1d_shallowwater.jl
@@ -1,5 +1,7 @@
module TestExamples1DShallowWater
+# TODO: TrixiShallowWater: move any wet/dry tests to new package
+
using Test
using Trixi
@@ -38,6 +40,13 @@ EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_1d_dgsem")
tspan = (0.0, 0.25))
end
+ @trixi_testset "elixir_shallowwater_well_balanced_wet_dry.jl with FluxHydrostaticReconstruction" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_well_balanced_wet_dry.jl"),
+ l2 = [0.00965787167169024, 5.345454081916856e-14, 0.03857583749209928],
+ linf = [0.4999999999998892, 2.2447689894899726e-13, 1.9999999999999714],
+ tspan = (0.0, 0.25))
+ end
+
@trixi_testset "elixir_shallowwater_source_terms.jl" begin
@test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_source_terms.jl"),
l2 = [0.0022363707373868713, 0.01576799981934617, 4.436491725585346e-5],
@@ -88,6 +97,20 @@ EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_1d_dgsem")
linf = [1.1209754279344226, 1.3230788645853582, 0.8646939843534251],
tspan = (0.0, 0.05))
end
+
+ @trixi_testset "elixir_shallowwater_beach.jl" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_beach.jl"),
+ l2 = [0.17979210479598923, 1.2377495706611434, 6.289818963361573e-8],
+ linf = [0.845938394800688, 3.3740800777086575, 4.4541473087633676e-7],
+ tspan = (0.0, 0.05))
+ end
+
+ @trixi_testset "elixir_shallowwater_parabolic_bowl.jl" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_parabolic_bowl.jl"),
+ l2 = [8.965981683033589e-5, 1.8565707397810857e-5, 4.1043039226164336e-17],
+ linf = [0.00041080213807871235, 0.00014823261488938177, 2.220446049250313e-16],
+ tspan = (0.0, 0.05))
+ end
end
end # module
diff --git a/test/test_tree_1d_shallowwater_twolayer.jl b/test/test_tree_1d_shallowwater_twolayer.jl
index 0d8a83806f9..8372d0d4676 100644
--- a/test/test_tree_1d_shallowwater_twolayer.jl
+++ b/test/test_tree_1d_shallowwater_twolayer.jl
@@ -1,5 +1,7 @@
module TestExamples1DShallowWaterTwoLayer
+# TODO: TrixiShallowWater: move two layer tests to new package
+
using Test
using Trixi
diff --git a/test/test_tree_2d_shallowwater.jl b/test/test_tree_2d_shallowwater.jl
index f465a177a67..7670d28f43a 100644
--- a/test/test_tree_2d_shallowwater.jl
+++ b/test/test_tree_2d_shallowwater.jl
@@ -1,5 +1,7 @@
module TestExamples2DShallowWater
+# TODO: TrixiShallowWater: move any wet/dry tests to new package
+
using Test
using Trixi
@@ -37,6 +39,13 @@ EXAMPLES_DIR = joinpath(examples_dir(), "tree_2d_dgsem")
tspan = (0.0, 0.25))
end
+ @trixi_testset "elixir_shallowwater_well_balanced_wet_dry.jl with FluxHydrostaticReconstruction" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_well_balanced_wet_dry.jl"),
+ l2 = [0.030186039395610056, 2.513287752536758e-14, 1.3631397744897607e-16, 0.10911781485920438],
+ linf = [0.49999999999993505, 5.5278950497971455e-14, 7.462550826772548e-16, 2.0],
+ tspan = (0.0, 0.25))
+ end
+
@trixi_testset "elixir_shallowwater_source_terms.jl" begin
@test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_source_terms.jl"),
l2 = [0.001868474306068482, 0.01731687445878443, 0.017649083171490863, 6.274146767717023e-5],
@@ -57,6 +66,21 @@ EXAMPLES_DIR = joinpath(examples_dir(), "tree_2d_dgsem")
linf = [0.015156105797771602, 0.07964811135780492, 0.0839787097210376, 0.0001819675955490041],
tspan = (0.0, 0.025), surface_flux=(flux_hll, flux_nonconservative_fjordholm_etal))
end
+
+ @trixi_testset "elixir_shallowwater_conical_island.jl" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_conical_island.jl"),
+ l2 = [0.0459315416430658, 0.1644534881916991, 0.16445348819169914, 0.0011537702354532694],
+ linf = [0.21100717610846464, 0.9501592344310412, 0.9501592344310417, 0.021790250683516282],
+ tspan = (0.0, 0.025))
+ end
+
+ @trixi_testset "elixir_shallowwater_parabolic_bowl.jl" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_parabolic_bowl.jl"),
+ l2 = [0.00025345501281482687, 4.4525120338817177e-5, 0.00015991819160294247, 7.750412064917294e-15],
+ linf = [0.004664246019836723, 0.0004972780116736669, 0.0028735707270457628, 6.866729407306593e-14],
+ tspan = (0.0, 0.025),
+ basis = LobattoLegendreBasis(3))
+ end
end
end # module
diff --git a/test/test_tree_2d_shallowwater_twolayer.jl b/test/test_tree_2d_shallowwater_twolayer.jl
index 4bb45064714..7ad5b0f7316 100644
--- a/test/test_tree_2d_shallowwater_twolayer.jl
+++ b/test/test_tree_2d_shallowwater_twolayer.jl
@@ -1,5 +1,7 @@
module TestExamples2DShallowWaterTwoLayer
+# TODO: TrixiShallowWater: move two layer tests to new package
+
using Test
using Trixi
@@ -19,10 +21,10 @@ EXAMPLES_DIR = joinpath(examples_dir(), "tree_2d_dgsem")
@trixi_testset "elixir_shallowwater_twolayer_convergence.jl with flux_es_fjordholm_etal" begin
@test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_twolayer_convergence.jl"),
- l2 = [0.00024709443131137236, 0.0019215286339769443, 0.0023833298173254447,
+ l2 = [0.00024709443131137236, 0.0019215286339769443, 0.0023833298173254447,
0.00021258247976270914, 0.0011299428031136195, 0.0009191313765262401,
- 8.873630921431545e-6],
- linf = [0.0016099763244645793, 0.007659242165565017, 0.009123320235427057,
+ 8.873630921431545e-6],
+ linf = [0.0016099763244645793, 0.007659242165565017, 0.009123320235427057,
0.0013496983982568267, 0.0035573687287770994, 0.00296823235874899,
3.361991620143279e-5],
surface_flux = (flux_es_fjordholm_etal, flux_nonconservative_fjordholm_etal),
@@ -31,19 +33,19 @@ EXAMPLES_DIR = joinpath(examples_dir(), "tree_2d_dgsem")
@trixi_testset "elixir_shallowwater_twolayer_well_balanced.jl" begin
@test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_twolayer_well_balanced.jl"),
- l2 = [3.2935164267930016e-16, 4.6800825611195103e-17, 4.843057532147818e-17,
- 0.0030769233188015013, 1.4809161150389857e-16, 1.509071695038043e-16,
+ l2 = [3.2935164267930016e-16, 4.6800825611195103e-17, 4.843057532147818e-17,
+ 0.0030769233188015013, 1.4809161150389857e-16, 1.509071695038043e-16,
0.0030769233188014935],
- linf = [2.248201624865942e-15, 2.346382070278936e-16, 2.208565017494899e-16,
- 0.026474051138910493, 9.237568031609006e-16, 7.520758026187046e-16,
+ linf = [2.248201624865942e-15, 2.346382070278936e-16, 2.208565017494899e-16,
+ 0.026474051138910493, 9.237568031609006e-16, 7.520758026187046e-16,
0.026474051138910267],
tspan = (0.0, 0.25))
end
@trixi_testset "elixir_shallowwater_twolayer_well_balanced with flux_lax_friedrichs.jl" begin
@test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_twolayer_well_balanced.jl"),
- l2 = [2.0525741072929735e-16, 6.000589392730905e-17, 6.102759428478984e-17,
- 0.0030769233188014905, 1.8421386173122792e-16, 1.8473184927121752e-16,
+ l2 = [2.0525741072929735e-16, 6.000589392730905e-17, 6.102759428478984e-17,
+ 0.0030769233188014905, 1.8421386173122792e-16, 1.8473184927121752e-16,
0.0030769233188014935],
linf = [7.355227538141662e-16, 2.960836949170518e-16, 4.2726562436938764e-16,
0.02647405113891016, 1.038795478061861e-15, 1.0401789378532516e-15,
diff --git a/test/test_unit.jl b/test/test_unit.jl
index 2ce111b2bf4..e70a9be6a4a 100644
--- a/test/test_unit.jl
+++ b/test/test_unit.jl
@@ -402,6 +402,10 @@ isdir(outdir) && rm(outdir, recursive=true)
indicator_hg = IndicatorHennemannGassner(1.0, 0.0, true, "variable", "cache")
@test_nowarn show(stdout, indicator_hg)
+ # TODO: TrixiShallowWater: move unit test
+ indicator_hg_swe = IndicatorHennemannGassnerShallowWater(1.0, 0.0, true, "variable", "cache")
+ @test_nowarn show(stdout, indicator_hg_swe)
+
indicator_loehner = IndicatorLöhner(1.0, "variable", (; cache=nothing))
@test_nowarn show(stdout, indicator_loehner)
diff --git a/test/test_unstructured_2d.jl b/test/test_unstructured_2d.jl
index d4b0d150ca1..fbe88a2a0a3 100644
--- a/test/test_unstructured_2d.jl
+++ b/test/test_unstructured_2d.jl
@@ -1,5 +1,7 @@
module TestExamplesUnstructuredMesh2D
+# TODO: TrixiShallowWater: move any wet/dry and two layer tests
+
using Test
using Trixi
@@ -134,6 +136,14 @@ isdir(outdir) && rm(outdir, recursive=true)
tspan = (0.0, 0.025))
end
+ @trixi_testset "elixir_shallowwater_source_terms.jl with flux_hll" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_source_terms.jl"),
+ l2 = [0.0011197139793938727, 0.015430259691311309, 0.017081031802719554, 5.089218476759981e-6],
+ linf = [0.014300809338967824, 0.12783372461224918, 0.17625472321993918, 2.6407324614341476e-5],
+ surface_flux=(flux_hll, flux_nonconservative_fjordholm_etal),
+ tspan = (0.0, 0.025))
+ end
+
@trixi_testset "elixir_shallowwater_dirichlet.jl" begin
@test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_dirichlet.jl"),
l2 = [1.1577518608940115e-5, 4.867189932537344e-13, 4.647273240470541e-13, 1.1577518608933468e-5],
@@ -155,6 +165,14 @@ isdir(outdir) && rm(outdir, recursive=true)
tspan = (0.0, 0.25))
end
+ @trixi_testset "elixir_shallowwater_three_mound_dam_break.jl" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_three_mound_dam_break.jl"),
+ l2 = [0.0892957892027502, 0.30648836484407915, 2.28712547616214e-15, 0.0008778654298684622],
+ linf = [0.850329472915091, 2.330631694956507, 5.783660020252348e-14, 0.04326237921249021],
+ basis = LobattoLegendreBasis(3),
+ tspan = (0.0, 0.25))
+ end
+
@trixi_testset "elixir_shallowwater_twolayer_convergence.jl" begin
@test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_twolayer_convergence.jl"),
l2 = [0.0007953969898161991, 0.00882074628714633, 0.0024322572528892934,
From c97eb8c47a0a092ab73889c8ce7c838ba7864127 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Sat, 15 Jul 2023 06:51:23 +0200
Subject: [PATCH 12/40] set version to v0.5.33
---
Project.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Project.toml b/Project.toml
index 4a289380850..9dd7ecd023f 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "Trixi"
uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb"
authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "]
-version = "0.5.33-pre"
+version = "0.5.33"
[deps]
CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
From 10e0fa93320cd8722d482a89a0e944955b0c23ee Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Sat, 15 Jul 2023 06:51:38 +0200
Subject: [PATCH 13/40] set development version to v0.5.34-pre
---
Project.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Project.toml b/Project.toml
index 9dd7ecd023f..6c3c7fa0208 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "Trixi"
uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb"
authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "]
-version = "0.5.33"
+version = "0.5.34-pre"
[deps]
CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
From 07981c3e50943a9bdb1a845f918a4e8831714338 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Sat, 15 Jul 2023 12:52:38 +0200
Subject: [PATCH 14/40] throw better error message with MPI for TreeMesh1D
(#1569)
---
src/meshes/tree_mesh.jl | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/meshes/tree_mesh.jl b/src/meshes/tree_mesh.jl
index 34794ded852..93ba982bce9 100644
--- a/src/meshes/tree_mesh.jl
+++ b/src/meshes/tree_mesh.jl
@@ -125,9 +125,9 @@ function TreeMesh(coordinates_min::NTuple{NDIMS, Real},
# TODO: MPI, create nice interface for a parallel tree/mesh
if mpi_isparallel()
- if mpi_isroot() && NDIMS == 3
+ if mpi_isroot() && NDIMS != 2
println(stderr,
- "ERROR: TreeMesh3D does not support parallel execution with MPI")
+ "ERROR: The TreeMesh supports parallel execution with MPI only in 2 dimensions")
MPI.Abort(mpi_comm(), 1)
end
TreeType = ParallelTree{NDIMS}
From 932f43358acba90e3de7909c2bd18c8b630c66b9 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Sat, 15 Jul 2023 16:36:48 +0200
Subject: [PATCH 15/40] allow periodic FDSBP operators (#1570)
* enable fully periodic upwind SBP operators
* 2D and 3D tests
* comment on PeriodicFDSBP
---
Project.toml | 2 +-
.../tree_1d_fdsbp/elixir_advection_upwind.jl | 3 +-
.../elixir_advection_upwind_periodic.jl | 57 +++++++++++++++++++
src/solvers/fdsbp_tree/fdsbp.jl | 3 +
src/solvers/fdsbp_tree/fdsbp_1d.jl | 28 ++++++++-
src/solvers/fdsbp_tree/fdsbp_2d.jl | 28 ++++++++-
src/solvers/fdsbp_tree/fdsbp_3d.jl | 28 ++++++++-
test/test_tree_1d_fdsbp.jl | 15 +++++
test/test_tree_2d_fdsbp.jl | 18 ++++++
test/test_tree_3d_fdsbp.jl | 23 +++++++-
10 files changed, 196 insertions(+), 9 deletions(-)
create mode 100644 examples/tree_1d_fdsbp/elixir_advection_upwind_periodic.jl
diff --git a/Project.toml b/Project.toml
index 6c3c7fa0208..a49cfb2e254 100644
--- a/Project.toml
+++ b/Project.toml
@@ -79,7 +79,7 @@ StaticArrayInterface = "1.4"
StaticArrays = "1"
StrideArrays = "0.1.18"
StructArrays = "0.6"
-SummationByPartsOperators = "0.5.25"
+SummationByPartsOperators = "0.5.41"
TimerOutputs = "0.5"
Triangulate = "2.0"
TriplotBase = "0.1"
diff --git a/examples/tree_1d_fdsbp/elixir_advection_upwind.jl b/examples/tree_1d_fdsbp/elixir_advection_upwind.jl
index 5c50e1a6c64..18dd818e3ca 100644
--- a/examples/tree_1d_fdsbp/elixir_advection_upwind.jl
+++ b/examples/tree_1d_fdsbp/elixir_advection_upwind.jl
@@ -27,7 +27,8 @@ coordinates_min = -1.0
coordinates_max = 1.0
mesh = TreeMesh(coordinates_min, coordinates_max,
initial_refinement_level = 4,
- n_cells_max = 10_000)
+ n_cells_max = 10_000,
+ periodicity = true)
semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition_sin, solver)
diff --git a/examples/tree_1d_fdsbp/elixir_advection_upwind_periodic.jl b/examples/tree_1d_fdsbp/elixir_advection_upwind_periodic.jl
new file mode 100644
index 00000000000..3eb805095f4
--- /dev/null
+++ b/examples/tree_1d_fdsbp/elixir_advection_upwind_periodic.jl
@@ -0,0 +1,57 @@
+# !!! warning "Experimental implementation (upwind SBP)"
+# This is an experimental feature and may change in future releases.
+
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# semidiscretization of the linear scalar advection equation equation
+
+equations = LinearScalarAdvectionEquation1D(1.0)
+
+function initial_condition_sin(x, t, equations::LinearScalarAdvectionEquation1D)
+ return SVector(sinpi(x[1] - equations.advection_velocity[1] * t))
+end
+
+D_upw = upwind_operators(SummationByPartsOperators.periodic_derivative_operator,
+ accuracy_order = 4,
+ xmin = -1.0, xmax = 1.0,
+ N = 64)
+flux_splitting = splitting_lax_friedrichs
+solver = FDSBP(D_upw,
+ surface_integral = SurfaceIntegralUpwind(flux_splitting),
+ volume_integral = VolumeIntegralUpwind(flux_splitting))
+
+coordinates_min = -1.0
+coordinates_max = 1.0
+mesh = TreeMesh(coordinates_min, coordinates_max,
+ initial_refinement_level = 0,
+ n_cells_max = 10_000,
+ periodicity = true)
+
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition_sin, solver)
+
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+tspan = (0.0, 2.0)
+ode = semidiscretize(semi, tspan)
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 1000
+analysis_callback = AnalysisCallback(semi, interval=analysis_interval)
+
+alive_callback = AliveCallback(analysis_interval=analysis_interval)
+
+callbacks = CallbackSet(summary_callback,
+ analysis_callback, alive_callback)
+
+
+###############################################################################
+# run the simulation
+
+sol = solve(ode, RDPK3SpFSAL49(); abstol=1.0e-6, reltol=1.0e-6,
+ ode_default_options()..., callback=callbacks);
+summary_callback() # print the timer summary
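For completeness, the new elixir can be run directly from the REPL with Trixi's `trixi_include`; the path below assumes the examples shipped with a development checkout of the package:

    using Trixi
    trixi_include(joinpath(pkgdir(Trixi), "examples", "tree_1d_fdsbp",
                           "elixir_advection_upwind_periodic.jl"))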
diff --git a/src/solvers/fdsbp_tree/fdsbp.jl b/src/solvers/fdsbp_tree/fdsbp.jl
index cbb6fd16243..11b09c6df9c 100644
--- a/src/solvers/fdsbp_tree/fdsbp.jl
+++ b/src/solvers/fdsbp_tree/fdsbp.jl
@@ -27,6 +27,9 @@ The other arguments have the same meaning as in [`DG`](@ref) or [`DGSEM`](@ref).
"""
const FDSBP = DG{Basis} where {Basis <: AbstractDerivativeOperator}
+# Internal abbreviation for easier-to-read dispatch (not exported)
+const PeriodicFDSBP = FDSBP{Basis} where {Basis <: AbstractPeriodicDerivativeOperator}
+
function FDSBP(D_SBP::AbstractDerivativeOperator; surface_integral, volume_integral)
# `nothing` is passed as `mortar`
return DG(D_SBP, nothing, surface_integral, volume_integral)
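The `PeriodicFDSBP` constant above is a standard Julia trick: a type alias that narrows a type parameter so that methods (like the no-op surface integrals added below) can dispatch on it. A self-contained toy version of the pattern, deliberately unrelated to the actual Trixi types:

    abstract type AbstractOperator end
    abstract type AbstractPeriodicOperator <: AbstractOperator end

    struct ToySolver{Basis}
        basis::Basis
    end
    const PeriodicToySolver = ToySolver{Basis} where {Basis <: AbstractPeriodicOperator}

    struct BoundedOp <: AbstractOperator end
    struct PeriodicOp <: AbstractPeriodicOperator end

    surface_terms(::ToySolver) = "compute surface integrals at element boundaries"
    surface_terms(::PeriodicToySolver) = "single periodic element: nothing to do"

    surface_terms(ToySolver(BoundedOp()))   # hits the generic method
    surface_terms(ToySolver(PeriodicOp()))  # hits the specialized no-op method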
diff --git a/src/solvers/fdsbp_tree/fdsbp_1d.jl b/src/solvers/fdsbp_tree/fdsbp_1d.jl
index c7712074940..0de0cff4851 100644
--- a/src/solvers/fdsbp_tree/fdsbp_1d.jl
+++ b/src/solvers/fdsbp_tree/fdsbp_1d.jl
@@ -165,6 +165,14 @@ function calc_surface_integral!(du, u, mesh::TreeMesh{1},
return nothing
end
+# Periodic FDSBP operators need to use a single element without boundaries
+function calc_surface_integral!(du, u, mesh::TreeMesh1D,
+ equations, surface_integral::SurfaceIntegralStrongForm,
+ dg::PeriodicFDSBP, cache)
+ @assert nelements(dg, cache) == 1
+ return nothing
+end
+
# Specialized interface flux computation because the upwind solver does
# not require a standard numerical flux (Riemann solver). The flux splitting
# already separates the solution information into right-traveling and
@@ -239,13 +247,25 @@ function calc_surface_integral!(du, u, mesh::TreeMesh{1},
return nothing
end
+# Periodic FDSBP operators need to use a single element without boundaries
+function calc_surface_integral!(du, u, mesh::TreeMesh1D,
+ equations, surface_integral::SurfaceIntegralUpwind,
+ dg::PeriodicFDSBP, cache)
+ @assert nelements(dg, cache) == 1
+ return nothing
+end
+
# AnalysisCallback
function integrate_via_indices(func::Func, u,
mesh::TreeMesh{1}, equations,
dg::FDSBP, cache, args...; normalize = true) where {Func}
# TODO: FD. This is rather inefficient right now and allocates...
- weights = diag(SummationByPartsOperators.mass_matrix(dg.basis))
+ M = SummationByPartsOperators.mass_matrix(dg.basis)
+ if M isa UniformScaling
+ M = M(nnodes(dg))
+ end
+ weights = diag(M)
# Initialize integral with zeros of the right shape
integral = zero(func(u, 1, 1, equations, dg, args...))
@@ -271,7 +291,11 @@ function calc_error_norms(func, u, t, analyzer,
mesh::TreeMesh{1}, equations, initial_condition,
dg::FDSBP, cache, cache_analysis)
# TODO: FD. This is rather inefficient right now and allocates...
- weights = diag(SummationByPartsOperators.mass_matrix(dg.basis))
+ M = SummationByPartsOperators.mass_matrix(dg.basis)
+ if M isa UniformScaling
+ M = M(nnodes(dg))
+ end
+ weights = diag(M)
@unpack node_coordinates = cache.elements
# Set up data structures
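The reason for the `UniformScaling` branch above: for periodic SBP operators, `SummationByPartsOperators.mass_matrix` can return a `UniformScaling` (essentially `h * I`), which has no size and therefore no `diag`. Calling it with an integer materializes a concrete `Diagonal` of that size. A minimal sketch using only LinearAlgebra (the scaling value and size are made up):

    using LinearAlgebra

    M = 0.1 * I                # stands in for the mass matrix of a periodic operator
    if M isa UniformScaling
        M = M(16)              # materialize as a 16×16 Diagonal matrix
    end
    weights = diag(M)          # now well defined: a vector of 16 entries equal to 0.1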
diff --git a/src/solvers/fdsbp_tree/fdsbp_2d.jl b/src/solvers/fdsbp_tree/fdsbp_2d.jl
index 241e0d95342..beff605629a 100644
--- a/src/solvers/fdsbp_tree/fdsbp_2d.jl
+++ b/src/solvers/fdsbp_tree/fdsbp_2d.jl
@@ -201,6 +201,14 @@ function calc_surface_integral!(du, u, mesh::TreeMesh{2},
return nothing
end
+# Periodic FDSBP operators need to use a single element without boundaries
+function calc_surface_integral!(du, u, mesh::TreeMesh2D,
+ equations, surface_integral::SurfaceIntegralStrongForm,
+ dg::PeriodicFDSBP, cache)
+ @assert nelements(dg, cache) == 1
+ return nothing
+end
+
# Specialized interface flux computation because the upwind solver does
# not require a standard numerical flux (Riemann solver). The flux splitting
# already separates the solution information into right-traveling and
@@ -295,12 +303,24 @@ function calc_surface_integral!(du, u, mesh::TreeMesh{2},
return nothing
end
+# Periodic FDSBP operators need to use a single element without boundaries
+function calc_surface_integral!(du, u, mesh::TreeMesh2D,
+ equations, surface_integral::SurfaceIntegralUpwind,
+ dg::PeriodicFDSBP, cache)
+ @assert nelements(dg, cache) == 1
+ return nothing
+end
+
# AnalysisCallback
function integrate_via_indices(func::Func, u,
mesh::TreeMesh{2}, equations,
dg::FDSBP, cache, args...; normalize = true) where {Func}
# TODO: FD. This is rather inefficient right now and allocates...
- weights = diag(SummationByPartsOperators.mass_matrix(dg.basis))
+ M = SummationByPartsOperators.mass_matrix(dg.basis)
+ if M isa UniformScaling
+ M = M(nnodes(dg))
+ end
+ weights = diag(M)
# Initialize integral with zeros of the right shape
integral = zero(func(u, 1, 1, 1, equations, dg, args...))
@@ -326,7 +346,11 @@ function calc_error_norms(func, u, t, analyzer,
mesh::TreeMesh{2}, equations, initial_condition,
dg::FDSBP, cache, cache_analysis)
# TODO: FD. This is rather inefficient right now and allocates...
- weights = diag(SummationByPartsOperators.mass_matrix(dg.basis))
+ M = SummationByPartsOperators.mass_matrix(dg.basis)
+ if M isa UniformScaling
+ M = M(nnodes(dg))
+ end
+ weights = diag(M)
@unpack node_coordinates = cache.elements
# Set up data structures
diff --git a/src/solvers/fdsbp_tree/fdsbp_3d.jl b/src/solvers/fdsbp_tree/fdsbp_3d.jl
index a4f69d3d481..0c3f18b6d6e 100644
--- a/src/solvers/fdsbp_tree/fdsbp_3d.jl
+++ b/src/solvers/fdsbp_tree/fdsbp_3d.jl
@@ -237,6 +237,14 @@ function calc_surface_integral!(du, u, mesh::TreeMesh{3},
return nothing
end
+# Periodic FDSBP operators need to use a single element without boundaries
+function calc_surface_integral!(du, u, mesh::TreeMesh3D,
+ equations, surface_integral::SurfaceIntegralStrongForm,
+ dg::PeriodicFDSBP, cache)
+ @assert nelements(dg, cache) == 1
+ return nothing
+end
+
# Specialized interface flux computation because the upwind solver does
# not require a standard numerical flux (Riemann solver). The flux splitting
# already separates the solution information into right-traveling and
@@ -346,13 +354,25 @@ function calc_surface_integral!(du, u, mesh::TreeMesh{3},
return nothing
end
+# Periodic FDSBP operators need to use a single element without boundaries
+function calc_surface_integral!(du, u, mesh::TreeMesh3D,
+ equations, surface_integral::SurfaceIntegralUpwind,
+ dg::PeriodicFDSBP, cache)
+ @assert nelements(dg, cache) == 1
+ return nothing
+end
+
# AnalysisCallback
function integrate_via_indices(func::Func, u,
mesh::TreeMesh{3}, equations,
dg::FDSBP, cache, args...; normalize = true) where {Func}
# TODO: FD. This is rather inefficient right now and allocates...
- weights = diag(SummationByPartsOperators.mass_matrix(dg.basis))
+ M = SummationByPartsOperators.mass_matrix(dg.basis)
+ if M isa UniformScaling
+ M = M(nnodes(dg))
+ end
+ weights = diag(M)
# Initialize integral with zeros of the right shape
integral = zero(func(u, 1, 1, 1, 1, equations, dg, args...))
@@ -378,7 +398,11 @@ function calc_error_norms(func, u, t, analyzer,
mesh::TreeMesh{3}, equations, initial_condition,
dg::FDSBP, cache, cache_analysis)
# TODO: FD. This is rather inefficient right now and allocates...
- weights = diag(SummationByPartsOperators.mass_matrix(dg.basis))
+ M = SummationByPartsOperators.mass_matrix(dg.basis)
+ if M isa UniformScaling
+ M = M(nnodes(dg))
+ end
+ weights = diag(M)
@unpack node_coordinates = cache.elements
# Set up data structures
diff --git a/test/test_tree_1d_fdsbp.jl b/test/test_tree_1d_fdsbp.jl
index 118385c34b3..ce0ca660d35 100644
--- a/test/test_tree_1d_fdsbp.jl
+++ b/test/test_tree_1d_fdsbp.jl
@@ -23,6 +23,21 @@ EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_1d_fdsbp")
@test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000
end
end
+
+ @trixi_testset "elixir_advection_upwind_periodic.jl" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_upwind_periodic.jl"),
+ l2 = [1.1672962783692568e-5],
+ linf = [1.650514414558435e-5])
+
+ # Ensure that we do not have excessive memory allocations
+ # (e.g., from type instabilities)
+ let
+ t = sol.t[end]
+ u_ode = sol.u[end]
+ du_ode = similar(u_ode)
+ @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000
+ end
+ end
end
@testset "Inviscid Burgers" begin
diff --git a/test/test_tree_2d_fdsbp.jl b/test/test_tree_2d_fdsbp.jl
index 7c58ef89a6c..e81c82f3f34 100644
--- a/test/test_tree_2d_fdsbp.jl
+++ b/test/test_tree_2d_fdsbp.jl
@@ -23,6 +23,24 @@ EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_2d_fdsbp")
@test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000
end
end
+
+ @trixi_testset "elixir_advection_extended.jl with periodic operators" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_extended.jl"),
+ l2 = [1.1239649404463432e-5],
+ linf = [1.5895264629195438e-5],
+ D_SBP = SummationByPartsOperators.periodic_derivative_operator(
+ derivative_order = 1, accuracy_order = 4, xmin = 0.0, xmax = 1.0, N = 40),
+ initial_refinement_level = 0)
+
+ # Ensure that we do not have excessive memory allocations
+ # (e.g., from type instabilities)
+ let
+ t = sol.t[end]
+ u_ode = sol.u[end]
+ du_ode = similar(u_ode)
+ @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000
+ end
+ end
end
@testset "Compressible Euler" begin
diff --git a/test/test_tree_3d_fdsbp.jl b/test/test_tree_3d_fdsbp.jl
index 9dceab38031..106dd007b09 100644
--- a/test/test_tree_3d_fdsbp.jl
+++ b/test/test_tree_3d_fdsbp.jl
@@ -7,7 +7,7 @@ include("test_trixi.jl")
EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_3d_fdsbp")
-@testset "Compressible Euler" begin
+@testset "Linear scalar advection" begin
@trixi_testset "elixir_advection_extended.jl" begin
@test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_extended.jl"),
l2 = [0.005355755365412444],
@@ -23,6 +23,27 @@ EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_3d_fdsbp")
end
end
+ @trixi_testset "elixir_advection_extended.jl with periodic operators" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_extended.jl"),
+ l2 = [1.3819894522373702e-8],
+ linf = [3.381866298113323e-8],
+ D_SBP = SummationByPartsOperators.periodic_derivative_operator(
+ derivative_order = 1, accuracy_order = 4, xmin = 0.0, xmax = 1.0, N = 10),
+ initial_refinement_level = 0,
+ tspan = (0.0, 5.0))
+
+ # Ensure that we do not have excessive memory allocations
+ # (e.g., from type instabilities)
+ let
+ t = sol.t[end]
+ u_ode = sol.u[end]
+ du_ode = similar(u_ode)
+ @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000
+ end
+ end
+end
+
+@testset "Compressible Euler" begin
@trixi_testset "elixir_euler_convergence.jl" begin
@test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_convergence.jl"),
l2 = [2.247522803543667e-5, 2.2499169224681058e-5, 2.24991692246826e-5, 2.2499169224684707e-5, 5.814121361417382e-5],
From fd239da5af1ba619fa2457c6318a5f3ab3be59b3 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Sun, 16 Jul 2023 06:11:40 +0200
Subject: [PATCH 16/40] set version to v0.5.34
---
Project.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Project.toml b/Project.toml
index a49cfb2e254..7f2a52b0aaf 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "Trixi"
uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb"
authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "]
-version = "0.5.34-pre"
+version = "0.5.34"
[deps]
CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
From d96514f6d58e48b33816f58e02959d167954fdea Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Sun, 16 Jul 2023 06:11:55 +0200
Subject: [PATCH 17/40] set development version to v0.5.35-pre
---
Project.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Project.toml b/Project.toml
index 7f2a52b0aaf..4c187ed38ff 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "Trixi"
uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb"
authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "]
-version = "0.5.34"
+version = "0.5.35-pre"
[deps]
CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
From 75d70fdf5706ccdc5290303675bcd5ad1cf7d462 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 17 Jul 2023 19:15:37 +0200
Subject: [PATCH 18/40] Bump crate-ci/typos from 1.16.0 to 1.16.1 (#1573)
Bumps [crate-ci/typos](https://github.com/crate-ci/typos) from 1.16.0 to 1.16.1.
- [Release notes](https://github.com/crate-ci/typos/releases)
- [Changelog](https://github.com/crate-ci/typos/blob/master/CHANGELOG.md)
- [Commits](https://github.com/crate-ci/typos/compare/v1.16.0...v1.16.1)
---
updated-dependencies:
- dependency-name: crate-ci/typos
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
.github/workflows/SpellCheck.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/SpellCheck.yml b/.github/workflows/SpellCheck.yml
index bb5a32f72ee..f72c3b0947b 100644
--- a/.github/workflows/SpellCheck.yml
+++ b/.github/workflows/SpellCheck.yml
@@ -10,4 +10,4 @@ jobs:
- name: Checkout Actions Repository
uses: actions/checkout@v3
- name: Check spelling
- uses: crate-ci/typos@v1.16.0
+ uses: crate-ci/typos@v1.16.1
From a12f82da43a16d59db16063557fef245c87b6c0e Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Tue, 18 Jul 2023 05:52:17 +0200
Subject: [PATCH 19/40] fix typos in comments (#1572)
---
examples/tree_1d_fdsbp/elixir_advection_upwind.jl | 2 +-
examples/tree_1d_fdsbp/elixir_advection_upwind_periodic.jl | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/examples/tree_1d_fdsbp/elixir_advection_upwind.jl b/examples/tree_1d_fdsbp/elixir_advection_upwind.jl
index 18dd818e3ca..1f2498e0866 100644
--- a/examples/tree_1d_fdsbp/elixir_advection_upwind.jl
+++ b/examples/tree_1d_fdsbp/elixir_advection_upwind.jl
@@ -5,7 +5,7 @@ using OrdinaryDiffEq
using Trixi
###############################################################################
-# semidiscretization of the linear scalar advection equation equation
+# semidiscretization of the linear scalar advection equation
equations = LinearScalarAdvectionEquation1D(1.0)
diff --git a/examples/tree_1d_fdsbp/elixir_advection_upwind_periodic.jl b/examples/tree_1d_fdsbp/elixir_advection_upwind_periodic.jl
index 3eb805095f4..035d3568a80 100644
--- a/examples/tree_1d_fdsbp/elixir_advection_upwind_periodic.jl
+++ b/examples/tree_1d_fdsbp/elixir_advection_upwind_periodic.jl
@@ -5,7 +5,7 @@ using OrdinaryDiffEq
using Trixi
###############################################################################
-# semidiscretization of the linear scalar advection equation equation
+# semidiscretization of the linear scalar advection equation
equations = LinearScalarAdvectionEquation1D(1.0)
From 375384659cb57a80e253c5e685db8ec298e30d8c Mon Sep 17 00:00:00 2001
From: Michael Schlottke-Lakemper
Date: Wed, 19 Jul 2023 07:39:37 +0200
Subject: [PATCH 20/40] Add talk announcement for JuliaCon 2023 (#1575)
---
README.md | 10 ++++++++++
1 file changed, 10 insertions(+)
diff --git a/README.md b/README.md
index ccd70b6daf8..7eaee8750dd 100644
--- a/README.md
+++ b/README.md
@@ -17,6 +17,16 @@
+***
+**Trixi.jl at JuliaCon 2023**
+At this year's JuliaCon, we will present an online contribution involving Trixi.jl:
+
+* [Scaling Trixi.jl to more than 10,000 cores using MPI](https://pretalx.com/juliacon2023/talk/PC8PZ8/),
+ 27th July 2023, 10:30–11:30 (US/Eastern), 32-G449 (Kiva)
+
+We are looking forward to seeing you there ♥️
+***
+
**Trixi.jl** is a numerical simulation framework for hyperbolic conservation
laws written in [Julia](https://julialang.org). A key objective for the
framework is to be useful to both scientists and students. Therefore, next to
From b0ec66ea004c84d8487c4318a54933da8c827c92 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Wed, 19 Jul 2023 12:16:53 +0200
Subject: [PATCH 21/40] fix GC time percentage output (#1576)
---
src/callbacks_step/analysis.jl | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/callbacks_step/analysis.jl b/src/callbacks_step/analysis.jl
index 8cf43a1d15e..7c453aab633 100644
--- a/src/callbacks_step/analysis.jl
+++ b/src/callbacks_step/analysis.jl
@@ -267,7 +267,7 @@ function (analysis_callback::AnalysisCallback)(u_ode, du_ode, integrator, semi)
gc_time_absolute = 1.0e-9 * (Base.gc_time_ns() - analysis_callback.start_gc_time)
# Compute the percentage of total time that was spent in garbage collection
- gc_time_percentage = gc_time_absolute / runtime_absolute
+ gc_time_percentage = gc_time_absolute / runtime_absolute * 100
# Obtain the current memory usage of the Julia garbage collector, in MiB, i.e., the total size of
# objects in memory that have been allocated by the JIT compiler or the user code.
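The fix above is pure arithmetic: the ratio of GC time to total runtime has to be scaled by 100 before it is reported as a percentage. With made-up numbers:

    gc_time_absolute = 0.25                                          # seconds spent in GC
    runtime_absolute = 50.0                                          # total runtime in seconds
    gc_time_percentage = gc_time_absolute / runtime_absolute * 100   # 0.5 (%), previously reported as 0.005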
From e0aad3cad1a2581eb79cf4fd1d06e7d7fb2c6379 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Wed, 19 Jul 2023 14:11:35 +0200
Subject: [PATCH 22/40] set version to v0.5.35
---
Project.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Project.toml b/Project.toml
index 4c187ed38ff..1818d1c56c9 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "Trixi"
uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb"
authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "]
-version = "0.5.35-pre"
+version = "0.5.35"
[deps]
CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
From 67d137d6712f28bbe99ffef3d003afe96c47aade Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Wed, 19 Jul 2023 14:11:46 +0200
Subject: [PATCH 23/40] set development version to v0.5.36-pre
---
Project.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Project.toml b/Project.toml
index 1818d1c56c9..07c4fe55ad4 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "Trixi"
uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb"
authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "]
-version = "0.5.35"
+version = "0.5.36-pre"
[deps]
CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
From 1aec5fa7e17a8011baf77bfa1822491693e1986d Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Fri, 21 Jul 2023 11:56:38 +0200
Subject: [PATCH 24/40] test threaded time integration (#1581)
* test threaded time integration
* link to upstream issue in comment
---
examples/dgmulti_2d/elixir_euler_curved.jl | 3 +-
.../elixir_advection_diffusion.jl | 3 +-
.../tree_2d_dgsem/elixir_advection_restart.jl | 3 +-
test/Project.toml | 6 ++--
test/test_threaded.jl | 28 +++++++++++++++++++
5 files changed, 37 insertions(+), 6 deletions(-)
diff --git a/examples/dgmulti_2d/elixir_euler_curved.jl b/examples/dgmulti_2d/elixir_euler_curved.jl
index a3ba62f1cfb..39e3a0a0360 100644
--- a/examples/dgmulti_2d/elixir_euler_curved.jl
+++ b/examples/dgmulti_2d/elixir_euler_curved.jl
@@ -42,7 +42,8 @@ callbacks = CallbackSet(summary_callback, alive_callback, analysis_callback)
###############################################################################
# run the simulation
-sol = solve(ode, RDPK3SpFSAL49(); abstol=1.0e-6, reltol=1.0e-6,
+alg = RDPK3SpFSAL49()
+sol = solve(ode, alg; abstol=1.0e-6, reltol=1.0e-6,
ode_default_options()..., callback=callbacks);
summary_callback() # print the timer summary
diff --git a/examples/tree_2d_dgsem/elixir_advection_diffusion.jl b/examples/tree_2d_dgsem/elixir_advection_diffusion.jl
index e96e1b5a171..a716bd278b8 100644
--- a/examples/tree_2d_dgsem/elixir_advection_diffusion.jl
+++ b/examples/tree_2d_dgsem/elixir_advection_diffusion.jl
@@ -75,8 +75,9 @@ callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback)
# run the simulation
# OrdinaryDiffEq's `solve` method evolves the solution in time and executes the passed callbacks
+alg = RDPK3SpFSAL49()
time_int_tol = 1.0e-11
-sol = solve(ode, RDPK3SpFSAL49(); abstol=time_int_tol, reltol=time_int_tol,
+sol = solve(ode, alg; abstol=time_int_tol, reltol=time_int_tol,
ode_default_options()..., callback=callbacks)
# Print the timer summary
diff --git a/examples/tree_2d_dgsem/elixir_advection_restart.jl b/examples/tree_2d_dgsem/elixir_advection_restart.jl
index 4ceb5932573..72efb7d0c84 100644
--- a/examples/tree_2d_dgsem/elixir_advection_restart.jl
+++ b/examples/tree_2d_dgsem/elixir_advection_restart.jl
@@ -26,7 +26,8 @@ ode = semidiscretize(semi, tspan, restart_filename);
# Do not overwrite the initial snapshot written by elixir_advection_extended.jl.
save_solution.condition.save_initial_solution = false
-integrator = init(ode, CarpenterKennedy2N54(williamson_condition=false),
+alg = CarpenterKennedy2N54(williamson_condition=false)
+integrator = init(ode, alg,
dt=dt, # solve needs some value here but it will be overwritten by the stepsize_callback
save_everystep=false, callback=callbacks)
diff --git a/test/Project.toml b/test/Project.toml
index cae1d4ff396..7115a19b441 100644
--- a/test/Project.toml
+++ b/test/Project.toml
@@ -24,9 +24,9 @@ Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
[preferences.OrdinaryDiffEq]
PrecompileAutoSpecialize = false
PrecompileAutoSwitch = false
-PrecompileDefaultSpecialize = true
+PrecompileDefaultSpecialize = false
PrecompileFunctionWrapperSpecialize = false
-PrecompileLowStorage = true
+PrecompileLowStorage = false
PrecompileNoSpecialize = false
-PrecompileNonStiff = true
+PrecompileNonStiff = false
PrecompileStiff = false
diff --git a/test/test_threaded.jl b/test/test_threaded.jl
index 1e750707981..323d12d7091 100644
--- a/test/test_threaded.jl
+++ b/test/test_threaded.jl
@@ -18,6 +18,14 @@ Trixi.mpi_isroot() && isdir(outdir) && rm(outdir, recursive=true)
linf = [6.314906965243505e-5])
end
+ @trixi_testset "elixir_advection_restart.jl with threaded time integration" begin
+ @test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", "elixir_advection_restart.jl"),
+ alg = CarpenterKennedy2N54(williamson_condition = false, thread = OrdinaryDiffEq.True()),
+ # Expected errors are exactly the same as in the serial test!
+ l2 = [7.81674284320524e-6],
+ linf = [6.314906965243505e-5])
+ end
+
@trixi_testset "elixir_advection_amr_refine_twice.jl" begin
@test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", "elixir_advection_amr_refine_twice.jl"),
l2 = [0.00020547512522578292],
@@ -42,6 +50,15 @@ Trixi.mpi_isroot() && isdir(outdir) && rm(outdir, recursive=true)
l2 = [0.061751715597716854, 0.05018223615408711, 0.05018989446443463, 0.225871559730513],
linf = [0.29347582879608825, 0.31081249232844693, 0.3107380389947736, 1.0540358049885143])
end
+
+ @trixi_testset "elixir_advection_diffusion.jl" begin
+ @test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", "elixir_advection_diffusion.jl"),
+ initial_refinement_level = 2, tspan = (0.0, 0.4), polydeg = 5,
+ alg = RDPK3SpFSAL49(thread = OrdinaryDiffEq.True()),
+ l2 = [4.0915532997994255e-6],
+ linf = [2.3040850347877395e-5]
+ )
+ end
end
@@ -108,6 +125,17 @@ Trixi.mpi_isroot() && isdir(outdir) && rm(outdir, recursive=true)
)
end
+ @trixi_testset "elixir_euler_curved.jl with threaded time integration" begin
+ @test_broken false
+ # TODO: This is currently broken and needs to be fixed upstream
+ # See https://github.com/JuliaSIMD/StrideArrays.jl/issues/77
+ # @test_trixi_include(joinpath(examples_dir(), "dgmulti_2d", "elixir_euler_curved.jl"),
+ # alg = RDPK3SpFSAL49(thread = OrdinaryDiffEq.True()),
+ # l2 = [1.720476068165337e-5, 1.592168205710526e-5, 1.592168205812963e-5, 4.894094865697305e-5],
+ # linf = [0.00010525416930584619, 0.00010003778091061122, 0.00010003778085621029, 0.00036426282101720275]
+ # )
+ end
+
@trixi_testset "elixir_euler_triangulate_pkg_mesh.jl" begin
@test_trixi_include(joinpath(examples_dir(), "dgmulti_2d", "elixir_euler_triangulate_pkg_mesh.jl"),
l2 = [2.344080455438114e-6, 1.8610038753097983e-6, 2.4095165666095305e-6, 6.373308158814308e-6],
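The point of pulling the algorithm into a top-level variable `alg` is that `@test_trixi_include` overrides elixir variables by keyword, so the tests above can swap in a thread-parallel time integrator without modifying the elixirs. A sketch of the override, assuming `ode`, `dt`, and `callbacks` are set up as in the restart elixir:

    using OrdinaryDiffEq
    # Thread-parallel variant of the algorithm used in elixir_advection_restart.jl
    alg = CarpenterKennedy2N54(williamson_condition = false, thread = OrdinaryDiffEq.True())
    integrator = init(ode, alg,
                      dt = dt, save_everystep = false, callback = callbacks)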
From 036eaed82b92be9376c5b610d8d40eddf45ca1fa Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Mon, 24 Jul 2023 13:26:16 +0200
Subject: [PATCH 25/40] reset threads in semidiscretize (#1584)
I added the option to reset the threads from Polyester.jl in `semidiscretize`.
However, I did not document it in the docstring since we have not documented
that we use Polyester.jl threads in general - and the resetting is specific
to Polyester.jl. I was not sure whether we still want to keep the option
of changing the threading backend at some point - although I do not see a good
reason why we should do so.
---
Project.toml | 2 +-
src/Trixi.jl | 2 +-
src/semidiscretization/semidiscretization.jl | 20 +++++++++++++++++--
...semidiscretization_hyperbolic_parabolic.jl | 10 +++++++++-
4 files changed, 29 insertions(+), 5 deletions(-)
diff --git a/Project.toml b/Project.toml
index 07c4fe55ad4..00bf2718d8b 100644
--- a/Project.toml
+++ b/Project.toml
@@ -65,7 +65,7 @@ MuladdMacro = "0.2.2"
Octavian = "0.3.5"
OffsetArrays = "1.3"
P4est = "0.4"
-Polyester = "0.3.4, 0.5, 0.6, 0.7"
+Polyester = "0.7.5"
PrecompileTools = "1.1"
RecipesBase = "1.1"
Reexport = "1.0"
diff --git a/src/Trixi.jl b/src/Trixi.jl
index cf6158e29eb..b0c872b1904 100644
--- a/src/Trixi.jl
+++ b/src/Trixi.jl
@@ -51,7 +51,7 @@ using LoopVectorization: LoopVectorization, @turbo, indices
using StaticArrayInterface: static_length # used by LoopVectorization
using MuladdMacro: @muladd
using Octavian: Octavian, matmul!
-using Polyester: @batch # You know, the cheapest threads you can find...
+using Polyester: Polyester, @batch # You know, the cheapest threads you can find...
using OffsetArrays: OffsetArray, OffsetVector
using P4est
using Setfield: @set
diff --git a/src/semidiscretization/semidiscretization.jl b/src/semidiscretization/semidiscretization.jl
index ac312c57c89..fbdcd73e2a8 100644
--- a/src/semidiscretization/semidiscretization.jl
+++ b/src/semidiscretization/semidiscretization.jl
@@ -70,7 +70,15 @@ end
Wrap the semidiscretization `semi` as an ODE problem in the time interval `tspan`
that can be passed to `solve` from the [SciML ecosystem](https://diffeq.sciml.ai/latest/).
"""
-function semidiscretize(semi::AbstractSemidiscretization, tspan)
+function semidiscretize(semi::AbstractSemidiscretization, tspan;
+ reset_threads = true)
+ # Optionally reset Polyester.jl threads. See
+ # https://github.com/trixi-framework/Trixi.jl/issues/1583
+ # https://github.com/JuliaSIMD/Polyester.jl/issues/30
+ if reset_threads
+ Polyester.reset_threads!()
+ end
+
u0_ode = compute_coefficients(first(tspan), semi)
# TODO: MPI, do we want to synchronize loading and print debug statements, e.g. using
# mpi_isparallel() && MPI.Barrier(mpi_comm())
@@ -88,7 +96,15 @@ that can be passed to `solve` from the [SciML ecosystem](https://diffeq.sciml.ai
The initial condition etc. is taken from the `restart_file`.
"""
function semidiscretize(semi::AbstractSemidiscretization, tspan,
- restart_file::AbstractString)
+ restart_file::AbstractString;
+ reset_threads = true)
+ # Optionally reset Polyester.jl threads. See
+ # https://github.com/trixi-framework/Trixi.jl/issues/1583
+ # https://github.com/JuliaSIMD/Polyester.jl/issues/30
+ if reset_threads
+ Polyester.reset_threads!()
+ end
+
u0_ode = load_restart_file(semi, restart_file)
# TODO: MPI, do we want to synchronize loading and print debug statements, e.g. using
# mpi_isparallel() && MPI.Barrier(mpi_comm())
diff --git a/src/semidiscretization/semidiscretization_hyperbolic_parabolic.jl b/src/semidiscretization/semidiscretization_hyperbolic_parabolic.jl
index f54bc744164..8f1e38c891b 100644
--- a/src/semidiscretization/semidiscretization_hyperbolic_parabolic.jl
+++ b/src/semidiscretization/semidiscretization_hyperbolic_parabolic.jl
@@ -274,7 +274,15 @@ The parabolic right-hand side is the first function of the split ODE problem and
will be used by default by the implicit part of IMEX methods from the
SciML ecosystem.
"""
-function semidiscretize(semi::SemidiscretizationHyperbolicParabolic, tspan)
+function semidiscretize(semi::SemidiscretizationHyperbolicParabolic, tspan;
+ reset_threads = true)
+ # Optionally reset Polyester.jl threads. See
+ # https://github.com/trixi-framework/Trixi.jl/issues/1583
+ # https://github.com/JuliaSIMD/Polyester.jl/issues/30
+ if reset_threads
+ Polyester.reset_threads!()
+ end
+
u0_ode = compute_coefficients(first(tspan), semi)
# TODO: MPI, do we want to synchronize loading and print debug statements, e.g. using
# mpi_isparallel() && MPI.Barrier(mpi_comm())
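A short usage sketch of the new keyword, based on a standard advection setup (a sketch only, not part of the patch): the default `reset_threads = true` resets Polyester.jl's threads before computing the initial condition, and passing `false` opts out.

    using Trixi

    equations = LinearScalarAdvectionEquation1D(1.0)
    solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs)
    mesh = TreeMesh(-1.0, 1.0, initial_refinement_level = 2, n_cells_max = 10_000)
    semi = SemidiscretizationHyperbolic(mesh, equations,
                                        initial_condition_convergence_test, solver)

    ode = semidiscretize(semi, (0.0, 1.0))                        # resets Polyester.jl threads
    ode = semidiscretize(semi, (0.0, 1.0), reset_threads = false) # keep the current thread state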
From 253f63ef042ef3f10ca15c5d21327a3b6ce4bcdc Mon Sep 17 00:00:00 2001
From: Jesse Chan <1156048+jlchan@users.noreply.github.com>
Date: Mon, 24 Jul 2023 22:46:27 -0500
Subject: [PATCH 26/40] Fix CI failures related to Makie (#1586)
* unrelated cleanup
* fix CI issues?
---
ext/TrixiMakieExt.jl | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/ext/TrixiMakieExt.jl b/ext/TrixiMakieExt.jl
index 1eb11f6a422..8cd7576a6e5 100644
--- a/ext/TrixiMakieExt.jl
+++ b/ext/TrixiMakieExt.jl
@@ -335,7 +335,7 @@ end
# ================== new Makie plot recipes ====================
# This initializes a Makie recipe, which creates a new type definition which Makie uses to create
-# custom `trixiheatmap` plots. See also https://makie.juliaplots.org/stable/recipes.html
+# custom `trixiheatmap` plots. See also https://docs.makie.org/stable/documentation/recipes/
Makie.@recipe(TrixiHeatmap, plot_data_series) do scene
Makie.Theme(colormap = default_Makie_colormap())
end
@@ -346,9 +346,8 @@ function Makie.plot!(myplot::TrixiHeatmap)
plotting_mesh = global_plotting_triangulation_makie(pds;
set_z_coordinate_zero = true)
- @unpack variable_id = pds
pd = pds.plot_data
- solution_z = vec(StructArrays.component(pd.data, variable_id))
+ solution_z = vec(StructArrays.component(pd.data, pds.variable_id))
Makie.mesh!(myplot, plotting_mesh, color = solution_z, shading = false,
colormap = myplot[:colormap])
myplot.colorrange = extrema(solution_z)
@@ -411,7 +410,7 @@ function Makie.plot!(fig, pd::PlotData2DTriangulated;
row = row_list[variable_to_plot]
col = col_list[variable_to_plot]
- Makie.Colorbar(fig[row, col][1, 2], plt)
+ Makie.Colorbar(fig[row, col][1, 2], colormap = colormap)
ax.aspect = Makie.DataAspect() # equal aspect ratio
ax.title = variable_name
From 6e7e3b5bfb4e4a232f04a9b0d3c711ad414a56c2 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Tue, 25 Jul 2023 07:08:05 +0200
Subject: [PATCH 27/40] activate previously broken test and add allocation
tests (#1582)
* activate previously broken test and add allocation tests
* fix allocations in prolong2interfaces!
* fix allocations in calc_sources!
* fix allocations in apply_jacobian!
* fixed FDSBP, elixir_euler_convergence.jl
* elixir_euler_triangulate_pkg_mesh.jl is only broken with multithreading
* Update test_threaded.jl
* Update test_threaded.jl
---
src/solvers/dgsem_tree/dg_1d.jl | 20 ++--
src/solvers/dgsem_tree/dg_2d.jl | 25 +++--
src/solvers/dgsem_tree/dg_3d.jl | 29 +++---
test/test_threaded.jl | 175 ++++++++++++++++++++++++++++++--
4 files changed, 212 insertions(+), 37 deletions(-)
diff --git a/src/solvers/dgsem_tree/dg_1d.jl b/src/solvers/dgsem_tree/dg_1d.jl
index c66f427cce3..b5bb076f3b7 100644
--- a/src/solvers/dgsem_tree/dg_1d.jl
+++ b/src/solvers/dgsem_tree/dg_1d.jl
@@ -385,15 +385,17 @@ end
function prolong2interfaces!(cache, u,
mesh::TreeMesh{1}, equations, surface_integral, dg::DG)
@unpack interfaces = cache
+ @unpack neighbor_ids = interfaces
+ interfaces_u = interfaces.u
@threaded for interface in eachinterface(dg, cache)
- left_element = interfaces.neighbor_ids[1, interface]
- right_element = interfaces.neighbor_ids[2, interface]
+ left_element = neighbor_ids[1, interface]
+ right_element = neighbor_ids[2, interface]
# interface in x-direction
for v in eachvariable(equations)
- interfaces.u[1, v, interface] = u[v, nnodes(dg), left_element]
- interfaces.u[2, v, interface] = u[v, 1, right_element]
+ interfaces_u[1, v, interface] = u[v, nnodes(dg), left_element]
+ interfaces_u[2, v, interface] = u[v, 1, right_element]
end
end
@@ -621,8 +623,10 @@ end
function apply_jacobian!(du, mesh::Union{TreeMesh{1}, StructuredMesh{1}},
equations, dg::DG, cache)
+ @unpack inverse_jacobian = cache.elements
+
@threaded for element in eachelement(dg, cache)
- factor = -cache.elements.inverse_jacobian[element]
+ factor = -inverse_jacobian[element]
for i in eachnode(dg)
for v in eachvariable(equations)
@@ -642,11 +646,13 @@ end
function calc_sources!(du, u, t, source_terms,
equations::AbstractEquations{1}, dg::DG, cache)
+ @unpack node_coordinates = cache.elements
+
@threaded for element in eachelement(dg, cache)
for i in eachnode(dg)
u_local = get_node_vars(u, equations, dg, i, element)
- x_local = get_node_coords(cache.elements.node_coordinates, equations, dg, i,
- element)
+ x_local = get_node_coords(node_coordinates, equations, dg,
+ i, element)
du_local = source_terms(u_local, x_local, t, equations)
add_to_node_vars!(du, du_local, equations, dg, i, element)
end
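The idea behind these allocation fixes, sketched with toy types: accessing `cache.elements.inverse_jacobian` inside the threaded loop makes the loop body close over the whole cache, while unpacking the array once up front lets it capture only a plain `Vector`. In this sketch, `Threads.@threads` stands in for Trixi's `@threaded`/Polyester loop and all names are made up:

    struct ToyElements
        inverse_jacobian::Vector{Float64}
    end
    struct ToyDGCache
        elements::ToyElements
    end

    function apply_jacobian_toy!(du, cache::ToyDGCache)
        (; inverse_jacobian) = cache.elements   # hoisted out of the loop, as in the diff
        Threads.@threads for element in eachindex(inverse_jacobian)
            du[element] *= -inverse_jacobian[element]
        end
        return nothing
    end

    du = ones(8)
    apply_jacobian_toy!(du, ToyDGCache(ToyElements(rand(8))))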
diff --git a/src/solvers/dgsem_tree/dg_2d.jl b/src/solvers/dgsem_tree/dg_2d.jl
index d3227710686..6c5e0cee0cf 100644
--- a/src/solvers/dgsem_tree/dg_2d.jl
+++ b/src/solvers/dgsem_tree/dg_2d.jl
@@ -529,23 +529,24 @@ end
function prolong2interfaces!(cache, u,
mesh::TreeMesh{2}, equations, surface_integral, dg::DG)
@unpack interfaces = cache
- @unpack orientations = interfaces
+ @unpack orientations, neighbor_ids = interfaces
+ interfaces_u = interfaces.u
@threaded for interface in eachinterface(dg, cache)
- left_element = interfaces.neighbor_ids[1, interface]
- right_element = interfaces.neighbor_ids[2, interface]
+ left_element = neighbor_ids[1, interface]
+ right_element = neighbor_ids[2, interface]
if orientations[interface] == 1
# interface in x-direction
for j in eachnode(dg), v in eachvariable(equations)
- interfaces.u[1, v, j, interface] = u[v, nnodes(dg), j, left_element]
- interfaces.u[2, v, j, interface] = u[v, 1, j, right_element]
+ interfaces_u[1, v, j, interface] = u[v, nnodes(dg), j, left_element]
+ interfaces_u[2, v, j, interface] = u[v, 1, j, right_element]
end
else # if orientations[interface] == 2
# interface in y-direction
for i in eachnode(dg), v in eachvariable(equations)
- interfaces.u[1, v, i, interface] = u[v, i, nnodes(dg), left_element]
- interfaces.u[2, v, i, interface] = u[v, i, 1, right_element]
+ interfaces_u[1, v, i, interface] = u[v, i, nnodes(dg), left_element]
+ interfaces_u[2, v, i, interface] = u[v, i, 1, right_element]
end
end
end
@@ -1116,8 +1117,10 @@ end
function apply_jacobian!(du, mesh::TreeMesh{2},
equations, dg::DG, cache)
+ @unpack inverse_jacobian = cache.elements
+
@threaded for element in eachelement(dg, cache)
- factor = -cache.elements.inverse_jacobian[element]
+ factor = -inverse_jacobian[element]
for j in eachnode(dg), i in eachnode(dg)
for v in eachvariable(equations)
@@ -1137,11 +1140,13 @@ end
function calc_sources!(du, u, t, source_terms,
equations::AbstractEquations{2}, dg::DG, cache)
+ @unpack node_coordinates = cache.elements
+
@threaded for element in eachelement(dg, cache)
for j in eachnode(dg), i in eachnode(dg)
u_local = get_node_vars(u, equations, dg, i, j, element)
- x_local = get_node_coords(cache.elements.node_coordinates, equations, dg, i,
- j, element)
+ x_local = get_node_coords(node_coordinates, equations, dg,
+ i, j, element)
du_local = source_terms(u_local, x_local, t, equations)
add_to_node_vars!(du, du_local, equations, dg, i, j, element)
end
diff --git a/src/solvers/dgsem_tree/dg_3d.jl b/src/solvers/dgsem_tree/dg_3d.jl
index 95abb2595e5..acdab900cd1 100644
--- a/src/solvers/dgsem_tree/dg_3d.jl
+++ b/src/solvers/dgsem_tree/dg_3d.jl
@@ -598,32 +598,33 @@ end
function prolong2interfaces!(cache, u,
mesh::TreeMesh{3}, equations, surface_integral, dg::DG)
@unpack interfaces = cache
- @unpack orientations = interfaces
+ @unpack orientations, neighbor_ids = interfaces
+ interfaces_u = interfaces.u
@threaded for interface in eachinterface(dg, cache)
- left_element = interfaces.neighbor_ids[1, interface]
- right_element = interfaces.neighbor_ids[2, interface]
+ left_element = neighbor_ids[1, interface]
+ right_element = neighbor_ids[2, interface]
if orientations[interface] == 1
# interface in x-direction
for k in eachnode(dg), j in eachnode(dg), v in eachvariable(equations)
- interfaces.u[1, v, j, k, interface] = u[v, nnodes(dg), j, k,
+ interfaces_u[1, v, j, k, interface] = u[v, nnodes(dg), j, k,
left_element]
- interfaces.u[2, v, j, k, interface] = u[v, 1, j, k, right_element]
+ interfaces_u[2, v, j, k, interface] = u[v, 1, j, k, right_element]
end
elseif orientations[interface] == 2
# interface in y-direction
for k in eachnode(dg), i in eachnode(dg), v in eachvariable(equations)
- interfaces.u[1, v, i, k, interface] = u[v, i, nnodes(dg), k,
+ interfaces_u[1, v, i, k, interface] = u[v, i, nnodes(dg), k,
left_element]
- interfaces.u[2, v, i, k, interface] = u[v, i, 1, k, right_element]
+ interfaces_u[2, v, i, k, interface] = u[v, i, 1, k, right_element]
end
else # if orientations[interface] == 3
# interface in z-direction
for j in eachnode(dg), i in eachnode(dg), v in eachvariable(equations)
- interfaces.u[1, v, i, j, interface] = u[v, i, j, nnodes(dg),
+ interfaces_u[1, v, i, j, interface] = u[v, i, j, nnodes(dg),
left_element]
- interfaces.u[2, v, i, j, interface] = u[v, i, j, 1, right_element]
+ interfaces_u[2, v, i, j, interface] = u[v, i, j, 1, right_element]
end
end
end
@@ -1350,8 +1351,10 @@ end
function apply_jacobian!(du, mesh::TreeMesh{3},
equations, dg::DG, cache)
+ @unpack inverse_jacobian = cache.elements
+
@threaded for element in eachelement(dg, cache)
- factor = -cache.elements.inverse_jacobian[element]
+ factor = -inverse_jacobian[element]
for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg)
for v in eachvariable(equations)
@@ -1371,11 +1374,13 @@ end
function calc_sources!(du, u, t, source_terms,
equations::AbstractEquations{3}, dg::DG, cache)
+ @unpack node_coordinates = cache.elements
+
@threaded for element in eachelement(dg, cache)
for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg)
u_local = get_node_vars(u, equations, dg, i, j, k, element)
- x_local = get_node_coords(cache.elements.node_coordinates, equations, dg, i,
- j, k, element)
+ x_local = get_node_coords(node_coordinates, equations, dg,
+ i, j, k, element)
du_local = source_terms(u_local, x_local, t, equations)
add_to_node_vars!(du, du_local, equations, dg, i, j, k, element)
end
diff --git a/test/test_threaded.jl b/test/test_threaded.jl
index 323d12d7091..77fa16ad33e 100644
--- a/test/test_threaded.jl
+++ b/test/test_threaded.jl
@@ -16,6 +16,15 @@ Trixi.mpi_isroot() && isdir(outdir) && rm(outdir, recursive=true)
# Expected errors are exactly the same as in the serial test!
l2 = [7.81674284320524e-6],
linf = [6.314906965243505e-5])
+
+ # Ensure that we do not have excessive memory allocations
+ # (e.g., from type instabilities)
+ let
+ t = sol.t[end]
+ u_ode = sol.u[end]
+ du_ode = similar(u_ode)
+ @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 5000
+ end
end
@trixi_testset "elixir_advection_restart.jl with threaded time integration" begin
@@ -30,12 +39,30 @@ Trixi.mpi_isroot() && isdir(outdir) && rm(outdir, recursive=true)
@test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", "elixir_advection_amr_refine_twice.jl"),
l2 = [0.00020547512522578292],
linf = [0.007831753383083506])
+
+ # Ensure that we do not have excessive memory allocations
+ # (e.g., from type instabilities)
+ let
+ t = sol.t[end]
+ u_ode = sol.u[end]
+ du_ode = similar(u_ode)
+ @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 5000
+ end
end
@trixi_testset "elixir_advection_amr_coarsen_twice.jl" begin
@test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", "elixir_advection_amr_coarsen_twice.jl"),
l2 = [0.0014321062757891826],
linf = [0.0253454486893413])
+
+ # Ensure that we do not have excessive memory allocations
+ # (e.g., from type instabilities)
+ let
+ t = sol.t[end]
+ u_ode = sol.u[end]
+ du_ode = similar(u_ode)
+ @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 5000
+ end
end
@trixi_testset "elixir_euler_source_terms_nonperiodic.jl" begin
@@ -43,12 +70,30 @@ Trixi.mpi_isroot() && isdir(outdir) && rm(outdir, recursive=true)
l2 = [2.259440511766445e-6, 2.318888155713922e-6, 2.3188881557894307e-6, 6.3327863238858925e-6],
linf = [1.498738264560373e-5, 1.9182011928187137e-5, 1.918201192685487e-5, 6.0526717141407005e-5],
rtol = 0.001)
+
+ # Ensure that we do not have excessive memory allocations
+ # (e.g., from type instabilities)
+ let
+ t = sol.t[end]
+ u_ode = sol.u[end]
+ du_ode = similar(u_ode)
+ @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 5000
+ end
end
@trixi_testset "elixir_euler_ec.jl" begin
@test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", "elixir_euler_ec.jl"),
l2 = [0.061751715597716854, 0.05018223615408711, 0.05018989446443463, 0.225871559730513],
linf = [0.29347582879608825, 0.31081249232844693, 0.3107380389947736, 1.0540358049885143])
+
+ # Ensure that we do not have excessive memory allocations
+ # (e.g., from type instabilities)
+ let
+ t = sol.t[end]
+ u_ode = sol.u[end]
+ du_ode = similar(u_ode)
+ @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 5000
+ end
end
@trixi_testset "elixir_advection_diffusion.jl" begin
@@ -58,6 +103,47 @@ Trixi.mpi_isroot() && isdir(outdir) && rm(outdir, recursive=true)
l2 = [4.0915532997994255e-6],
linf = [2.3040850347877395e-5]
)
+
+ # Ensure that we do not have excessive memory allocations
+ # (e.g., from type instabilities)
+ let
+ t = sol.t[end]
+ u_ode = sol.u[end]
+ du_ode = similar(u_ode)
+ @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 5000
+ end
+ end
+
+ @trixi_testset "FDSBP, elixir_advection_extended.jl" begin
+ @test_trixi_include(joinpath(examples_dir(), "tree_2d_fdsbp", "elixir_advection_extended.jl"),
+ l2 = [2.898644263922225e-6],
+ linf = [8.491517930142578e-6],
+ rtol = 1.0e-7) # These results change a little bit and depend on the CI system
+
+ # Ensure that we do not have excessive memory allocations
+ # (e.g., from type instabilities)
+ let
+ t = sol.t[end]
+ u_ode = sol.u[end]
+ du_ode = similar(u_ode)
+ @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 5000
+ end
+ end
+
+ @trixi_testset "FDSBP, elixir_euler_convergence.jl" begin
+ @test_trixi_include(joinpath(examples_dir(), "tree_2d_fdsbp", "elixir_euler_convergence.jl"),
+ l2 = [1.7088389997042244e-6, 1.7437997855125774e-6, 1.7437997855350776e-6, 5.457223460127621e-6],
+ linf = [9.796504903736292e-6, 9.614745892783105e-6, 9.614745892783105e-6, 4.026107182575345e-5],
+ tspan = (0.0, 0.1))
+
+ # Ensure that we do not have excessive memory allocations
+ # (e.g., from type instabilities)
+ let
+ t = sol.t[end]
+ u_ode = sol.u[end]
+ du_ode = similar(u_ode)
+ @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 5000
+ end
end
end
@@ -70,6 +156,15 @@ Trixi.mpi_isroot() && isdir(outdir) && rm(outdir, recursive=true)
rtol = 5.0e-5, # Higher tolerance to make tests pass in CI (in particular with macOS)
elixir_file="elixir_advection_waving_flag.jl",
restart_file="restart_000021.h5")
+
+ # Ensure that we do not have excessive memory allocations
+ # (e.g., from type instabilities)
+ let
+ t = sol.t[end]
+ u_ode = sol.u[end]
+ du_ode = similar(u_ode)
+ @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 5000
+ end
end
@trixi_testset "elixir_mhd_ec.jl" begin
@@ -81,6 +176,15 @@ Trixi.mpi_isroot() && isdir(outdir) && rm(outdir, recursive=true)
0.9757376320946505, 0.12123736788315098, 0.12837436699267113, 0.17793825293524734,
0.03460761690059514],
tspan = (0.0, 0.3))
+
+ # Ensure that we do not have excessive memory allocations
+ # (e.g., from type instabilities)
+ let
+ t = sol.t[end]
+ u_ode = sol.u[end]
+ du_ode = similar(u_ode)
+ @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 5000
+ end
end
end
@@ -93,6 +197,15 @@ Trixi.mpi_isroot() && isdir(outdir) && rm(outdir, recursive=true)
linf = [0.36236334472179443, 0.3690785638275256, 0.8475748723784078, 0.0,
8.881784197001252e-16, 1.7763568394002505e-15, 1.7763568394002505e-15],
tspan = (0.0, 5.0))
+
+ # Ensure that we do not have excessive memory allocations
+ # (e.g., from type instabilities)
+ let
+ t = sol.t[end]
+ u_ode = sol.u[end]
+ du_ode = similar(u_ode)
+ @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 5000
+ end
end
end
@@ -102,6 +215,15 @@ Trixi.mpi_isroot() && isdir(outdir) && rm(outdir, recursive=true)
@test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", "elixir_euler_source_terms_nonconforming_unstructured_flag.jl"),
l2 = [0.0034516244508588046, 0.0023420334036925493, 0.0024261923964557187, 0.004731710454271893],
linf = [0.04155789011775046, 0.024772109862748914, 0.03759938693042297, 0.08039824959535657])
+
+ # Ensure that we do not have excessive memory allocations
+ # (e.g., from type instabilities)
+ let
+ t = sol.t[end]
+ u_ode = sol.u[end]
+ du_ode = similar(u_ode)
+ @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 5000
+ end
end
@trixi_testset "elixir_eulergravity_convergence.jl" begin
@@ -123,17 +245,32 @@ Trixi.mpi_isroot() && isdir(outdir) && rm(outdir, recursive=true)
l2 = [0.006400337855843578, 0.005303799804137764, 0.005303799804119745, 0.013204169007030144],
linf = [0.03798302318566282, 0.05321027922532284, 0.05321027922605448, 0.13392025411839015],
)
+
+ # Ensure that we do not have excessive memory allocations
+ # (e.g., from type instabilities)
+ let
+ t = sol.t[end]
+ u_ode = sol.u[end]
+ du_ode = similar(u_ode)
+ @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 5000
+ end
end
@trixi_testset "elixir_euler_curved.jl with threaded time integration" begin
- @test_broken false
- # TODO: This is currently broken and needs to be fixed upstream
- # See https://github.com/JuliaSIMD/StrideArrays.jl/issues/77
- # @test_trixi_include(joinpath(examples_dir(), "dgmulti_2d", "elixir_euler_curved.jl"),
- # alg = RDPK3SpFSAL49(thread = OrdinaryDiffEq.True()),
- # l2 = [1.720476068165337e-5, 1.592168205710526e-5, 1.592168205812963e-5, 4.894094865697305e-5],
- # linf = [0.00010525416930584619, 0.00010003778091061122, 0.00010003778085621029, 0.00036426282101720275]
- # )
+ @test_trixi_include(joinpath(examples_dir(), "dgmulti_2d", "elixir_euler_curved.jl"),
+ alg = RDPK3SpFSAL49(thread = OrdinaryDiffEq.True()),
+ l2 = [1.720476068165337e-5, 1.592168205710526e-5, 1.592168205812963e-5, 4.894094865697305e-5],
+ linf = [0.00010525416930584619, 0.00010003778091061122, 0.00010003778085621029, 0.00036426282101720275]
+ )
+
+ # Ensure that we do not have excessive memory allocations
+ # (e.g., from type instabilities)
+ let
+ t = sol.t[end]
+ u_ode = sol.u[end]
+ du_ode = similar(u_ode)
+ @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 5000
+ end
end
@trixi_testset "elixir_euler_triangulate_pkg_mesh.jl" begin
@@ -141,6 +278,19 @@ Trixi.mpi_isroot() && isdir(outdir) && rm(outdir, recursive=true)
l2 = [2.344080455438114e-6, 1.8610038753097983e-6, 2.4095165666095305e-6, 6.373308158814308e-6],
linf = [2.5099852761334418e-5, 2.2683684021362893e-5, 2.6180448559287584e-5, 5.5752932611508044e-5]
)
+
+ # Ensure that we do not have excessive memory allocations
+ # (e.g., from type instabilities)
+ let
+ t = sol.t[end]
+ u_ode = sol.u[end]
+ du_ode = similar(u_ode)
+ if (Threads.nthreads() < 2) || (VERSION < v"1.9")
+ @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 5000
+ else
+ @test_broken (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 5000
+ end
+ end
end
@trixi_testset "elixir_euler_fdsbp_periodic.jl" begin
@@ -148,6 +298,15 @@ Trixi.mpi_isroot() && isdir(outdir) && rm(outdir, recursive=true)
l2 = [1.3333320340010056e-6, 2.044834627970641e-6, 2.044834627855601e-6, 5.282189803559564e-6],
linf = [2.7000151718858945e-6, 3.988595028259212e-6, 3.9885950273710336e-6, 8.848583042286862e-6]
)
+
+ # Ensure that we do not have excessive memory allocations
+ # (e.g., from type instabilities)
+ let
+ t = sol.t[end]
+ u_ode = sol.u[end]
+ du_ode = similar(u_ode)
+ @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 5000
+ end
end
end
end
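Each of the test sets above repeats the same allocation check verbatim. For reference, a sketch of how it could be factored into a helper; the name `check_rhs_allocations` is hypothetical and not part of Trixi's test suite, and it relies on `semi` and `sol` being defined by the included elixir, exactly as the inline checks do:

using Test
using Trixi

function check_rhs_allocations(semi, sol; limit = 5000)
    t = sol.t[end]
    u_ode = sol.u[end]
    du_ode = similar(u_ode)
    # The elixir run has already compiled `rhs!`, so a single measured call suffices
    allocated = @allocated Trixi.rhs!(du_ode, u_ode, semi, t)
    @test allocated < limit
    return allocated
end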
From 3dd2cb60d0798a5a9a327c73e6150382636c7845 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Tue, 25 Jul 2023 08:59:37 +0200
Subject: [PATCH 28/40] set version to v0.5.36
---
Project.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Project.toml b/Project.toml
index 00bf2718d8b..2017290c785 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "Trixi"
uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb"
authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "]
-version = "0.5.36-pre"
+version = "0.5.36"
[deps]
CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
From 41b56ef71c535321fca8c99fe9d7b2098b70025d Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Tue, 25 Jul 2023 08:59:47 +0200
Subject: [PATCH 29/40] set development version to v0.5.37-pre
---
Project.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Project.toml b/Project.toml
index 2017290c785..94c47a35ac1 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "Trixi"
uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb"
authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "]
-version = "0.5.36"
+version = "0.5.37-pre"
[deps]
CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
From fe6a818a8459d6beef3969c1fd2d5cc7ddf596df Mon Sep 17 00:00:00 2001
From: Ahmad Peyvan <115842305+apey236@users.noreply.github.com>
Date: Tue, 25 Jul 2023 04:17:42 -0400
Subject: [PATCH 30/40] Adding parabolic terms for `P4estMesh{3}` (#1555)
* Adding parabolic terms for 3D P4est mesh
* Adding parabolic terms for 3D P4estMesh
* Adding working parabolic terms for `P4estMesh{3}`
* Formatting
* Adding TGV example and test to `P4estMesh{3}`
* Update src/solvers/dgsem_tree/dg_3d_parabolic.jl
Co-authored-by: Hendrik Ranocha
* Update src/solvers/dgsem_tree/dg_3d_parabolic.jl
Co-authored-by: Hendrik Ranocha
* Update src/solvers/dgsem_tree/dg_3d_parabolic.jl
Co-authored-by: Hendrik Ranocha
* Update src/solvers/dgsem_tree/dg_3d_parabolic.jl
Co-authored-by: Hendrik Ranocha
* Removing comments
* Removed comments
* Adding TGV test for `P4estMesh{3}`
* Correcting the format
* Format correction
* Remove .toml file
* Format correction
* Optimized loop for speed
---------
Co-authored-by: Hendrik Ranocha
Co-authored-by: Jesse Chan <1156048+jlchan@users.noreply.github.com>
---
.../elixir_navierstokes_convergence.jl | 263 +++++++
...elixir_navierstokes_taylor_green_vortex.jl | 82 +++
src/callbacks_step/analysis_dg3d.jl | 2 +-
src/solvers/dgsem_p4est/dg.jl | 1 +
src/solvers/dgsem_p4est/dg_2d_parabolic.jl | 2 +-
src/solvers/dgsem_p4est/dg_3d_parabolic.jl | 691 ++++++++++++++++++
src/solvers/dgsem_tree/dg_3d_parabolic.jl | 13 +-
test/test_parabolic_3d.jl | 19 +-
8 files changed, 1063 insertions(+), 10 deletions(-)
create mode 100644 examples/p4est_3d_dgsem/elixir_navierstokes_convergence.jl
create mode 100644 examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex.jl
create mode 100644 src/solvers/dgsem_p4est/dg_3d_parabolic.jl
diff --git a/examples/p4est_3d_dgsem/elixir_navierstokes_convergence.jl b/examples/p4est_3d_dgsem/elixir_navierstokes_convergence.jl
new file mode 100644
index 00000000000..c426fe95f5b
--- /dev/null
+++ b/examples/p4est_3d_dgsem/elixir_navierstokes_convergence.jl
@@ -0,0 +1,263 @@
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# semidiscretization of the ideal compressible Navier-Stokes equations
+
+prandtl_number() = 0.72
+mu() = 0.01
+
+equations = CompressibleEulerEquations3D(1.4)
+equations_parabolic = CompressibleNavierStokesDiffusion3D(equations, mu=mu(), Prandtl=prandtl_number(),
+ gradient_variables=GradientVariablesPrimitive())
+
+# Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux
+solver = DGSEM(polydeg=3, surface_flux=flux_lax_friedrichs,
+ volume_integral=VolumeIntegralWeakForm())
+
+coordinates_min = (-1.0, -1.0, -1.0) # minimum coordinates (min(x), min(y), min(z))
+coordinates_max = ( 1.0, 1.0, 1.0) # maximum coordinates (max(x), max(y), max(z))
+
+trees_per_dimension = (2, 2, 2)
+
+mesh = P4estMesh(trees_per_dimension, polydeg=3,
+ coordinates_min=coordinates_min, coordinates_max=coordinates_max,
+ periodicity=(true, false, true), initial_refinement_level=2)
+
+# Note: the initial condition cannot be specialized to `CompressibleNavierStokesDiffusion3D`
+# since it is called by both the parabolic solver (which passes in `CompressibleNavierStokesDiffusion3D`)
+# and by the initial condition (which passes in `CompressibleEulerEquations3D`).
+# This convergence test setup was originally derived by Andrew Winters (@andrewwinters5000)
+function initial_condition_navier_stokes_convergence_test(x, t, equations)
+ # Constants. OBS! Must match those in `source_terms_navier_stokes_convergence_test`
+ c = 2.0
+ A1 = 0.5
+ A2 = 1.0
+ A3 = 0.5
+
+ # Convenience values for trig. functions
+ pi_x = pi * x[1]
+ pi_y = pi * x[2]
+ pi_z = pi * x[3]
+ pi_t = pi * t
+
+ rho = c + A1 * sin(pi_x) * cos(pi_y) * sin(pi_z) * cos(pi_t)
+ v1 = A2 * sin(pi_x) * log(x[2] + 2.0) * (1.0 - exp(-A3 * (x[2] - 1.0))) * sin(pi_z) * cos(pi_t)
+ v2 = v1
+ v3 = v1
+ p = rho^2
+
+ return prim2cons(SVector(rho, v1, v2, v3, p), equations)
+end
+
+@inline function source_terms_navier_stokes_convergence_test(u, x, t, equations)
+ # TODO: parabolic
+ # we currently need to hardcode these parameters until we fix the "combined equation" issue
+ # see also https://github.com/trixi-framework/Trixi.jl/pull/1160
+ inv_gamma_minus_one = inv(equations.gamma - 1)
+ Pr = prandtl_number()
+ mu_ = mu()
+
+ # Constants. OBS! Must match those in `initial_condition_navier_stokes_convergence_test`
+ c = 2.0
+ A1 = 0.5
+ A2 = 1.0
+ A3 = 0.5
+
+ # Convenience values for trig. functions
+ pi_x = pi * x[1]
+ pi_y = pi * x[2]
+ pi_z = pi * x[3]
+ pi_t = pi * t
+
+ # Define auxiliary functions for the strange function of the y variable
+ # to make expressions easier to read
+ g = log(x[2] + 2.0) * (1.0 - exp(-A3 * (x[2] - 1.0)))
+ g_y = ( A3 * log(x[2] + 2.0) * exp(-A3 * (x[2] - 1.0))
+ + (1.0 - exp(-A3 * (x[2] - 1.0))) / (x[2] + 2.0) )
+ g_yy = ( 2.0 * A3 * exp(-A3 * (x[2] - 1.0)) / (x[2] + 2.0)
+ - (1.0 - exp(-A3 * (x[2] - 1.0))) / ((x[2] + 2.0)^2)
+ - A3^2 * log(x[2] + 2.0) * exp(-A3 * (x[2] - 1.0)) )
+
+ # Density and its derivatives
+ rho = c + A1 * sin(pi_x) * cos(pi_y) * sin(pi_z) * cos(pi_t)
+ rho_t = -pi * A1 * sin(pi_x) * cos(pi_y) * sin(pi_z) * sin(pi_t)
+ rho_x = pi * A1 * cos(pi_x) * cos(pi_y) * sin(pi_z) * cos(pi_t)
+ rho_y = -pi * A1 * sin(pi_x) * sin(pi_y) * sin(pi_z) * cos(pi_t)
+ rho_z = pi * A1 * sin(pi_x) * cos(pi_y) * cos(pi_z) * cos(pi_t)
+ rho_xx = -pi^2 * (rho - c)
+ rho_yy = -pi^2 * (rho - c)
+ rho_zz = -pi^2 * (rho - c)
+
+ # Velocities and their derivatives
+ # v1 terms
+ v1 = A2 * sin(pi_x) * g * sin(pi_z) * cos(pi_t)
+ v1_t = -pi * A2 * sin(pi_x) * g * sin(pi_z) * sin(pi_t)
+ v1_x = pi * A2 * cos(pi_x) * g * sin(pi_z) * cos(pi_t)
+ v1_y = A2 * sin(pi_x) * g_y * sin(pi_z) * cos(pi_t)
+ v1_z = pi * A2 * sin(pi_x) * g * cos(pi_z) * cos(pi_t)
+ v1_xx = -pi^2 * v1
+ v1_yy = A2 * sin(pi_x) * g_yy * sin(pi_z) * cos(pi_t)
+ v1_zz = -pi^2 * v1
+ v1_xy = pi * A2 * cos(pi_x) * g_y * sin(pi_z) * cos(pi_t)
+ v1_xz = pi^2 * A2 * cos(pi_x) * g * cos(pi_z) * cos(pi_t)
+ v1_yz = pi * A2 * sin(pi_x) * g_y * cos(pi_z) * cos(pi_t)
+ # v2 terms (simplifies from ansatz)
+ v2 = v1
+ v2_t = v1_t
+ v2_x = v1_x
+ v2_y = v1_y
+ v2_z = v1_z
+ v2_xx = v1_xx
+ v2_yy = v1_yy
+ v2_zz = v1_zz
+ v2_xy = v1_xy
+ v2_yz = v1_yz
+ # v3 terms (simplifies from ansatz)
+ v3 = v1
+ v3_t = v1_t
+ v3_x = v1_x
+ v3_y = v1_y
+ v3_z = v1_z
+ v3_xx = v1_xx
+ v3_yy = v1_yy
+ v3_zz = v1_zz
+ v3_xz = v1_xz
+ v3_yz = v1_yz
+
+ # Pressure and its derivatives
+ p = rho^2
+ p_t = 2.0 * rho * rho_t
+ p_x = 2.0 * rho * rho_x
+ p_y = 2.0 * rho * rho_y
+ p_z = 2.0 * rho * rho_z
+
+ # Total energy and its derivatives; simplifies from the ansatz that v2 = v1 and v3 = v1
+ E = p * inv_gamma_minus_one + 1.5 * rho * v1^2
+ E_t = p_t * inv_gamma_minus_one + 1.5 * rho_t * v1^2 + 3.0 * rho * v1 * v1_t
+ E_x = p_x * inv_gamma_minus_one + 1.5 * rho_x * v1^2 + 3.0 * rho * v1 * v1_x
+ E_y = p_y * inv_gamma_minus_one + 1.5 * rho_y * v1^2 + 3.0 * rho * v1 * v1_y
+ E_z = p_z * inv_gamma_minus_one + 1.5 * rho_z * v1^2 + 3.0 * rho * v1 * v1_z
+
+ # Divergence of Fick's law ∇⋅∇q = kappa ∇⋅∇T; simplifies because p = rho², so T = p/rho = rho
+ kappa = equations.gamma * inv_gamma_minus_one / Pr
+ q_xx = kappa * rho_xx # kappa T_xx
+ q_yy = kappa * rho_yy # kappa T_yy
+ q_zz = kappa * rho_zz # kappa T_zz
+
+ # Stress tensor and its derivatives (exploit symmetry)
+ tau11 = 4.0 / 3.0 * v1_x - 2.0 / 3.0 * (v2_y + v3_z)
+ tau12 = v1_y + v2_x
+ tau13 = v1_z + v3_x
+ tau22 = 4.0 / 3.0 * v2_y - 2.0 / 3.0 * (v1_x + v3_z)
+ tau23 = v2_z + v3_y
+ tau33 = 4.0 / 3.0 * v3_z - 2.0 / 3.0 * (v1_x + v2_y)
+
+ tau11_x = 4.0 / 3.0 * v1_xx - 2.0 / 3.0 * (v2_xy + v3_xz)
+ tau12_x = v1_xy + v2_xx
+ tau13_x = v1_xz + v3_xx
+
+ tau12_y = v1_yy + v2_xy
+ tau22_y = 4.0 / 3.0 * v2_yy - 2.0 / 3.0 * (v1_xy + v3_yz)
+ tau23_y = v2_yz + v3_yy
+
+ tau13_z = v1_zz + v3_xz
+ tau23_z = v2_zz + v3_yz
+ tau33_z = 4.0 / 3.0 * v3_zz - 2.0 / 3.0 * (v1_xz + v2_yz)
+
+ # Compute the source terms
+ # Density equation
+ du1 = ( rho_t + rho_x * v1 + rho * v1_x
+ + rho_y * v2 + rho * v2_y
+ + rho_z * v3 + rho * v3_z )
+ # x-momentum equation
+ du2 = ( rho_t * v1 + rho * v1_t + p_x + rho_x * v1^2
+ + 2.0 * rho * v1 * v1_x
+ + rho_y * v1 * v2
+ + rho * v1_y * v2
+ + rho * v1 * v2_y
+ + rho_z * v1 * v3
+ + rho * v1_z * v3
+ + rho * v1 * v3_z
+ - mu_ * (tau11_x + tau12_y + tau13_z) )
+ # y-momentum equation
+ du3 = ( rho_t * v2 + rho * v2_t + p_y + rho_x * v1 * v2
+ + rho * v1_x * v2
+ + rho * v1 * v2_x
+ + rho_y * v2^2
+ + 2.0 * rho * v2 * v2_y
+ + rho_z * v2 * v3
+ + rho * v2_z * v3
+ + rho * v2 * v3_z
+ - mu_ * (tau12_x + tau22_y + tau23_z) )
+ # z-momentum equation
+ du4 = ( rho_t * v3 + rho * v3_t + p_z + rho_x * v1 * v3
+ + rho * v1_x * v3
+ + rho * v1 * v3_x
+ + rho_y * v2 * v3
+ + rho * v2_y * v3
+ + rho * v2 * v3_y
+ + rho_z * v3^2
+ + 2.0 * rho * v3 * v3_z
+ - mu_ * (tau13_x + tau23_y + tau33_z) )
+ # Total energy equation
+ du5 = ( E_t + v1_x * (E + p) + v1 * (E_x + p_x)
+ + v2_y * (E + p) + v2 * (E_y + p_y)
+ + v3_z * (E + p) + v3 * (E_z + p_z)
+ # stress tensor and temperature gradient from x-direction
+ - mu_ * ( q_xx + v1_x * tau11 + v2_x * tau12 + v3_x * tau13
+ + v1 * tau11_x + v2 * tau12_x + v3 * tau13_x)
+ # stress tensor and temperature gradient terms from y-direction
+ - mu_ * ( q_yy + v1_y * tau12 + v2_y * tau22 + v3_y * tau23
+ + v1 * tau12_y + v2 * tau22_y + v3 * tau23_y)
+ # stress tensor and temperature gradient terms from z-direction
+ - mu_ * ( q_zz + v1_z * tau13 + v2_z * tau23 + v3_z * tau33
+ + v1 * tau13_z + v2 * tau23_z + v3 * tau33_z) )
+
+ return SVector(du1, du2, du3, du4, du5)
+end
+
+initial_condition = initial_condition_navier_stokes_convergence_test
+
+# BC types
+velocity_bc_top_bottom = NoSlip((x, t, equations) -> initial_condition_navier_stokes_convergence_test(x, t, equations)[2:4])
+heat_bc_top_bottom = Adiabatic((x, t, equations) -> 0.0)
+boundary_condition_top_bottom = BoundaryConditionNavierStokesWall(velocity_bc_top_bottom, heat_bc_top_bottom)
+
+# define inviscid boundary conditions
+boundary_conditions = Dict(
+ :y_neg => boundary_condition_slip_wall,
+ :y_pos => boundary_condition_slip_wall
+ )
+
+# define viscous boundary conditions
+boundary_conditions_parabolic = Dict(
+ :y_neg => boundary_condition_top_bottom,
+ :y_pos => boundary_condition_top_bottom
+ )
+
+semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, equations_parabolic), initial_condition, solver;
+ boundary_conditions=(boundary_conditions, boundary_conditions_parabolic),
+ source_terms=source_terms_navier_stokes_convergence_test)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+# Create ODE problem with time span `tspan`
+tspan = (0.0, 0.2)
+ode = semidiscretize(semi, tspan)
+
+summary_callback = SummaryCallback()
+alive_callback = AliveCallback(alive_interval=10)
+analysis_interval = 100
+analysis_callback = AnalysisCallback(semi, interval=analysis_interval)
+callbacks = CallbackSet(summary_callback, alive_callback, analysis_callback)
+
+###############################################################################
+# run the simulation
+
+time_int_tol = 1e-8
+sol = solve(ode, RDPK3SpFSAL49(); abstol=time_int_tol, reltol=time_int_tol, dt = 1e-5,
+ ode_default_options()..., callback=callbacks)
+summary_callback() # print the timer summary
+
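The source terms above are derived by hand from the manufactured solution. A quick, self-contained way to spot-check individual derivatives is to compare them with finite differences; the constants `c` and `A1` are copied from the elixir and the evaluation point is arbitrary:

c, A1 = 2.0, 0.5
rho(x, t) = c + A1 * sin(pi * x[1]) * cos(pi * x[2]) * sin(pi * x[3]) * cos(pi * t)
# hand-coded time derivative as used in `source_terms_navier_stokes_convergence_test`
rho_t(x, t) = -pi * A1 * sin(pi * x[1]) * cos(pi * x[2]) * sin(pi * x[3]) * sin(pi * t)

x0, t0, h = (0.3, -0.2, 0.7), 0.4, 1.0e-6
rho_t_fd = (rho(x0, t0 + h) - rho(x0, t0 - h)) / (2 * h)  # central difference
@assert isapprox(rho_t(x0, t0), rho_t_fd; atol = 1.0e-8)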
diff --git a/examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex.jl b/examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex.jl
new file mode 100644
index 00000000000..c5b9ccf2e38
--- /dev/null
+++ b/examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex.jl
@@ -0,0 +1,82 @@
+
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# semidiscretization of the compressible Navier-Stokes equations
+
+# TODO: parabolic; unify names of these accessor functions
+prandtl_number() = 0.72
+mu() = 6.25e-4 # equivalent to Re = 1600
+
+equations = CompressibleEulerEquations3D(1.4)
+equations_parabolic = CompressibleNavierStokesDiffusion3D(equations, mu=mu(),
+ Prandtl=prandtl_number())
+
+"""
+ initial_condition_taylor_green_vortex(x, t, equations::CompressibleEulerEquations3D)
+
+The classical inviscid Taylor-Green vortex.
+"""
+function initial_condition_taylor_green_vortex(x, t, equations::CompressibleEulerEquations3D)
+ A = 1.0 # magnitude of speed
+ Ms = 0.1 # maximum Mach number
+
+ rho = 1.0
+ v1 = A * sin(x[1]) * cos(x[2]) * cos(x[3])
+ v2 = -A * cos(x[1]) * sin(x[2]) * cos(x[3])
+ v3 = 0.0
+ p = (A / Ms)^2 * rho / equations.gamma # scaling to get Ms
+ p = p + 1.0/16.0 * A^2 * rho * (cos(2*x[1])*cos(2*x[3]) + 2*cos(2*x[2]) + 2*cos(2*x[1]) + cos(2*x[2])*cos(2*x[3]))
+
+ return prim2cons(SVector(rho, v1, v2, v3, p), equations)
+end
+initial_condition = initial_condition_taylor_green_vortex
+
+volume_flux = flux_ranocha
+solver = DGSEM(polydeg=3, surface_flux=flux_hll,
+ volume_integral=VolumeIntegralFluxDifferencing(volume_flux))
+
+coordinates_min = (-1.0, -1.0, -1.0) .* pi
+coordinates_max = ( 1.0, 1.0, 1.0) .* pi
+
+trees_per_dimension = (2, 2, 2)
+
+mesh = P4estMesh(trees_per_dimension, polydeg=3,
+ coordinates_min=coordinates_min, coordinates_max=coordinates_max,
+ periodicity=(true, true, true), initial_refinement_level=2)
+
+
+semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, equations_parabolic),
+ initial_condition, solver)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+tspan = (0.0, 20.0)
+ode = semidiscretize(semi, tspan)
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 50
+analysis_callback = AnalysisCallback(semi, interval=analysis_interval, save_analysis=true,
+ extra_analysis_integrals=(energy_kinetic,
+ energy_internal,
+ enstrophy))
+save_solution = SaveSolutionCallback(interval=100,
+ save_initial_solution=true,
+ save_final_solution=true,
+ solution_variables=cons2prim)
+alive_callback = AliveCallback(analysis_interval=analysis_interval,)
+
+callbacks = CallbackSet(summary_callback,
+ analysis_callback,
+ alive_callback, save_solution)
+
+###############################################################################
+# run the simulation
+
+time_int_tol = 1e-8
+sol = solve(ode, RDPK3SpFSAL49(); abstol=time_int_tol, reltol=time_int_tol,
+ ode_default_options()..., callback=callbacks)
+summary_callback() # print the timer summary
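Like the other elixirs, this setup can be run with modified parameters through `trixi_include`; the overrides below mirror the values used by the new test further down:

using Trixi
trixi_include(joinpath(examples_dir(), "p4est_3d_dgsem",
                       "elixir_navierstokes_taylor_green_vortex.jl"),
              initial_refinement_level = 2, tspan = (0.0, 0.25))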
diff --git a/src/callbacks_step/analysis_dg3d.jl b/src/callbacks_step/analysis_dg3d.jl
index 76aba813fab..3d9b38fd2a5 100644
--- a/src/callbacks_step/analysis_dg3d.jl
+++ b/src/callbacks_step/analysis_dg3d.jl
@@ -228,7 +228,7 @@ function integrate(func::Func, u,
end
function integrate(func::Func, u,
- mesh::TreeMesh{3},
+ mesh::Union{TreeMesh{3}, P4estMesh{3}},
equations, equations_parabolic,
dg::DGSEM,
cache, cache_parabolic; normalize = true) where {Func}
diff --git a/src/solvers/dgsem_p4est/dg.jl b/src/solvers/dgsem_p4est/dg.jl
index a7cc1eee04d..ec50627d3ef 100644
--- a/src/solvers/dgsem_p4est/dg.jl
+++ b/src/solvers/dgsem_p4est/dg.jl
@@ -50,5 +50,6 @@ include("dg_2d.jl")
include("dg_2d_parabolic.jl")
include("dg_3d.jl")
+include("dg_3d_parabolic.jl")
include("dg_parallel.jl")
end # @muladd
diff --git a/src/solvers/dgsem_p4est/dg_2d_parabolic.jl b/src/solvers/dgsem_p4est/dg_2d_parabolic.jl
index 73ac47ed1e3..7e90a83a9ca 100644
--- a/src/solvers/dgsem_p4est/dg_2d_parabolic.jl
+++ b/src/solvers/dgsem_p4est/dg_2d_parabolic.jl
@@ -1,7 +1,7 @@
# This method is called when a SemidiscretizationHyperbolicParabolic is constructed.
# It constructs the basic `cache` used throughout the simulation to compute
# the RHS etc.
-function create_cache_parabolic(mesh::P4estMesh, equations_hyperbolic::AbstractEquations,
+function create_cache_parabolic(mesh::P4estMesh{2}, equations_hyperbolic::AbstractEquations,
equations_parabolic::AbstractEquationsParabolic,
dg::DG, parabolic_scheme, RealT, uEltype)
balance!(mesh)
diff --git a/src/solvers/dgsem_p4est/dg_3d_parabolic.jl b/src/solvers/dgsem_p4est/dg_3d_parabolic.jl
new file mode 100644
index 00000000000..5370c927e05
--- /dev/null
+++ b/src/solvers/dgsem_p4est/dg_3d_parabolic.jl
@@ -0,0 +1,691 @@
+# This method is called when a SemidiscretizationHyperbolicParabolic is constructed.
+# It constructs the basic `cache` used throughout the simulation to compute
+# the RHS etc.
+function create_cache_parabolic(mesh::P4estMesh{3}, equations_hyperbolic::AbstractEquations,
+ equations_parabolic::AbstractEquationsParabolic,
+ dg::DG, parabolic_scheme, RealT, uEltype)
+ balance!(mesh)
+
+ elements = init_elements(mesh, equations_hyperbolic, dg.basis, uEltype)
+ interfaces = init_interfaces(mesh, equations_hyperbolic, dg.basis, elements)
+ boundaries = init_boundaries(mesh, equations_hyperbolic, dg.basis, elements)
+
+ n_vars = nvariables(equations_hyperbolic)
+ n_elements = nelements(elements)
+ n_nodes = nnodes(dg.basis) # nodes in one direction
+ u_transformed = Array{uEltype}(undef, n_vars, n_nodes, n_nodes, n_nodes, n_elements)
+ gradients = ntuple(_ -> similar(u_transformed), ndims(mesh))
+ flux_viscous = ntuple(_ -> similar(u_transformed), ndims(mesh))
+
+ cache = (; elements, interfaces, boundaries, gradients, flux_viscous, u_transformed)
+
+ return cache
+end
+
+function calc_gradient!(gradients, u_transformed, t,
+ mesh::P4estMesh{3}, equations_parabolic,
+ boundary_conditions_parabolic, dg::DG,
+ cache, cache_parabolic)
+ gradients_x, gradients_y, gradients_z = gradients
+
+ # Reset du
+ @trixi_timeit timer() "reset gradients" begin
+ reset_du!(gradients_x, dg, cache)
+ reset_du!(gradients_y, dg, cache)
+ reset_du!(gradients_z, dg, cache)
+ end
+
+ # Calculate volume integral
+ @trixi_timeit timer() "volume integral" begin
+ (; derivative_dhat) = dg.basis
+ (; contravariant_vectors) = cache.elements
+
+ @threaded for element in eachelement(dg, cache)
+
+ # Calculate gradients with respect to reference coordinates in one element
+ for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg)
+ u_node = get_node_vars(u_transformed, equations_parabolic, dg, i, j, k,
+ element)
+
+ for ii in eachnode(dg)
+ multiply_add_to_node_vars!(gradients_x, derivative_dhat[ii, i],
+ u_node, equations_parabolic, dg, ii, j,
+ k, element)
+ end
+
+ for jj in eachnode(dg)
+ multiply_add_to_node_vars!(gradients_y, derivative_dhat[jj, j],
+ u_node, equations_parabolic, dg, i, jj,
+ k, element)
+ end
+
+ for kk in eachnode(dg)
+ multiply_add_to_node_vars!(gradients_z, derivative_dhat[kk, k],
+ u_node, equations_parabolic, dg, i, j,
+ kk, element)
+ end
+ end
+
+ # now that the reference coordinate gradients are computed, transform them node-by-node to physical gradients
+ # using the contravariant vectors
+ for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg)
+ Ja11, Ja12, Ja13 = get_contravariant_vector(1, contravariant_vectors,
+ i, j, k, element)
+ Ja21, Ja22, Ja23 = get_contravariant_vector(2, contravariant_vectors,
+ i, j, k, element)
+ Ja31, Ja32, Ja33 = get_contravariant_vector(3, contravariant_vectors,
+ i, j, k, element)
+
+ gradients_reference_1 = get_node_vars(gradients_x, equations_parabolic, dg,
+ i, j, k, element)
+ gradients_reference_2 = get_node_vars(gradients_y, equations_parabolic, dg,
+ i, j, k, element)
+ gradients_reference_3 = get_node_vars(gradients_z, equations_parabolic, dg,
+ i, j, k, element)
+
+ # note that the contravariant vectors are transposed compared with computations of flux
+ # divergences in `calc_volume_integral!`. See
+ # https://github.com/trixi-framework/Trixi.jl/pull/1490#discussion_r1213345190
+ # for a more detailed discussion.
+ gradient_x_node = Ja11 * gradients_reference_1 +
+ Ja21 * gradients_reference_2 +
+ Ja31 * gradients_reference_3
+ gradient_y_node = Ja12 * gradients_reference_1 +
+ Ja22 * gradients_reference_2 +
+ Ja32 * gradients_reference_3
+ gradient_z_node = Ja13 * gradients_reference_1 +
+ Ja23 * gradients_reference_2 +
+ Ja33 * gradients_reference_3
+
+ set_node_vars!(gradients_x, gradient_x_node, equations_parabolic, dg,
+ i, j, k, element)
+ set_node_vars!(gradients_y, gradient_y_node, equations_parabolic, dg,
+ i, j, k, element)
+ set_node_vars!(gradients_z, gradient_z_node, equations_parabolic, dg,
+ i, j, k, element)
+ end
+ end
+ end
+
+ # Prolong solution to interfaces
+ @trixi_timeit timer() "prolong2interfaces" begin
+ prolong2interfaces!(cache_parabolic, u_transformed, mesh,
+ equations_parabolic, dg.surface_integral, dg)
+ end
+
+ # Calculate interface fluxes for the gradient. This reuses P4est `calc_interface_flux!` along with a
+ # specialization for AbstractEquationsParabolic.
+ @trixi_timeit timer() "interface flux" begin
+ calc_interface_flux!(cache_parabolic.elements.surface_flux_values,
+ mesh, False(), # False() = no nonconservative terms
+ equations_parabolic, dg.surface_integral, dg, cache_parabolic)
+ end
+
+ # Prolong solution to boundaries
+ @trixi_timeit timer() "prolong2boundaries" begin
+ prolong2boundaries!(cache_parabolic, u_transformed, mesh,
+ equations_parabolic, dg.surface_integral, dg)
+ end
+
+ # Calculate boundary fluxes
+ @trixi_timeit timer() "boundary flux" begin
+ calc_boundary_flux_gradients!(cache_parabolic, t, boundary_conditions_parabolic,
+ mesh, equations_parabolic, dg.surface_integral, dg)
+ end
+
+ # TODO: parabolic; mortars
+ @assert nmortars(dg, cache) == 0
+
+ # Calculate surface integrals
+ @trixi_timeit timer() "surface integral" begin
+ (; boundary_interpolation) = dg.basis
+ (; surface_flux_values) = cache_parabolic.elements
+ (; contravariant_vectors) = cache.elements
+
+ # Access the factors only once before beginning the loop to increase performance.
+ # We also use explicit assignments instead of `+=` to let `@muladd` turn these
+ # into FMAs (see comment at the top of the file).
+ factor_1 = boundary_interpolation[1, 1]
+ factor_2 = boundary_interpolation[nnodes(dg), 2]
+ @threaded for element in eachelement(dg, cache)
+ for l in eachnode(dg), m in eachnode(dg)
+ for v in eachvariable(equations_parabolic)
+ for dim in 1:3
+ grad = gradients[dim]
+ # surface at -x
+ normal_direction = get_normal_direction(1, contravariant_vectors,
+ 1, l, m, element)
+ grad[v, 1, l, m, element] = (grad[v, 1, l, m, element] +
+ surface_flux_values[v, l, m, 1,
+ element] *
+ factor_1 * normal_direction[dim])
+
+ # surface at +x
+ normal_direction = get_normal_direction(2, contravariant_vectors,
+ nnodes(dg), l, m, element)
+ grad[v, nnodes(dg), l, m, element] = (grad[v, nnodes(dg), l, m,
+ element] +
+ surface_flux_values[v, l, m,
+ 2,
+ element] *
+ factor_2 *
+ normal_direction[dim])
+
+ # surface at -y
+ normal_direction = get_normal_direction(3, contravariant_vectors,
+ l, m, 1, element)
+ grad[v, l, 1, m, element] = (grad[v, l, 1, m, element] +
+ surface_flux_values[v, l, m, 3,
+ element] *
+ factor_1 * normal_direction[dim])
+
+ # surface at +y
+ normal_direction = get_normal_direction(4, contravariant_vectors,
+ l, nnodes(dg), m, element)
+ grad[v, l, nnodes(dg), m, element] = (grad[v, l, nnodes(dg), m,
+ element] +
+ surface_flux_values[v, l, m,
+ 4,
+ element] *
+ factor_2 *
+ normal_direction[dim])
+
+ # surface at -z
+ normal_direction = get_normal_direction(5, contravariant_vectors,
+ l, m, 1, element)
+ grad[v, l, m, 1, element] = (grad[v, l, m, 1, element] +
+ surface_flux_values[v, l, m, 5,
+ element] *
+ factor_1 * normal_direction[dim])
+
+ # surface at +z
+ normal_direction = get_normal_direction(6, contravariant_vectors,
+ l, m, nnodes(dg), element)
+ grad[v, l, m, nnodes(dg), element] = (grad[v, l, m, nnodes(dg),
+ element] +
+ surface_flux_values[v, l, m,
+ 6,
+ element] *
+ factor_2 *
+ normal_direction[dim])
+ end
+ end
+ end
+ end
+ end
+
+ # Apply Jacobian from mapping to reference element
+ @trixi_timeit timer() "Jacobian" begin
+ apply_jacobian_parabolic!(gradients_x, mesh, equations_parabolic, dg,
+ cache_parabolic)
+ apply_jacobian_parabolic!(gradients_y, mesh, equations_parabolic, dg,
+ cache_parabolic)
+ apply_jacobian_parabolic!(gradients_z, mesh, equations_parabolic, dg,
+ cache_parabolic)
+ end
+
+ return nothing
+end
+
+# This version is used for parabolic gradient computations
+@inline function calc_interface_flux!(surface_flux_values, mesh::P4estMesh{3},
+ nonconservative_terms::False,
+ equations::AbstractEquationsParabolic,
+ surface_integral, dg::DG, cache,
+ interface_index, normal_direction,
+ primary_i_node_index, primary_j_node_index,
+ primary_direction_index, primary_element_index,
+ secondary_i_node_index, secondary_j_node_index,
+ secondary_direction_index,
+ secondary_element_index)
+ @unpack u = cache.interfaces
+ @unpack surface_flux = surface_integral
+
+ u_ll, u_rr = get_surface_node_vars(u, equations, dg, primary_i_node_index,
+ primary_j_node_index,
+ interface_index)
+
+ flux_ = 0.5 * (u_ll + u_rr) # we assume that the gradient computations utilize a central flux
+
+ # Note that we don't flip the sign on the secondary flux. This is because for parabolic terms,
+ # the normals are not embedded in `flux_` for the parabolic gradient computations.
+ for v in eachvariable(equations)
+ surface_flux_values[v, primary_i_node_index, primary_j_node_index, primary_direction_index, primary_element_index] = flux_[v]
+ surface_flux_values[v, secondary_i_node_index, secondary_j_node_index, secondary_direction_index, secondary_element_index] = flux_[v]
+ end
+end
+
+# This is the version used when calculating the divergence of the viscous fluxes
+function calc_volume_integral!(du, flux_viscous,
+ mesh::P4estMesh{3},
+ equations_parabolic::AbstractEquationsParabolic,
+ dg::DGSEM, cache)
+ (; derivative_dhat) = dg.basis
+ (; contravariant_vectors) = cache.elements
+ flux_viscous_x, flux_viscous_y, flux_viscous_z = flux_viscous
+
+ @threaded for element in eachelement(dg, cache)
+ # Calculate volume terms in one element
+ for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg)
+ flux1 = get_node_vars(flux_viscous_x, equations_parabolic, dg, i, j, k, element)
+ flux2 = get_node_vars(flux_viscous_y, equations_parabolic, dg, i, j, k, element)
+ flux3 = get_node_vars(flux_viscous_z, equations_parabolic, dg, i, j, k, element)
+
+ # Compute the contravariant flux by taking the scalar product of the
+ # first contravariant vector Ja^1 and the flux vector
+ Ja11, Ja12, Ja13 = get_contravariant_vector(1, contravariant_vectors, i, j, k,
+ element)
+ contravariant_flux1 = Ja11 * flux1 + Ja12 * flux2 + Ja13 * flux3
+ for ii in eachnode(dg)
+ multiply_add_to_node_vars!(du, derivative_dhat[ii, i], contravariant_flux1,
+ equations_parabolic, dg, ii, j, k, element)
+ end
+
+ # Compute the contravariant flux by taking the scalar product of the
+ # second contravariant vector Ja^2 and the flux vector
+ Ja21, Ja22, Ja23 = get_contravariant_vector(2, contravariant_vectors, i, j, k,
+ element)
+ contravariant_flux2 = Ja21 * flux1 + Ja22 * flux2 + Ja23 * flux3
+ for jj in eachnode(dg)
+ multiply_add_to_node_vars!(du, derivative_dhat[jj, j], contravariant_flux2,
+ equations_parabolic, dg, i, jj, k, element)
+ end
+
+ # Compute the contravariant flux by taking the scalar product of the
+ # third contravariant vector Ja^3 and the flux vector
+ Ja31, Ja32, Ja33 = get_contravariant_vector(3, contravariant_vectors, i, j, k,
+ element)
+ contravariant_flux3 = Ja31 * flux1 + Ja32 * flux2 + Ja33 * flux3
+ for kk in eachnode(dg)
+ multiply_add_to_node_vars!(du, derivative_dhat[kk, k], contravariant_flux3,
+ equations_parabolic, dg, i, j, kk, element)
+ end
+ end
+ end
+
+ return nothing
+end
+
+# This is the version used when calculating the divergence of the viscous fluxes
+# We pass the `surface_integral` argument solely for dispatch
+function prolong2interfaces!(cache_parabolic, flux_viscous,
+ mesh::P4estMesh{3},
+ equations_parabolic::AbstractEquationsParabolic,
+ surface_integral, dg::DG, cache)
+ (; interfaces) = cache_parabolic
+ (; contravariant_vectors) = cache_parabolic.elements
+ index_range = eachnode(dg)
+ flux_viscous_x, flux_viscous_y, flux_viscous_z = flux_viscous
+
+ @threaded for interface in eachinterface(dg, cache)
+ # Copy solution data from the primary element using "delayed indexing" with
+ # a start value and a step size to get the correct face and orientation.
+ # Note that in the current implementation, the interface will be
+ # "aligned at the primary element", i.e., the index of the primary side
+ # will always run forwards.
+ primary_element = interfaces.neighbor_ids[1, interface]
+ primary_indices = interfaces.node_indices[1, interface]
+ primary_direction = indices2direction(primary_indices)
+
+ i_primary_start, i_primary_step_i, i_primary_step_j = index_to_start_step_3d(primary_indices[1],
+ index_range)
+ j_primary_start, j_primary_step_i, j_primary_step_j = index_to_start_step_3d(primary_indices[2],
+ index_range)
+ k_primary_start, k_primary_step_i, k_primary_step_j = index_to_start_step_3d(primary_indices[3],
+ index_range)
+
+ i_primary = i_primary_start
+ j_primary = j_primary_start
+ k_primary = k_primary_start
+
+ for j in eachnode(dg)
+ for i in eachnode(dg)
+ # this is the outward normal direction on the primary element
+ normal_direction = get_normal_direction(primary_direction,
+ contravariant_vectors,
+ i_primary, j_primary, k_primary,
+ primary_element)
+
+ for v in eachvariable(equations_parabolic)
+ # OBS! `interfaces.u` stores the interpolated *fluxes* and *not the solution*!
+ flux_viscous = SVector(flux_viscous_x[v, i_primary, j_primary,
+ k_primary,
+ primary_element],
+ flux_viscous_y[v, i_primary, j_primary,
+ k_primary,
+ primary_element],
+ flux_viscous_z[v, i_primary, j_primary,
+ k_primary,
+ primary_element])
+
+ interfaces.u[1, v, i, j, interface] = dot(flux_viscous,
+ normal_direction)
+ end
+ i_primary += i_primary_step_i
+ j_primary += j_primary_step_i
+ k_primary += k_primary_step_i
+ end
+ i_primary += i_primary_step_j
+ j_primary += j_primary_step_j
+ k_primary += k_primary_step_j
+ end
+
+ # Copy solution data from the secondary element using "delayed indexing" with
+ # a start value and a step size to get the correct face and orientation.
+ secondary_element = interfaces.neighbor_ids[2, interface]
+ secondary_indices = interfaces.node_indices[2, interface]
+ secondary_direction = indices2direction(secondary_indices)
+
+ i_secondary_start, i_secondary_step_i, i_secondary_step_j = index_to_start_step_3d(secondary_indices[1],
+ index_range)
+ j_secondary_start, j_secondary_step_i, j_secondary_step_j = index_to_start_step_3d(secondary_indices[2],
+ index_range)
+ k_secondary_start, k_secondary_step_i, k_secondary_step_j = index_to_start_step_3d(secondary_indices[3],
+ index_range)
+
+ i_secondary = i_secondary_start
+ j_secondary = j_secondary_start
+ k_secondary = k_secondary_start
+ for j in eachnode(dg)
+ for i in eachnode(dg)
+ # This is the outward normal direction on the secondary element.
+ # Here, we assume that normal_direction on the secondary element is
+ # the negative of normal_direction on the primary element.
+ normal_direction = get_normal_direction(secondary_direction,
+ contravariant_vectors,
+ i_secondary, j_secondary,
+ k_secondary,
+ secondary_element)
+
+ for v in eachvariable(equations_parabolic)
+ # OBS! `interfaces.u` stores the interpolated *fluxes* and *not the solution*!
+ flux_viscous = SVector(flux_viscous_x[v, i_secondary, j_secondary,
+ k_secondary,
+ secondary_element],
+ flux_viscous_y[v, i_secondary, j_secondary,
+ k_secondary,
+ secondary_element],
+ flux_viscous_z[v, i_secondary, j_secondary,
+ k_secondary,
+ secondary_element])
+ # store the normal flux with respect to the primary normal direction
+ interfaces.u[2, v, i, j, interface] = -dot(flux_viscous,
+ normal_direction)
+ end
+ i_secondary += i_secondary_step_i
+ j_secondary += j_secondary_step_i
+ k_secondary += k_secondary_step_i
+ end
+ i_secondary += i_secondary_step_j
+ j_secondary += j_secondary_step_j
+ k_secondary += k_secondary_step_j
+ end
+ end
+
+ return nothing
+end
+
+# This version is used for divergence flux computations
+function calc_interface_flux!(surface_flux_values,
+ mesh::P4estMesh{3}, equations_parabolic,
+ dg::DG, cache_parabolic)
+ (; neighbor_ids, node_indices) = cache_parabolic.interfaces
+ index_range = eachnode(dg)
+
+ @threaded for interface in eachinterface(dg, cache_parabolic)
+ # Get element and side index information on the primary element
+ primary_element = neighbor_ids[1, interface]
+ primary_indices = node_indices[1, interface]
+ primary_direction_index = indices2direction(primary_indices)
+
+ i_primary_start, i_primary_step_i, i_primary_step_j = index_to_start_step_3d(primary_indices[1],
+ index_range)
+ j_primary_start, j_primary_step_i, j_primary_step_j = index_to_start_step_3d(primary_indices[2],
+ index_range)
+ k_primary_start, k_primary_step_i, k_primary_step_j = index_to_start_step_3d(primary_indices[3],
+ index_range)
+
+ i_primary = i_primary_start
+ j_primary = j_primary_start
+ k_primary = k_primary_start
+
+ # Get element and side index information on the secondary element
+ secondary_element = neighbor_ids[2, interface]
+ secondary_indices = node_indices[2, interface]
+ secondary_direction_index = indices2direction(secondary_indices)
+ secondary_surface_indices = surface_indices(secondary_indices)
+
+ # Initialize the secondary indices used in the surface loop below.
+ # Get the surface indexing on the secondary element.
+ # Note that the indices on the primary side always run forward, while
+ # the secondary indices might need to run backwards for flipped sides.
+ i_secondary_start, i_secondary_step_i, i_secondary_step_j = index_to_start_step_3d(secondary_surface_indices[1],
+ index_range)
+ j_secondary_start, j_secondary_step_i, j_secondary_step_j = index_to_start_step_3d(secondary_surface_indices[2],
+ index_range)
+ i_secondary = i_secondary_start
+ j_secondary = j_secondary_start
+
+ for j in eachnode(dg)
+ for i in eachnode(dg)
+ # We prolong the viscous flux dotted with the outward normal on the
+ # primary element. We assume a BR-1-type flux.
+ viscous_flux_normal_ll, viscous_flux_normal_rr = get_surface_node_vars(cache_parabolic.interfaces.u,
+ equations_parabolic,
+ dg,
+ i, j,
+ interface)
+
+ flux = 0.5 * (viscous_flux_normal_ll + viscous_flux_normal_rr)
+
+ for v in eachvariable(equations_parabolic)
+ surface_flux_values[v, i, j, primary_direction_index, primary_element] = flux[v]
+ surface_flux_values[v, i_secondary, j_secondary, secondary_direction_index, secondary_element] = -flux[v]
+ end
+
+ # Increment the primary element indices
+ i_primary += i_primary_step_i
+ j_primary += j_primary_step_i
+ k_primary += k_primary_step_i
+ # Increment the secondary element surface indices
+ i_secondary += i_secondary_step_i
+ j_secondary += j_secondary_step_i
+ end
+ # Increment the primary element indices
+ i_primary += i_primary_step_j
+ j_primary += j_primary_step_j
+ k_primary += k_primary_step_j
+ # Increment the secondary element surface indices
+ i_secondary += i_secondary_step_j
+ j_secondary += j_secondary_step_j
+ end
+ end
+
+ return nothing
+end
+
+# TODO: parabolic, finish implementing `calc_boundary_flux_gradients!` and `calc_boundary_flux_divergence!`
+function prolong2boundaries!(cache_parabolic, flux_viscous,
+ mesh::P4estMesh{3},
+ equations_parabolic::AbstractEquationsParabolic,
+ surface_integral, dg::DG, cache)
+ (; boundaries) = cache_parabolic
+ (; contravariant_vectors) = cache_parabolic.elements
+ index_range = eachnode(dg)
+
+ flux_viscous_x, flux_viscous_y, flux_viscous_z = flux_viscous
+
+ @threaded for boundary in eachboundary(dg, cache_parabolic)
+ # Copy solution data from the element using "delayed indexing" with
+ # a start value and a step size to get the correct face and orientation.
+ element = boundaries.neighbor_ids[boundary]
+ node_indices = boundaries.node_indices[boundary]
+ direction = indices2direction(node_indices)
+
+ i_node_start, i_node_step_i, i_node_step_j = index_to_start_step_3d(node_indices[1],
+ index_range)
+ j_node_start, j_node_step_i, j_node_step_j = index_to_start_step_3d(node_indices[2],
+ index_range)
+ k_node_start, k_node_step_i, k_node_step_j = index_to_start_step_3d(node_indices[3],
+ index_range)
+
+ i_node = i_node_start
+ j_node = j_node_start
+ k_node = k_node_start
+
+ for j in eachnode(dg)
+ for i in eachnode(dg)
+ # this is the outward normal direction on the primary element
+ normal_direction = get_normal_direction(direction, contravariant_vectors,
+ i_node, j_node, k_node, element)
+
+ for v in eachvariable(equations_parabolic)
+ flux_viscous = SVector(flux_viscous_x[v, i_node, j_node, k_node,
+ element],
+ flux_viscous_y[v, i_node, j_node, k_node,
+ element],
+ flux_viscous_z[v, i_node, j_node, k_node,
+ element])
+
+ boundaries.u[v, i, j, boundary] = dot(flux_viscous, normal_direction)
+ end
+ i_node += i_node_step_i
+ j_node += j_node_step_i
+ k_node += k_node_step_i
+ end
+ i_node += i_node_step_j
+ j_node += j_node_step_j
+ k_node += k_node_step_j
+ end
+ end
+ return nothing
+end
+
+# Function barrier for type stability
+# !!! TODO: Figure out why this cannot be removed even though it exists in the dg_2d_parabolic.jl file
+function calc_boundary_flux_gradients!(cache, t, boundary_conditions, mesh::P4estMesh,
+ equations, surface_integral, dg::DG)
+ (; boundary_condition_types, boundary_indices) = boundary_conditions
+
+ calc_boundary_flux_by_type!(cache, t, boundary_condition_types, boundary_indices,
+ Gradient(), mesh, equations, surface_integral, dg)
+ return nothing
+end
+
+function calc_boundary_flux_divergence!(cache, t, boundary_conditions, mesh::P4estMesh,
+ equations, surface_integral, dg::DG)
+ (; boundary_condition_types, boundary_indices) = boundary_conditions
+
+ calc_boundary_flux_by_type!(cache, t, boundary_condition_types, boundary_indices,
+ Divergence(), mesh, equations, surface_integral, dg)
+ return nothing
+end
+
+# Iterate over tuples of boundary condition types and associated indices
+# in a type-stable way using "lispy tuple programming".
+function calc_boundary_flux_by_type!(cache, t, BCs::NTuple{N, Any},
+ BC_indices::NTuple{N, Vector{Int}},
+ operator_type,
+ mesh::P4estMesh,
+ equations, surface_integral, dg::DG) where {N}
+ # Extract the boundary condition type and index vector
+ boundary_condition = first(BCs)
+ boundary_condition_indices = first(BC_indices)
+ # Extract the remaining types and indices to be processed later
+ remaining_boundary_conditions = Base.tail(BCs)
+ remaining_boundary_condition_indices = Base.tail(BC_indices)
+
+ # process the first boundary condition type
+ calc_boundary_flux!(cache, t, boundary_condition, boundary_condition_indices,
+ operator_type, mesh, equations, surface_integral, dg)
+
+ # recursively call this method with the unprocessed boundary types
+ calc_boundary_flux_by_type!(cache, t, remaining_boundary_conditions,
+ remaining_boundary_condition_indices,
+ operator_type,
+ mesh, equations, surface_integral, dg)
+
+ return nothing
+end
+
+# terminate the type-stable iteration over tuples
+function calc_boundary_flux_by_type!(cache, t, BCs::Tuple{}, BC_indices::Tuple{},
+ operator_type, mesh::P4estMesh, equations,
+ surface_integral, dg::DG)
+ nothing
+end
+
+function calc_boundary_flux!(cache, t,
+ boundary_condition_parabolic, # works with Dict types
+ boundary_condition_indices,
+ operator_type, mesh::P4estMesh{3},
+ equations_parabolic::AbstractEquationsParabolic,
+ surface_integral, dg::DG)
+ (; boundaries) = cache
+ (; node_coordinates, surface_flux_values) = cache.elements
+ (; contravariant_vectors) = cache.elements
+ index_range = eachnode(dg)
+
+ @threaded for local_index in eachindex(boundary_condition_indices)
+ # Use the local index to get the global boundary index from the pre-sorted list
+ boundary_index = boundary_condition_indices[local_index]
+
+ # Get information on the adjacent element, compute the surface fluxes,
+ # and store them
+ element = boundaries.neighbor_ids[boundary_index]
+ node_indices = boundaries.node_indices[boundary_index]
+ direction_index = indices2direction(node_indices)
+
+ i_node_start, i_node_step_i, i_node_step_j = index_to_start_step_3d(node_indices[1],
+ index_range)
+ j_node_start, j_node_step_i, j_node_step_j = index_to_start_step_3d(node_indices[2],
+ index_range)
+ k_node_start, k_node_step_i, k_node_step_j = index_to_start_step_3d(node_indices[3],
+ index_range)
+
+ i_node = i_node_start
+ j_node = j_node_start
+ k_node = k_node_start
+
+ for j in eachnode(dg)
+ for i in eachnode(dg)
+ # Extract solution data from boundary container
+ u_inner = get_node_vars(boundaries.u, equations_parabolic, dg, i, j,
+ boundary_index)
+
+ # Outward-pointing normal direction (not normalized)
+ normal_direction = get_normal_direction(direction_index,
+ contravariant_vectors,
+ i_node, j_node, k_node, element)
+
+ # TODO: revisit if we want more general boundary treatments.
+ # This assumes the gradient numerical flux at the boundary is the gradient variable,
+ # which is consistent with BR1, LDG.
+ flux_inner = u_inner
+
+ # Coordinates at boundary node
+ x = get_node_coords(node_coordinates, equations_parabolic, dg, i_node,
+ j_node, k_node,
+ element)
+
+ flux_ = boundary_condition_parabolic(flux_inner, u_inner, normal_direction,
+ x, t, operator_type,
+ equations_parabolic)
+
+ # Copy flux to element storage in the correct orientation
+ for v in eachvariable(equations_parabolic)
+ surface_flux_values[v, i, j, direction_index, element] = flux_[v]
+ end
+
+ i_node += i_node_step_i
+ j_node += j_node_step_i
+ k_node += k_node_step_i
+ end
+ i_node += i_node_step_j
+ j_node += j_node_step_j
+ k_node += k_node_step_j
+ end
+ end
+end
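The `calc_boundary_flux_by_type!` pair above peels one boundary condition off the tuple per call, so every recursive call sees a concretely typed tuple. A standalone sketch of this "lispy tuple programming" idiom with illustrative names only:

# Terminate the recursion on the empty tuple
apply_all(fs::Tuple{}, x) = ()
# Process the first element, then recurse on the rest; each call works on a
# concretely typed tuple, which keeps dispatch static.
function apply_all(fs::NTuple{N, Any}, x) where {N}
    return (first(fs)(x), apply_all(Base.tail(fs), x)...)
end

double(x) = 2 * x
increment(x) = x + 1
apply_all((double, increment), 3.0)  # returns (6.0, 4.0)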
diff --git a/src/solvers/dgsem_tree/dg_3d_parabolic.jl b/src/solvers/dgsem_tree/dg_3d_parabolic.jl
index d6d74637021..5b63b971cd8 100644
--- a/src/solvers/dgsem_tree/dg_3d_parabolic.jl
+++ b/src/solvers/dgsem_tree/dg_3d_parabolic.jl
@@ -13,7 +13,7 @@
# 2. compute f(u, grad(u))
# 3. compute div(f(u, grad(u))) (i.e., the "regular" rhs! call)
# boundary conditions will be applied to both grad(u) and div(f(u, grad(u))).
-function rhs_parabolic!(du, u, t, mesh::TreeMesh{3},
+function rhs_parabolic!(du, u, t, mesh::Union{TreeMesh{3}, P4estMesh{3}},
equations_parabolic::AbstractEquationsParabolic,
initial_condition, boundary_conditions_parabolic, source_terms,
dg::DG, parabolic_scheme, cache, cache_parabolic)
@@ -105,7 +105,7 @@ end
# Transform solution variables prior to taking the gradient
# (e.g., conservative to primitive variables). Defaults to doing nothing.
# TODO: can we avoid copying data?
-function transform_variables!(u_transformed, u, mesh::TreeMesh{3},
+function transform_variables!(u_transformed, u, mesh::Union{TreeMesh{3}, P4estMesh{3}},
equations_parabolic::AbstractEquationsParabolic,
dg::DG, parabolic_scheme, cache, cache_parabolic)
@threaded for element in eachelement(dg, cache)
@@ -325,7 +325,8 @@ function prolong2boundaries!(cache_parabolic, flux_viscous,
return nothing
end
-function calc_viscous_fluxes!(flux_viscous, gradients, u_transformed, mesh::TreeMesh{3},
+function calc_viscous_fluxes!(flux_viscous, gradients, u_transformed,
+ mesh::Union{TreeMesh{3}, P4estMesh{3}},
equations_parabolic::AbstractEquationsParabolic,
dg::DG, cache, cache_parabolic)
gradients_x, gradients_y, gradients_z = gradients
@@ -379,7 +380,7 @@ end
function calc_boundary_flux_gradients!(cache, t,
boundary_conditions_parabolic::BoundaryConditionPeriodic,
- mesh::TreeMesh{3},
+ mesh::Union{TreeMesh{3}, P4estMesh{3}},
equations_parabolic::AbstractEquationsParabolic,
surface_integral, dg::DG)
return nothing
@@ -387,7 +388,7 @@ end
function calc_boundary_flux_divergence!(cache, t,
boundary_conditions_parabolic::BoundaryConditionPeriodic,
- mesh::TreeMesh{3},
+ mesh::Union{TreeMesh{3}, P4estMesh{3}},
equations_parabolic::AbstractEquationsParabolic,
surface_integral, dg::DG)
return nothing
@@ -806,7 +807,7 @@ end
# This is because the parabolic fluxes are assumed to be of the form
# `du/dt + df/dx = dg/dx + source(x,t)`,
# where f(u) is the inviscid flux and g(u) is the viscous flux.
-function apply_jacobian_parabolic!(du, mesh::TreeMesh{3},
+function apply_jacobian_parabolic!(du, mesh::Union{TreeMesh{3}, P4estMesh{3}},
equations::AbstractEquationsParabolic, dg::DG, cache)
@threaded for element in eachelement(dg, cache)
factor = cache.elements.inverse_jacobian[element]
diff --git a/test/test_parabolic_3d.jl b/test/test_parabolic_3d.jl
index 1ae5eed44ae..67a27238969 100644
--- a/test/test_parabolic_3d.jl
+++ b/test/test_parabolic_3d.jl
@@ -86,9 +86,24 @@ isdir(outdir) && rm(outdir, recursive=true)
)
end
-end
+ @trixi_testset "P4estMesh3D: elixir_navierstokes_convergence.jl" begin
+ @test_trixi_include(joinpath(examples_dir(), "p4est_3d_dgsem", "elixir_navierstokes_convergence.jl"),
+ initial_refinement_level = 2, tspan=(0.0, 0.1),
+ l2 = [0.00026599105554982194, 0.000461877794472316, 0.0005424899076052261, 0.0004618777944723191, 0.0015846392581126832],
+ linf = [0.0025241668929956163, 0.006308461681816373, 0.004334939663169113, 0.006308461681804009, 0.03176343480493493]
+ )
+ end
+ @trixi_testset "P4estMesh3D: elixir_navierstokes_taylor_green_vortex.jl" begin
+ @test_trixi_include(joinpath(examples_dir(), "p4est_3d_dgsem", "elixir_navierstokes_taylor_green_vortex.jl"),
+ initial_refinement_level = 2, tspan=(0.0, 0.25),
+ l2 = [0.0001547509861140407, 0.015637861347119624, 0.015637861347119687, 0.022024699158522523, 0.009711013505930812],
+ linf = [0.0006696415247340326, 0.03442565722527785, 0.03442565722577423, 0.06295407168705314, 0.032857472756916195]
+ )
+ end
+
+end
# Clean up afterwards: delete Trixi.jl output directory
@test_nowarn isdir(outdir) && rm(outdir, recursive=true)
-end # module
+end # module
\ No newline at end of file
From d7ea40b19b98cc18d18e5f047131f141d3c08acc Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Tue, 25 Jul 2023 18:42:12 +0200
Subject: [PATCH 31/40] reset threads also when initializing the summary
callback (#1587)
* reset threads also when initializing the summary callback
I added the option to reset the threads from Polyester.jl also in the summary callback.
The idea is that this supports another development workflow where we just modify the RHS
implementation and call solve again without re-creating the ODE.
The same comment as in 036eaed82b92be9376c5b610d8d40eddf45ca1fa applies:
However, I did not document it in the docstring since we have not documented
that we use Polyester.jl threads in general - and the resetting is specific
to Polyester.jl. I was not sure whether we still want to keep the option
to change the threading backend at any time - although I do not see a good reason
why we should do so.
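A brief usage sketch (only the positional `reset_threads` argument introduced below is used;
nothing else is assumed):

    summary_callback = SummaryCallback()       # default: resets Polyester.jl threads on initialization
    summary_callback = SummaryCallback(false)  # opt out of the thread reset

so one can tweak the RHS implementation and call solve again on the same ODE without
stale Polyester.jl thread state.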
---
src/callbacks_step/summary.jl | 18 +++++++++++++++---
1 file changed, 15 insertions(+), 3 deletions(-)
diff --git a/src/callbacks_step/summary.jl b/src/callbacks_step/summary.jl
index 08e13d0b98d..26981a58b73 100644
--- a/src/callbacks_step/summary.jl
+++ b/src/callbacks_step/summary.jl
@@ -15,10 +15,14 @@ Create and return a callback that prints a human-readable summary of the simulat
beginning of a simulation and then resets the timer. When the returned callback is executed
directly, the current timer values are shown.
"""
-function SummaryCallback()
+function SummaryCallback(reset_threads = true)
+ function initialize(cb, u, t, integrator)
+ initialize_summary_callback(cb, u, t, integrator;
+ reset_threads)
+ end
DiscreteCallback(summary_callback, summary_callback,
save_positions = (false, false),
- initialize = initialize_summary_callback)
+ initialize = initialize)
end
function Base.show(io::IO, cb::DiscreteCallback{<:Any, <:typeof(summary_callback)})
@@ -139,7 +143,15 @@ end
# Print information about the current simulation setup
# Note: This is called *after* all initialization is done, but *before* the first time step
-function initialize_summary_callback(cb::DiscreteCallback, u, t, integrator)
+function initialize_summary_callback(cb::DiscreteCallback, u, t, integrator;
+ reset_threads = true)
+ # Optionally reset Polyester.jl threads. See
+ # https://github.com/trixi-framework/Trixi.jl/issues/1583
+ # https://github.com/JuliaSIMD/Polyester.jl/issues/30
+ if reset_threads
+ Polyester.reset_threads!()
+ end
+
mpi_isroot() || return nothing
print_startup_message()
From 53a826b62241fc0f58c0a3cd0a0acc1789a79509 Mon Sep 17 00:00:00 2001
From: Arpit Babbar
Date: Wed, 26 Jul 2023 10:47:07 +0530
Subject: [PATCH 32/40] Timestep stamp in mesh file (#1580)
* Timestep stamp in mesh file
* Update src/callbacks_step/save_solution.jl
Fixes other mesh type issue
Co-authored-by: Hendrik Ranocha
* Add test for multiple mesh files
* Keep within pre-existing tests
---------
Co-authored-by: Hendrik Ranocha
---
src/callbacks_step/save_solution.jl | 9 ++++++++-
test/test_mpi_tree.jl | 6 ++++++
2 files changed, 14 insertions(+), 1 deletion(-)
diff --git a/src/callbacks_step/save_solution.jl b/src/callbacks_step/save_solution.jl
index 1fe0d6b1e15..14ea33368f8 100644
--- a/src/callbacks_step/save_solution.jl
+++ b/src/callbacks_step/save_solution.jl
@@ -155,7 +155,14 @@ function save_mesh(semi::AbstractSemidiscretization, output_directory, timestep
mesh, _, _, _ = mesh_equations_solver_cache(semi)
if mesh.unsaved_changes
- mesh.current_filename = save_mesh_file(mesh, output_directory)
+ # We only append the time step number to the mesh file name if it has
+ # changed during the simulation due to AMR. We do not append it for
+ # the first time step.
+ if timestep == 0
+ mesh.current_filename = save_mesh_file(mesh, output_directory)
+ else
+ mesh.current_filename = save_mesh_file(mesh, output_directory, timestep)
+ end
mesh.unsaved_changes = false
end
end
diff --git a/test/test_mpi_tree.jl b/test/test_mpi_tree.jl
index 84d2609cbb1..8403fcf1b04 100644
--- a/test/test_mpi_tree.jl
+++ b/test/test_mpi_tree.jl
@@ -55,10 +55,16 @@ CI_ON_WINDOWS = (get(ENV, "GITHUB_ACTIONS", false) == "true") && Sys.iswindows()
# Linear scalar advection with AMR
# These example files are only for testing purposes and have no practical use
@trixi_testset "elixir_advection_amr_refine_twice.jl" begin
+ # Here, we also test that SaveSolutionCallback prints multiple mesh files with AMR
+ # Start with a clean environment: remove Trixi.jl output directory if it exists
+ outdir = "out"
+ isdir(outdir) && rm(outdir, recursive=true)
@test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_amr_refine_twice.jl"),
l2 = [0.00020547512522578292],
linf = [0.007831753383083506],
coverage_override = (maxiters=6,))
+ meshfiles = filter(file -> endswith(file,".h5") && startswith(file,"mesh"), readdir(outdir))
+ @test length(meshfiles) > 1
end
@trixi_testset "elixir_advection_amr_coarsen_twice.jl" begin
From fe0e78c658283db21e581bbbc6d48d0adcf39510 Mon Sep 17 00:00:00 2001
From: Johannes Markert <10619309+jmark@users.noreply.github.com>
Date: Wed, 26 Jul 2023 10:47:54 +0200
Subject: [PATCH 33/40] Feature: t8code as meshing backend (#1426)
* Initial commit for the new feature using t8code as meshing backend.
* Delete t8code_2d_dgsem
* Added new examples and tests. Testing updates for T8code.jl.
* Worked in the comments.
* Fixed spelling.
* Update src/auxiliary/auxiliary.jl
Co-authored-by: Hendrik Ranocha
* Added whitespace in Unions.
* Adapted commented out code block reporting the no. of elements per level.
* Added dummy save mesh support for .
* Added test .
* Added to method signature.
* Deleted unnecessary comments.
* Removed commented out tests.
* Fixed Morton ordering bug in 2D at mortar interfaces.
* Disabled `save_solution` callbacks and added more tests.
* Added more tests.
* Updated code according to the review.
* Update src/auxiliary/t8code.jl
Co-authored-by: Hendrik Ranocha
* Update src/auxiliary/t8code.jl
Co-authored-by: Hendrik Ranocha
* Update src/auxiliary/t8code.jl
Co-authored-by: Hendrik Ranocha
* Update src/auxiliary/t8code.jl
Co-authored-by: Hendrik Ranocha
* Update src/meshes/t8code_mesh.jl
Co-authored-by: Hendrik Ranocha
* Update src/meshes/t8code_mesh.jl
Co-authored-by: Hendrik Ranocha
* Update src/meshes/t8code_mesh.jl
Co-authored-by: Hendrik Ranocha
* Update src/meshes/t8code_mesh.jl
Co-authored-by: Hendrik Ranocha
* Update src/meshes/t8code_mesh.jl
Co-authored-by: Hendrik Ranocha
* Update src/meshes/t8code_mesh.jl
Co-authored-by: Hendrik Ranocha
* Update src/solvers/dgsem_t8code/containers_2d.jl
Co-authored-by: Hendrik Ranocha
* Update src/meshes/t8code_mesh.jl
Co-authored-by: Hendrik Ranocha
* Code cleanup.
* Updated to T8code@0.3.0
* Fixing minor issues.
* Fixed typo.
* Code cleanup.
* Enabled `set_ghost` in examples.
* Generalized type info in function signature.
* Added namespace qualifier.
* Updated comments.
* Refactored code and deleted lots of it.
* Removed a copy operation.
* Fixed some merging issues and formatting.
* Fixed spelling.
* Fixed spelling and changed assert macro.
* Applied automatic formatting.
* Backup.
* Removed superfluous outer constructor for T8codeMesh.
* Added return statement for consistency.
* Fixed wrong indentation by autoformatter.
* Added comments.
* Made sure an exception is thrown.
* Changed flags for sc_init for t8code initialization.
* Updated formatting.
* Workaround for error about calling MPI routines after MPI has been finalized.
* Upped to T8code v0.4.1.
* Added mpi_finalize_hook for proper memory cleanup.
* Added t8code to test_threaded.jl
* Added a `save_mesh_file` call in order to satisfy code coverage.
* Improved finalizer logic for T8codeMesh.
* Refined code.
* Restructured to do blocks.
* Moved save_mesh_file call to test file.
* Fixed spelling error.
---------
Co-authored-by: Johannes Markert
Co-authored-by: Hendrik Ranocha
---
.github/workflows/ci.yml | 1 +
Project.toml | 2 +
...ixir_advection_amr_solution_independent.jl | 143 ++++++
.../elixir_advection_amr_unstructured_flag.jl | 87 ++++
.../t8code_2d_dgsem/elixir_advection_basic.jl | 59 +++
.../elixir_advection_nonconforming_flag.jl | 109 ++++
.../elixir_advection_unstructured_flag.jl | 81 +++
.../elixir_euler_free_stream.jl | 122 +++++
.../t8code_2d_dgsem/elixir_euler_sedov.jl | 97 ++++
.../elixir_euler_shockcapturing_ec.jl | 68 +++
...e_terms_nonconforming_unstructured_flag.jl | 122 +++++
.../elixir_eulergravity_convergence.jl | 77 +++
.../t8code_2d_dgsem/elixir_mhd_alfven_wave.jl | 60 +++
examples/t8code_2d_dgsem/elixir_mhd_rotor.jl | 134 +++++
.../elixir_shallowwater_source_terms.jl | 60 +++
src/Trixi.jl | 5 +-
src/auxiliary/t8code.jl | 486 ++++++++++++++++++
src/callbacks_step/amr.jl | 63 +++
src/callbacks_step/amr_dg2d.jl | 72 ++-
src/callbacks_step/analysis.jl | 30 ++
src/callbacks_step/analysis_dg2d.jl | 16 +-
src/callbacks_step/save_restart_dg.jl | 3 +-
src/callbacks_step/save_solution_dg.jl | 3 +-
src/callbacks_step/stepsize_dg2d.jl | 8 +-
src/meshes/mesh_io.jl | 9 +-
src/meshes/meshes.jl | 1 +
src/meshes/t8code_mesh.jl | 345 +++++++++++++
src/solvers/dg.jl | 4 +-
src/solvers/dgsem_p4est/containers.jl | 9 +-
src/solvers/dgsem_p4est/containers_2d.jl | 5 +-
src/solvers/dgsem_p4est/dg_2d.jl | 33 +-
src/solvers/dgsem_structured/dg_2d.jl | 19 +-
src/solvers/dgsem_t8code/containers.jl | 60 +++
src/solvers/dgsem_t8code/containers_2d.jl | 58 +++
src/solvers/dgsem_t8code/dg.jl | 31 ++
src/solvers/dgsem_tree/dg_2d.jl | 22 +-
src/solvers/dgsem_tree/indicators_2d.jl | 3 +-
src/solvers/dgsem_unstructured/dg_2d.jl | 10 +-
test/runtests.jl | 220 ++++----
test/test_t8code_2d.jl | 182 +++++++
test/test_threaded.jl | 16 +
41 files changed, 2767 insertions(+), 168 deletions(-)
create mode 100644 examples/t8code_2d_dgsem/elixir_advection_amr_solution_independent.jl
create mode 100644 examples/t8code_2d_dgsem/elixir_advection_amr_unstructured_flag.jl
create mode 100644 examples/t8code_2d_dgsem/elixir_advection_basic.jl
create mode 100644 examples/t8code_2d_dgsem/elixir_advection_nonconforming_flag.jl
create mode 100644 examples/t8code_2d_dgsem/elixir_advection_unstructured_flag.jl
create mode 100644 examples/t8code_2d_dgsem/elixir_euler_free_stream.jl
create mode 100644 examples/t8code_2d_dgsem/elixir_euler_sedov.jl
create mode 100644 examples/t8code_2d_dgsem/elixir_euler_shockcapturing_ec.jl
create mode 100644 examples/t8code_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl
create mode 100644 examples/t8code_2d_dgsem/elixir_eulergravity_convergence.jl
create mode 100644 examples/t8code_2d_dgsem/elixir_mhd_alfven_wave.jl
create mode 100644 examples/t8code_2d_dgsem/elixir_mhd_rotor.jl
create mode 100644 examples/t8code_2d_dgsem/elixir_shallowwater_source_terms.jl
create mode 100644 src/auxiliary/t8code.jl
create mode 100644 src/meshes/t8code_mesh.jl
create mode 100644 src/solvers/dgsem_t8code/containers.jl
create mode 100644 src/solvers/dgsem_t8code/containers_2d.jl
create mode 100644 src/solvers/dgsem_t8code/dg.jl
create mode 100644 test/test_t8code_2d.jl
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index b0a2c93db3c..4790f93d913 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -69,6 +69,7 @@ jobs:
- structured
- p4est_part1
- p4est_part2
+ - t8code_part1
- unstructured_dgmulti
- parabolic
- paper_self_gravitating_gas_dynamics
diff --git a/Project.toml b/Project.toml
index 94c47a35ac1..db410317851 100644
--- a/Project.toml
+++ b/Project.toml
@@ -37,6 +37,7 @@ StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
StrideArrays = "d1fa6d79-ef01-42a6-86c9-f7c551f8593b"
StructArrays = "09ab397b-f2b6-538f-b94a-2f83cf4a842a"
SummationByPartsOperators = "9f78cca6-572e-554e-b819-917d2f1cf240"
+T8code = "d0cc0030-9a40-4274-8435-baadcfd54fa1"
TimerOutputs = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f"
Triangulate = "f7e6ffb2-c36d-4f8f-a77e-16e897189344"
TriplotBase = "981d1d27-644d-49a2-9326-4793e63143c3"
@@ -80,6 +81,7 @@ StaticArrays = "1"
StrideArrays = "0.1.18"
StructArrays = "0.6"
SummationByPartsOperators = "0.5.41"
+T8code = "0.4.1"
TimerOutputs = "0.5"
Triangulate = "2.0"
TriplotBase = "0.1"
diff --git a/examples/t8code_2d_dgsem/elixir_advection_amr_solution_independent.jl b/examples/t8code_2d_dgsem/elixir_advection_amr_solution_independent.jl
new file mode 100644
index 00000000000..653bab41e2d
--- /dev/null
+++ b/examples/t8code_2d_dgsem/elixir_advection_amr_solution_independent.jl
@@ -0,0 +1,143 @@
+using OrdinaryDiffEq
+using Trixi
+
+# Define new structs inside a module to allow re-evaluating the file.
+module TrixiExtension
+
+using Trixi
+
+struct IndicatorSolutionIndependent{Cache <: NamedTuple} <: Trixi.AbstractIndicator
+ cache::Cache
+end
+
+function IndicatorSolutionIndependent(semi)
+ basis = semi.solver.basis
+ alpha = Vector{real(basis)}()
+ cache = (; semi.mesh, alpha)
+ return IndicatorSolutionIndependent{typeof(cache)}(cache)
+end
+
+function (indicator::IndicatorSolutionIndependent)(u::AbstractArray{<:Any, 4},
+ mesh, equations, dg, cache;
+ t, kwargs...)
+ mesh = indicator.cache.mesh
+ alpha = indicator.cache.alpha
+ resize!(alpha, nelements(dg, cache))
+
+ # Predict the theoretical center.
+ advection_velocity = (0.2, -0.7)
+ center = t .* advection_velocity
+
+ inner_distance = 1
+ outer_distance = 1.85
+
+ # Iterate over all elements.
+ for element in 1:length(alpha)
+ # Calculate periodic distance between cell and center.
+ # This requires an uncurved mesh!
+ coordinates = SVector(0.5 * (cache.elements.node_coordinates[1, 1, 1, element] +
+ cache.elements.node_coordinates[1, end, 1, element]),
+ 0.5 * (cache.elements.node_coordinates[2, 1, 1, element] +
+ cache.elements.node_coordinates[2, 1, end, element]))
+
+        # The geometric shape of the AMR region should be preserved when the base_level is increased.
+ # This is done by looking at the original coordinates of each cell.
+ cell_coordinates = original_coordinates(coordinates, 5 / 8)
+ cell_distance = periodic_distance_2d(cell_coordinates, center, 10)
+ if cell_distance < (inner_distance + outer_distance) / 2
+ cell_coordinates = original_coordinates(coordinates, 5 / 16)
+ cell_distance = periodic_distance_2d(cell_coordinates, center, 10)
+ end
+
+        # Set alpha according to the cell's position inside the circles.
+ target_level = (cell_distance < inner_distance) + (cell_distance < outer_distance)
+ alpha[element] = target_level / 2
+ end
+ return alpha
+end
+
+# For periodic domains, distance between two points must take into account
+# periodic extensions of the domain.
+function periodic_distance_2d(coordinates, center, domain_length)
+ dx = coordinates .- center
+ dx_shifted = abs.(dx .% domain_length)
+ dx_periodic = min.(dx_shifted, domain_length .- dx_shifted)
+ return sqrt(sum(dx_periodic .^ 2))
+end
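+# For example (illustrative, not part of the original elixir): with domain_length = 10,
+# periodic_distance_2d(SVector(4.9, 0.0), SVector(-4.9, 0.0), 10) returns ≈ 0.2 rather than 9.8,
+# because the two points are close across the periodic boundary.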
+
+# This takes a cell's coordinates and transforms them into the coordinates of the
+# parent cell it was originally refined from, such that the parent cell
+# has the given cell_length.
+function original_coordinates(coordinates, cell_length)
+ offset = coordinates .% cell_length
+ offset_sign = sign.(offset)
+ border = coordinates - offset
+ center = border + (offset_sign .* cell_length / 2)
+ return center
+end
+
+end # module TrixiExtension
+
+import .TrixiExtension
+
+###############################################################################
+# Semidiscretization of the linear advection equation.
+
+advection_velocity = (0.2, -0.7)
+equations = LinearScalarAdvectionEquation2D(advection_velocity)
+
+initial_condition = initial_condition_gauss
+
+solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs)
+
+coordinates_min = (-5.0, -5.0)
+coordinates_max = (5.0, 5.0)
+
+mapping = Trixi.coordinates2mapping(coordinates_min, coordinates_max)
+
+trees_per_dimension = (1, 1)
+
+mesh = T8codeMesh(trees_per_dimension, polydeg = 3,
+ mapping = mapping,
+ initial_refinement_level = 1)
+
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+tspan = (0.0, 10.0)
+ode = semidiscretize(semi, tspan)
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 100
+analysis_callback = AnalysisCallback(semi, interval = analysis_interval,
+ extra_analysis_integrals = (entropy,))
+
+alive_callback = AliveCallback(analysis_interval = analysis_interval)
+
+amr_controller = ControllerThreeLevel(semi,
+ TrixiExtension.IndicatorSolutionIndependent(semi),
+ base_level = 4,
+ med_level = 5, med_threshold = 0.1,
+ max_level = 6, max_threshold = 0.6)
+
+amr_callback = AMRCallback(semi, amr_controller,
+ interval = 5,
+ adapt_initial_condition = true,
+ adapt_initial_condition_only_refine = true)
+
+stepsize_callback = StepsizeCallback(cfl = 1.6)
+
+callbacks = CallbackSet(summary_callback,
+ analysis_callback, alive_callback,
+ amr_callback, stepsize_callback);
+
+###############################################################################
+# Run the simulation.
+
+sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false),
+ dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
+ save_everystep = false, callback = callbacks);
+summary_callback() # print the timer summary
diff --git a/examples/t8code_2d_dgsem/elixir_advection_amr_unstructured_flag.jl b/examples/t8code_2d_dgsem/elixir_advection_amr_unstructured_flag.jl
new file mode 100644
index 00000000000..adf1d009a59
--- /dev/null
+++ b/examples/t8code_2d_dgsem/elixir_advection_amr_unstructured_flag.jl
@@ -0,0 +1,87 @@
+using Downloads: download
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# Semidiscretization of the linear advection equation.
+
+advection_velocity = (0.2, -0.7)
+equations = LinearScalarAdvectionEquation2D(advection_velocity)
+
+initial_condition = initial_condition_gauss
+
+boundary_condition = BoundaryConditionDirichlet(initial_condition)
+boundary_conditions = Dict(:all => boundary_condition)
+
+solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs)
+
+# Deformed rectangle that looks like a waving flag, lower and upper faces are
+# sine curves, left and right are vertical lines.
+f1(s) = SVector(-5.0, 5 * s - 5.0)
+f2(s) = SVector(5.0, 5 * s + 5.0)
+f3(s) = SVector(5 * s, -5.0 + 5 * sin(0.5 * pi * s))
+f4(s) = SVector(5 * s, 5.0 + 5 * sin(0.5 * pi * s))
+faces = (f1, f2, f3, f4)
+
+# This creates a mapping that transforms [-1, 1]^2 to the domain with the faces
+# defined above. It generally doesn't work for meshes loaded from mesh files
+# because these can be meshes of arbitrary domains, but the mesh below is
+# specifically built on the domain [-1, 1]^2.
+Trixi.validate_faces(faces)
+mapping_flag = Trixi.transfinite_mapping(faces)
+
+# Unstructured mesh with 24 cells of the square domain [-1, 1]^n
+mesh_file = joinpath(@__DIR__, "square_unstructured_2.inp")
+isfile(mesh_file) ||
+ download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp",
+ mesh_file)
+
+# INP mesh files are only supported by p4est. Hence, we
+# create a p4est connectivity object first from which
+# we can create a t8code mesh.
+conn = Trixi.read_inp_p4est(mesh_file, Val(2))
+
+mesh = T8codeMesh{2}(conn, polydeg = 3,
+ mapping = mapping_flag,
+ initial_refinement_level = 1)
+
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver,
+ boundary_conditions = boundary_conditions)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+tspan = (0.0, 10.0)
+ode = semidiscretize(semi, tspan)
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 100
+analysis_callback = AnalysisCallback(semi, interval = analysis_interval,
+ extra_analysis_integrals = (entropy,))
+
+alive_callback = AliveCallback(analysis_interval = analysis_interval)
+
+amr_controller = ControllerThreeLevel(semi, IndicatorMax(semi, variable = first),
+ base_level = 1,
+ med_level = 2, med_threshold = 0.1,
+ max_level = 3, max_threshold = 0.6)
+amr_callback = AMRCallback(semi, amr_controller,
+ interval = 5,
+ adapt_initial_condition = true,
+ adapt_initial_condition_only_refine = true)
+
+stepsize_callback = StepsizeCallback(cfl = 0.7)
+
+callbacks = CallbackSet(summary_callback,
+ analysis_callback, alive_callback,
+ amr_callback, stepsize_callback)
+
+###############################################################################
+# Run the simulation.
+
+sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false),
+ dt = 1, # solve needs some value here but it will be overwritten by the stepsize_callback
+ save_everystep = false, callback = callbacks);
+
+summary_callback() # print the timer summary
diff --git a/examples/t8code_2d_dgsem/elixir_advection_basic.jl b/examples/t8code_2d_dgsem/elixir_advection_basic.jl
new file mode 100644
index 00000000000..efc51226586
--- /dev/null
+++ b/examples/t8code_2d_dgsem/elixir_advection_basic.jl
@@ -0,0 +1,59 @@
+# The same setup as tree_2d_dgsem/elixir_advection_basic.jl
+# to verify the T8codeMesh implementation against TreeMesh
+
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# semidiscretization of the linear advection equation
+
+advection_velocity = (0.2, -0.7)
+equations = LinearScalarAdvectionEquation2D(advection_velocity)
+
+# Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux
+solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs)
+
+coordinates_min = (-1.0, -1.0) # minimum coordinates (min(x), min(y))
+coordinates_max = (1.0, 1.0) # maximum coordinates (max(x), max(y))
+
+mapping = Trixi.coordinates2mapping(coordinates_min, coordinates_max)
+
+trees_per_dimension = (8, 8)
+
+mesh = T8codeMesh(trees_per_dimension, polydeg = 3,
+ mapping = mapping,
+ initial_refinement_level = 1)
+
+# A semidiscretization collects data structures and functions for the spatial discretization
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition_convergence_test,
+ solver)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+# Create ODE problem with time span from 0.0 to 1.0
+ode = semidiscretize(semi, (0.0, 1.0));
+
+# At the beginning of the main loop, the SummaryCallback prints a summary of the simulation setup
+# and resets the timers
+summary_callback = SummaryCallback()
+
+# The AnalysisCallback allows to analyse the solution in regular intervals and prints the results
+analysis_callback = AnalysisCallback(semi, interval = 100)
+
+# The StepsizeCallback handles the re-calculation of the maximum Δt after each time step
+stepsize_callback = StepsizeCallback(cfl = 1.6)
+
+# Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver
+callbacks = CallbackSet(summary_callback, analysis_callback, stepsize_callback)
+
+###############################################################################
+# run the simulation
+
+# OrdinaryDiffEq's `solve` method evolves the solution in time and executes the passed callbacks
+sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false),
+ dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
+ save_everystep = false, callback = callbacks);
+
+# Print the timer summary
+summary_callback()
diff --git a/examples/t8code_2d_dgsem/elixir_advection_nonconforming_flag.jl b/examples/t8code_2d_dgsem/elixir_advection_nonconforming_flag.jl
new file mode 100644
index 00000000000..31a8bc93697
--- /dev/null
+++ b/examples/t8code_2d_dgsem/elixir_advection_nonconforming_flag.jl
@@ -0,0 +1,109 @@
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# semidiscretization of the linear advection equation
+
+advection_velocity = (0.2, -0.7)
+equations = LinearScalarAdvectionEquation2D(advection_velocity)
+
+# Create DG solver with polynomial degree = 4 and (local) Lax-Friedrichs/Rusanov flux as surface flux
+solver = DGSEM(polydeg = 4, surface_flux = flux_lax_friedrichs)
+
+# Deformed rectangle that looks like a waving flag,
+# lower and upper faces are sine curves, left and right are vertical lines.
+f1(s) = SVector(-1.0, s - 1.0)
+f2(s) = SVector(1.0, s + 1.0)
+f3(s) = SVector(s, -1.0 + sin(0.5 * pi * s))
+f4(s) = SVector(s, 1.0 + sin(0.5 * pi * s))
+
+faces = (f1, f2, f3, f4)
+mapping = Trixi.transfinite_mapping(faces)
+
+# Create a T8codeMesh with 3 x 2 trees and 6 x 4 elements,
+# approximating the geometry with a smaller polydeg for testing.
+trees_per_dimension = (3, 2)
+mesh = T8codeMesh(trees_per_dimension, polydeg = 3,
+ mapping = mapping,
+ initial_refinement_level = 1)
+
+function adapt_callback(forest,
+ forest_from,
+ which_tree,
+ lelement_id,
+ ts,
+ is_family,
+ num_elements,
+ elements_ptr)::Cint
+ vertex = Vector{Cdouble}(undef, 3)
+
+ elements = unsafe_wrap(Array, elements_ptr, num_elements)
+
+ Trixi.t8_element_vertex_reference_coords(ts, elements[1], 0, pointer(vertex))
+
+ level = Trixi.t8_element_level(ts, elements[1])
+
+ # TODO: Make this condition more general.
+ if vertex[1] < 1e-8 && vertex[2] < 1e-8 && level < 4
+ # return true (refine)
+ return 1
+ else
+ # return false (don't refine)
+ return 0
+ end
+end
+
+Trixi.@T8_ASSERT(Trixi.t8_forest_is_committed(mesh.forest)!=0);
+
+# Init new forest.
+new_forest_ref = Ref{Trixi.t8_forest_t}()
+Trixi.t8_forest_init(new_forest_ref);
+new_forest = new_forest_ref[]
+
+# Check out `examples/t8_step4_partition_balance_ghost.jl` in
+# https://github.com/DLR-AMR/T8code.jl for detailed explanations.
+let set_from = C_NULL, recursive = 1, set_for_coarsening = 0, no_repartition = 0
+ Trixi.t8_forest_set_user_data(new_forest, C_NULL)
+ Trixi.t8_forest_set_adapt(new_forest, mesh.forest,
+ Trixi.@t8_adapt_callback(adapt_callback), recursive)
+ Trixi.t8_forest_set_balance(new_forest, set_from, no_repartition)
+ Trixi.t8_forest_set_partition(new_forest, set_from, set_for_coarsening)
+ Trixi.t8_forest_set_ghost(new_forest, 1, Trixi.T8_GHOST_FACES)
+ Trixi.t8_forest_commit(new_forest)
+end
+
+mesh.forest = new_forest
+
+# A semidiscretization collects data structures and functions for the spatial discretization
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition_convergence_test,
+ solver)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+# Create ODE problem with time span from 0.0 to 0.2
+ode = semidiscretize(semi, (0.0, 0.2));
+
+# At the beginning of the main loop, the SummaryCallback prints a summary of the simulation setup
+# and resets the timers
+summary_callback = SummaryCallback()
+
+# The AnalysisCallback allows to analyse the solution in regular intervals and prints the results
+analysis_callback = AnalysisCallback(semi, interval = 100)
+
+# The StepsizeCallback handles the re-calculation of the maximum Δt after each time step
+stepsize_callback = StepsizeCallback(cfl = 1.6)
+
+# Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver
+callbacks = CallbackSet(summary_callback, analysis_callback, stepsize_callback)
+
+###############################################################################
+# run the simulation
+
+# OrdinaryDiffEq's `solve` method evolves the solution in time and executes the passed callbacks
+sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false),
+ dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
+ save_everystep = false, callback = callbacks);
+
+# Print the timer summary
+summary_callback()
diff --git a/examples/t8code_2d_dgsem/elixir_advection_unstructured_flag.jl b/examples/t8code_2d_dgsem/elixir_advection_unstructured_flag.jl
new file mode 100644
index 00000000000..df9cbc26f6e
--- /dev/null
+++ b/examples/t8code_2d_dgsem/elixir_advection_unstructured_flag.jl
@@ -0,0 +1,81 @@
+using Downloads: download
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# Semidiscretization of the linear advection equation.
+
+advection_velocity = (0.2, -0.7)
+equations = LinearScalarAdvectionEquation2D(advection_velocity)
+
+initial_condition = initial_condition_convergence_test
+
+boundary_condition = BoundaryConditionDirichlet(initial_condition)
+boundary_conditions = Dict(:all => boundary_condition)
+
+# Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux.
+solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs)
+
+# Deformed rectangle that looks like a waving flag,
+# lower and upper faces are sine curves, left and right are vertical lines.
+f1(s) = SVector(-1.0, s - 1.0)
+f2(s) = SVector(1.0, s + 1.0)
+f3(s) = SVector(s, -1.0 + sin(0.5 * pi * s))
+f4(s) = SVector(s, 1.0 + sin(0.5 * pi * s))
+faces = (f1, f2, f3, f4)
+
+Trixi.validate_faces(faces)
+mapping_flag = Trixi.transfinite_mapping(faces)
+
+# Unstructured mesh with 24 cells of the square domain [-1, 1]^n.
+mesh_file = joinpath(@__DIR__, "square_unstructured_2.inp")
+isfile(mesh_file) ||
+ download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp",
+ mesh_file)
+
+# INP mesh files are only supported by p4est. Hence, we
+# create a p4est connectivity object first from which
+# we can create a t8code mesh.
+conn = Trixi.read_inp_p4est(mesh_file, Val(2))
+
+mesh = T8codeMesh{2}(conn, polydeg = 3,
+ mapping = mapping_flag,
+ initial_refinement_level = 2)
+
+# A semidiscretization collects data structures and functions for the spatial discretization.
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver,
+ boundary_conditions = boundary_conditions)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+# Create ODE problem with time span from 0.0 to 0.2.
+tspan = (0.0, 0.2)
+ode = semidiscretize(semi, tspan)
+
+# At the beginning of the main loop, the SummaryCallback prints a summary of
+# the simulation setup and resets the timers.
+summary_callback = SummaryCallback()
+
+# The AnalysisCallback allows to analyse the solution in regular intervals and
+# prints the results.
+analysis_callback = AnalysisCallback(semi, interval = 100)
+
+# The StepsizeCallback handles the re-calculation of the maximum Δt after each
+# time step.
+stepsize_callback = StepsizeCallback(cfl = 1.4)
+
+# Create a CallbackSet to collect all callbacks such that they can be passed to
+# the ODE solver.
+callbacks = CallbackSet(summary_callback, analysis_callback, stepsize_callback)
+
+###############################################################################
+# Run the simulation.
+
+# OrdinaryDiffEq's `solve` method evolves the solution in time and executes the passed callbacks.
+sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false),
+ dt = 1.0, # Solve needs some value here but it will be overwritten by the stepsize_callback.
+ save_everystep = false, callback = callbacks);
+
+# Print the timer summary.
+summary_callback()
diff --git a/examples/t8code_2d_dgsem/elixir_euler_free_stream.jl b/examples/t8code_2d_dgsem/elixir_euler_free_stream.jl
new file mode 100644
index 00000000000..01e0449c67e
--- /dev/null
+++ b/examples/t8code_2d_dgsem/elixir_euler_free_stream.jl
@@ -0,0 +1,122 @@
+using Downloads: download
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# Semidiscretization of the compressible Euler equations.
+
+equations = CompressibleEulerEquations2D(1.4)
+
+initial_condition = initial_condition_constant
+
+solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs)
+
+# Mapping as described in https://arxiv.org/abs/2012.12040 but reduced to 2D
+function mapping(xi_, eta_)
+    # Transform the input variables from [-1, 1] onto [0, 3]
+ xi = 1.5 * xi_ + 1.5
+ eta = 1.5 * eta_ + 1.5
+
+ y = eta + 3 / 8 * (cos(1.5 * pi * (2 * xi - 3) / 3) *
+ cos(0.5 * pi * (2 * eta - 3) / 3))
+
+ x = xi + 3 / 8 * (cos(0.5 * pi * (2 * xi - 3) / 3) *
+ cos(2 * pi * (2 * y - 3) / 3))
+
+ return SVector(x, y)
+end
+
+###############################################################################
+# Get the uncurved mesh from a file (downloads the file if not available locally)
+
+# Unstructured mesh with 48 cells of the square domain [-1, 1]^n
+mesh_file = joinpath(@__DIR__, "square_unstructured_1.inp")
+isfile(mesh_file) ||
+ download("https://gist.githubusercontent.com/efaulhaber/a075f8ec39a67fa9fad8f6f84342cbca/raw/a7206a02ed3a5d3cadacd8d9694ac154f9151db7/square_unstructured_1.inp",
+ mesh_file)
+
+# INP mesh files are only supported by p4est. Hence, we
+# create a p4est connectivity object first from which
+# we can create a t8code mesh.
+conn = Trixi.read_inp_p4est(mesh_file, Val(2))
+
+mesh = T8codeMesh{2}(conn, polydeg = 3,
+ mapping = mapping,
+ initial_refinement_level = 1)
+
+function adapt_callback(forest,
+ forest_from,
+ which_tree,
+ lelement_id,
+ ts,
+ is_family,
+ num_elements,
+ elements_ptr)::Cint
+ vertex = Vector{Cdouble}(undef, 3)
+
+ elements = unsafe_wrap(Array, elements_ptr, num_elements)
+
+ Trixi.t8_element_vertex_reference_coords(ts, elements[1], 0, pointer(vertex))
+
+ level = Trixi.t8_element_level(ts, elements[1])
+
+ # TODO: Make this condition more general.
+ if vertex[1] < 1e-8 && vertex[2] < 1e-8 && level < 3
+ # return true (refine)
+ return 1
+ else
+ # return false (don't refine)
+ return 0
+ end
+end
+
+Trixi.@T8_ASSERT(Trixi.t8_forest_is_committed(mesh.forest)!=0);
+
+# Init new forest.
+new_forest_ref = Ref{Trixi.t8_forest_t}()
+Trixi.t8_forest_init(new_forest_ref);
+new_forest = new_forest_ref[]
+
+# Check out `examples/t8_step4_partition_balance_ghost.jl` in
+# https://github.com/DLR-AMR/T8code.jl for detailed explanations.
+let set_from = C_NULL, recursive = 1, set_for_coarsening = 0, no_repartition = 0
+ Trixi.t8_forest_set_user_data(new_forest, C_NULL)
+ Trixi.t8_forest_set_adapt(new_forest, mesh.forest,
+ Trixi.@t8_adapt_callback(adapt_callback), recursive)
+ Trixi.t8_forest_set_balance(new_forest, set_from, no_repartition)
+ Trixi.t8_forest_set_partition(new_forest, set_from, set_for_coarsening)
+ Trixi.t8_forest_set_ghost(new_forest, 1, Trixi.T8_GHOST_FACES)
+ Trixi.t8_forest_commit(new_forest)
+end
+
+mesh.forest = new_forest
+
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver,
+ boundary_conditions = Dict(:all => BoundaryConditionDirichlet(initial_condition)))
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+tspan = (0.0, 1.0)
+ode = semidiscretize(semi, tspan)
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 100
+analysis_callback = AnalysisCallback(semi, interval = analysis_interval)
+
+alive_callback = AliveCallback(analysis_interval = analysis_interval)
+
+stepsize_callback = StepsizeCallback(cfl = 2.0)
+
+callbacks = CallbackSet(summary_callback,
+ analysis_callback, alive_callback,
+ stepsize_callback)
+
+###############################################################################
+# run the simulation
+
+sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false),
+ dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
+ save_everystep = false, callback = callbacks);
+summary_callback() # print the timer summary
diff --git a/examples/t8code_2d_dgsem/elixir_euler_sedov.jl b/examples/t8code_2d_dgsem/elixir_euler_sedov.jl
new file mode 100644
index 00000000000..965d794f8dc
--- /dev/null
+++ b/examples/t8code_2d_dgsem/elixir_euler_sedov.jl
@@ -0,0 +1,97 @@
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# Semidiscretization of the compressible Euler equations.
+
+equations = CompressibleEulerEquations2D(1.4)
+
+"""
+ initial_condition_sedov_blast_wave(x, t, equations::CompressibleEulerEquations2D)
+
+The Sedov blast wave setup based on Flash
+- http://flash.uchicago.edu/site/flashcode/user_support/flash_ug_devel/node184.html#SECTION010114000000000000000
+"""
+function initial_condition_sedov_blast_wave(x, t, equations::CompressibleEulerEquations2D)
+ # Set up polar coordinates
+ inicenter = SVector(0.0, 0.0)
+ x_norm = x[1] - inicenter[1]
+ y_norm = x[2] - inicenter[2]
+ r = sqrt(x_norm^2 + y_norm^2)
+
+ # Setup based on http://flash.uchicago.edu/site/flashcode/user_support/flash_ug_devel/node184.html#SECTION010114000000000000000
+ r0 = 0.21875 # = 3.5 * smallest dx (for domain length=4 and max-ref=6)
+ E = 1.0
+ p0_inner = 3 * (equations.gamma - 1) * E / (3 * pi * r0^2)
+ p0_outer = 1.0e-5 # = true Sedov setup
+
+ # Calculate primitive variables
+ rho = 1.0
+ v1 = 0.0
+ v2 = 0.0
+ p = r > r0 ? p0_outer : p0_inner
+
+ return prim2cons(SVector(rho, v1, v2, p), equations)
+end
+
+initial_condition = initial_condition_sedov_blast_wave
+
+# Get the DG approximation space
+surface_flux = flux_lax_friedrichs
+volume_flux = flux_ranocha
+polydeg = 4
+basis = LobattoLegendreBasis(polydeg)
+indicator_sc = IndicatorHennemannGassner(equations, basis,
+ alpha_max = 1.0,
+ alpha_min = 0.001,
+ alpha_smooth = true,
+ variable = density_pressure)
+volume_integral = VolumeIntegralShockCapturingHG(indicator_sc;
+ volume_flux_dg = volume_flux,
+ volume_flux_fv = surface_flux)
+
+solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux,
+ volume_integral = volume_integral)
+
+###############################################################################
+
+coordinates_min = (-1.0, -1.0)
+coordinates_max = (1.0, 1.0)
+
+mapping = Trixi.coordinates2mapping(coordinates_min, coordinates_max)
+
+trees_per_dimension = (4, 4)
+
+mesh = T8codeMesh(trees_per_dimension, polydeg = 4,
+ mapping = mapping,
+ initial_refinement_level = 2, periodicity = true)
+
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+tspan = (0.0, 12.5)
+ode = semidiscretize(semi, tspan)
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 300
+analysis_callback = AnalysisCallback(semi, interval = analysis_interval)
+
+alive_callback = AliveCallback(analysis_interval = analysis_interval)
+
+stepsize_callback = StepsizeCallback(cfl = 0.5)
+
+callbacks = CallbackSet(summary_callback,
+ analysis_callback,
+ alive_callback,
+ stepsize_callback)
+
+###############################################################################
+# run the simulation
+
+sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false),
+ dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
+ save_everystep = false, callback = callbacks);
+summary_callback() # print the timer summary
diff --git a/examples/t8code_2d_dgsem/elixir_euler_shockcapturing_ec.jl b/examples/t8code_2d_dgsem/elixir_euler_shockcapturing_ec.jl
new file mode 100644
index 00000000000..55a9063a001
--- /dev/null
+++ b/examples/t8code_2d_dgsem/elixir_euler_shockcapturing_ec.jl
@@ -0,0 +1,68 @@
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# Semidiscretization of the compressible Euler equations.
+
+equations = CompressibleEulerEquations2D(1.4)
+
+initial_condition = initial_condition_weak_blast_wave
+
+surface_flux = flux_ranocha
+volume_flux = flux_ranocha
+polydeg = 4
+basis = LobattoLegendreBasis(polydeg)
+indicator_sc = IndicatorHennemannGassner(equations, basis,
+ alpha_max = 1.0,
+ alpha_min = 0.001,
+ alpha_smooth = true,
+ variable = density_pressure)
+volume_integral = VolumeIntegralShockCapturingHG(indicator_sc;
+ volume_flux_dg = volume_flux,
+ volume_flux_fv = surface_flux)
+
+solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux,
+ volume_integral = volume_integral)
+
+###############################################################################
+
+coordinates_min = (-1.0, -1.0)
+coordinates_max = (1.0, 1.0)
+
+mapping = Trixi.coordinates2mapping(coordinates_min, coordinates_max)
+
+trees_per_dimension = (4, 4)
+
+mesh = T8codeMesh(trees_per_dimension, polydeg = 4,
+ mapping = mapping,
+ initial_refinement_level = 2, periodicity = true)
+
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+tspan = (0.0, 2.0)
+ode = semidiscretize(semi, tspan)
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 100
+analysis_callback = AnalysisCallback(semi, interval = analysis_interval)
+
+alive_callback = AliveCallback(analysis_interval = analysis_interval)
+
+stepsize_callback = StepsizeCallback(cfl = 1.0)
+
+callbacks = CallbackSet(summary_callback,
+ analysis_callback,
+ alive_callback,
+ stepsize_callback)
+
+###############################################################################
+# run the simulation
+
+sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false),
+ dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
+ save_everystep = false, callback = callbacks);
+summary_callback() # print the timer summary
diff --git a/examples/t8code_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl b/examples/t8code_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl
new file mode 100644
index 00000000000..21f26d79ba8
--- /dev/null
+++ b/examples/t8code_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl
@@ -0,0 +1,122 @@
+using Downloads: download
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# semidiscretization of the compressible Euler equations
+
+equations = CompressibleEulerEquations2D(1.4)
+
+initial_condition = initial_condition_convergence_test
+
+source_terms = source_terms_convergence_test
+
+# BCs must be passed as Dict
+boundary_condition = BoundaryConditionDirichlet(initial_condition)
+boundary_conditions = Dict(:all => boundary_condition)
+
+solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs)
+
+# Deformed rectangle that looks like a waving flag,
+# lower and upper faces are sine curves, left and right are vertical lines.
+f1(s) = SVector(-1.0, s - 1.0)
+f2(s) = SVector(1.0, s + 1.0)
+f3(s) = SVector(s, -1.0 + sin(0.5 * pi * s))
+f4(s) = SVector(s, 1.0 + sin(0.5 * pi * s))
+faces = (f1, f2, f3, f4)
+
+Trixi.validate_faces(faces)
+mapping_flag = Trixi.transfinite_mapping(faces)
+
+# Get the uncurved mesh from a file (downloads the file if not available locally)
+# Unstructured mesh with 24 cells of the square domain [-1, 1]^n
+mesh_file = joinpath(@__DIR__, "square_unstructured_2.inp")
+isfile(mesh_file) ||
+ download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp",
+ mesh_file)
+
+# INP mesh files are only supported by p4est. Hence, we
+# create a p4est connectivity object first from which
+# we can create a t8code mesh.
+conn = Trixi.read_inp_p4est(mesh_file, Val(2))
+
+mesh = T8codeMesh{2}(conn, polydeg = 3,
+ mapping = mapping_flag,
+ initial_refinement_level = 1)
+
+function adapt_callback(forest,
+ forest_from,
+ which_tree,
+ lelement_id,
+ ts,
+ is_family,
+ num_elements,
+ elements_ptr)::Cint
+ vertex = Vector{Cdouble}(undef, 3)
+
+ elements = unsafe_wrap(Array, elements_ptr, num_elements)
+
+ Trixi.t8_element_vertex_reference_coords(ts, elements[1], 0, pointer(vertex))
+
+ level = Trixi.t8_element_level(ts, elements[1])
+
+ # TODO: Make this condition more general.
+ if vertex[1] < 1e-8 && vertex[2] < 1e-8 && level < 2
+ # return true (refine)
+ return 1
+ else
+ # return false (don't refine)
+ return 0
+ end
+end
+
+@assert(Trixi.t8_forest_is_committed(mesh.forest)!=0);
+
+# Init new forest.
+new_forest_ref = Ref{Trixi.t8_forest_t}()
+Trixi.t8_forest_init(new_forest_ref);
+new_forest = new_forest_ref[]
+
+# Check out `examples/t8_step4_partition_balance_ghost.jl` in
+# https://github.com/DLR-AMR/T8code.jl for detailed explanations.
+let set_from = C_NULL, recursive = 1, set_for_coarsening = 0, no_repartition = 0
+ Trixi.t8_forest_set_user_data(new_forest, C_NULL)
+ Trixi.t8_forest_set_adapt(new_forest, mesh.forest,
+ Trixi.@t8_adapt_callback(adapt_callback), recursive)
+ Trixi.t8_forest_set_balance(new_forest, set_from, no_repartition)
+ Trixi.t8_forest_set_partition(new_forest, set_from, set_for_coarsening)
+ Trixi.t8_forest_set_ghost(new_forest, 1, Trixi.T8_GHOST_FACES)
+ Trixi.t8_forest_commit(new_forest)
+end
+
+mesh.forest = new_forest
+
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver,
+ source_terms = source_terms,
+ boundary_conditions = boundary_conditions)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+tspan = (0.0, 1.0)
+ode = semidiscretize(semi, tspan)
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 100
+analysis_callback = AnalysisCallback(semi, interval = analysis_interval)
+
+alive_callback = AliveCallback(analysis_interval = analysis_interval)
+
+stepsize_callback = StepsizeCallback(cfl = 0.8)
+
+callbacks = CallbackSet(summary_callback,
+ analysis_callback, alive_callback,
+ stepsize_callback)
+###############################################################################
+# run the simulation
+
+sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false),
+ dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
+ save_everystep = false, callback = callbacks);
+summary_callback() # print the timer summary
diff --git a/examples/t8code_2d_dgsem/elixir_eulergravity_convergence.jl b/examples/t8code_2d_dgsem/elixir_eulergravity_convergence.jl
new file mode 100644
index 00000000000..32649eacff4
--- /dev/null
+++ b/examples/t8code_2d_dgsem/elixir_eulergravity_convergence.jl
@@ -0,0 +1,77 @@
+using OrdinaryDiffEq
+using Trixi
+
+initial_condition = initial_condition_eoc_test_coupled_euler_gravity
+
+###############################################################################
+# semidiscretization of the compressible Euler equations
+gamma = 2.0
+equations_euler = CompressibleEulerEquations2D(gamma)
+
+polydeg = 3
+solver_euler = DGSEM(polydeg, flux_hll)
+
+coordinates_min = (0.0, 0.0)
+coordinates_max = (2.0, 2.0)
+
+trees_per_dimension = (1, 1)
+
+mapping = Trixi.coordinates2mapping(coordinates_min, coordinates_max)
+
+mesh = T8codeMesh(trees_per_dimension, polydeg = 1,
+ mapping = mapping,
+ initial_refinement_level = 2)
+
+semi_euler = SemidiscretizationHyperbolic(mesh, equations_euler, initial_condition, solver_euler,
+ source_terms=source_terms_eoc_test_coupled_euler_gravity)
+
+
+###############################################################################
+# semidiscretization of the hyperbolic diffusion equations
+equations_gravity = HyperbolicDiffusionEquations2D()
+
+solver_gravity = DGSEM(polydeg, flux_lax_friedrichs)
+
+semi_gravity = SemidiscretizationHyperbolic(mesh, equations_gravity, initial_condition, solver_gravity,
+ source_terms=source_terms_harmonic)
+
+
+###############################################################################
+# combining both semidiscretizations for Euler + self-gravity
+parameters = ParametersEulerGravity(background_density=2.0, # aka rho0
+ # rho0 is (ab)used to add a "+8π" term to the source terms
+ # for the manufactured solution
+ gravitational_constant=1.0, # aka G
+ cfl=1.1,
+ resid_tol=1.0e-10,
+ n_iterations_max=1000,
+ timestep_gravity=timestep_gravity_erk52_3Sstar!)
+
+semi = SemidiscretizationEulerGravity(semi_euler, semi_gravity, parameters)
+
+
+###############################################################################
+# ODE solvers, callbacks etc.
+tspan = (0.0, 0.5)
+ode = semidiscretize(semi, tspan);
+
+summary_callback = SummaryCallback()
+
+stepsize_callback = StepsizeCallback(cfl=0.8)
+
+analysis_interval = 100
+alive_callback = AliveCallback(analysis_interval=analysis_interval)
+
+analysis_callback = AnalysisCallback(semi_euler, interval=analysis_interval,
+ save_analysis=true)
+
+callbacks = CallbackSet(summary_callback, stepsize_callback,
+ analysis_callback, alive_callback)
+
+###############################################################################
+# run the simulation
+sol = solve(ode, CarpenterKennedy2N54(williamson_condition=false),
+ dt=1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
+ save_everystep=false, callback=callbacks);
+summary_callback() # print the timer summary
+println("Number of gravity subcycles: ", semi.gravity_counter.ncalls_since_readout)
diff --git a/examples/t8code_2d_dgsem/elixir_mhd_alfven_wave.jl b/examples/t8code_2d_dgsem/elixir_mhd_alfven_wave.jl
new file mode 100644
index 00000000000..463f916fa2e
--- /dev/null
+++ b/examples/t8code_2d_dgsem/elixir_mhd_alfven_wave.jl
@@ -0,0 +1,60 @@
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# Semidiscretization of the compressible ideal GLM-MHD equations.
+
+gamma = 5/3
+equations = IdealGlmMhdEquations2D(gamma)
+
+initial_condition = initial_condition_convergence_test
+
+# Get the DG approximation space
+volume_flux = (flux_central, flux_nonconservative_powell)
+solver = DGSEM(polydeg=4, surface_flux=(flux_hll, flux_nonconservative_powell),
+ volume_integral=VolumeIntegralFluxDifferencing(volume_flux))
+
+coordinates_min = (0.0 , 0.0 )
+coordinates_max = (sqrt(2.0), sqrt(2.0))
+
+mapping = Trixi.coordinates2mapping(coordinates_min, coordinates_max)
+
+trees_per_dimension = (8, 8)
+
+mesh = T8codeMesh(trees_per_dimension, polydeg=3,
+ mapping=mapping,
+ initial_refinement_level=0, periodicity=true)
+
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+tspan = (0.0, 1.0)
+ode = semidiscretize(semi, tspan)
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 100
+analysis_callback = AnalysisCallback(semi, interval=analysis_interval)
+
+alive_callback = AliveCallback(analysis_interval=analysis_interval)
+
+cfl = 0.9
+stepsize_callback = StepsizeCallback(cfl=cfl)
+
+glm_speed_callback = GlmSpeedCallback(glm_scale=0.5, cfl=cfl)
+
+callbacks = CallbackSet(summary_callback,
+ analysis_callback,
+ alive_callback,
+ stepsize_callback,
+ glm_speed_callback)
+
+###############################################################################
+# run the simulation
+
+sol = solve(ode, CarpenterKennedy2N54(williamson_condition=false),
+ dt=1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
+ save_everystep=false, callback=callbacks);
+summary_callback() # print the timer summary
diff --git a/examples/t8code_2d_dgsem/elixir_mhd_rotor.jl b/examples/t8code_2d_dgsem/elixir_mhd_rotor.jl
new file mode 100644
index 00000000000..9a4bd99e444
--- /dev/null
+++ b/examples/t8code_2d_dgsem/elixir_mhd_rotor.jl
@@ -0,0 +1,134 @@
+using Downloads: download
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# semidiscretization of the compressible ideal GLM-MHD equations
+equations = IdealGlmMhdEquations2D(1.4)
+
+"""
+ initial_condition_rotor(x, t, equations::IdealGlmMhdEquations2D)
+
+The classical MHD rotor test case. Here, the setup is taken from
+- Dominik Derigs, Gregor J. Gassner, Stefanie Walch & Andrew R. Winters (2018)
+ Entropy Stable Finite Volume Approximations for Ideal Magnetohydrodynamics
+ [doi: 10.1365/s13291-018-0178-9](https://doi.org/10.1365/s13291-018-0178-9)
+"""
+function initial_condition_rotor(x, t, equations::IdealGlmMhdEquations2D)
+ # setup taken from Derigs et al. DMV article (2018)
+ # domain must be [0, 1] x [0, 1], γ = 1.4
+ dx = x[1] - 0.5
+ dy = x[2] - 0.5
+ r = sqrt(dx^2 + dy^2)
+ f = (0.115 - r) / 0.015
+ if r <= 0.1
+ rho = 10.0
+ v1 = -20.0 * dy
+ v2 = 20.0 * dx
+ elseif r >= 0.115
+ rho = 1.0
+ v1 = 0.0
+ v2 = 0.0
+ else
+ rho = 1.0 + 9.0 * f
+ v1 = -20.0 * f * dy
+ v2 = 20.0 * f * dx
+ end
+ v3 = 0.0
+ p = 1.0
+ B1 = 5.0 / sqrt(4.0 * pi)
+ B2 = 0.0
+ B3 = 0.0
+ psi = 0.0
+ return prim2cons(SVector(rho, v1, v2, v3, p, B1, B2, B3, psi), equations)
+end
+initial_condition = initial_condition_rotor
+
+surface_flux = (flux_lax_friedrichs, flux_nonconservative_powell)
+volume_flux = (flux_hindenlang_gassner, flux_nonconservative_powell)
+polydeg = 4
+basis = LobattoLegendreBasis(polydeg)
+indicator_sc = IndicatorHennemannGassner(equations, basis,
+ alpha_max = 0.5,
+ alpha_min = 0.001,
+ alpha_smooth = true,
+ variable = density_pressure)
+volume_integral = VolumeIntegralShockCapturingHG(indicator_sc;
+ volume_flux_dg = volume_flux,
+ volume_flux_fv = surface_flux)
+solver = DGSEM(basis, surface_flux, volume_integral)
+
+# Affine-type mapping to take the [-1,1]^2 domain from the mesh file,
+# put it onto the rotor domain [0,1]^2, and then warp it with a mapping
+# as described in https://arxiv.org/abs/2012.12040
+function mapping_twist(xi, eta)
+ y = 0.5 * (eta + 1.0) +
+ 0.05 * cos(1.5 * pi * (2.0 * xi - 1.0)) * cos(0.5 * pi * (2.0 * eta - 1.0))
+ x = 0.5 * (xi + 1.0) + 0.05 * cos(0.5 * pi * (2.0 * xi - 1.0)) * cos(2.0 * pi * y)
+ return SVector(x, y)
+end
+
+mesh_file = joinpath(@__DIR__, "square_unstructured_2.inp")
+isfile(mesh_file) ||
+ download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp",
+ mesh_file)
+
+# INP mesh files are only supported by p4est. Hence, we
+# create a p4est connectivity object first, from which
+# we can create a t8code mesh.
+conn = Trixi.read_inp_p4est(mesh_file, Val(2))
+
+mesh = T8codeMesh{2}(conn, polydeg = 4,
+ mapping = mapping_twist,
+ initial_refinement_level = 1)
+
+boundary_condition = BoundaryConditionDirichlet(initial_condition)
+boundary_conditions = Dict(:all => boundary_condition)
+
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver,
+ boundary_conditions = boundary_conditions)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+tspan = (0.0, 0.15)
+ode = semidiscretize(semi, tspan)
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 100
+analysis_callback = AnalysisCallback(semi, interval = analysis_interval)
+
+alive_callback = AliveCallback(analysis_interval = analysis_interval)
+
+amr_indicator = IndicatorLöhner(semi,
+ variable = density_pressure)
+
+amr_controller = ControllerThreeLevel(semi, amr_indicator,
+ base_level = 1,
+ med_level = 3, med_threshold = 0.05,
+ max_level = 5, max_threshold = 0.1)
+amr_callback = AMRCallback(semi, amr_controller,
+ interval = 5,
+ adapt_initial_condition = true,
+ adapt_initial_condition_only_refine = true)
+
+cfl = 0.5
+stepsize_callback = StepsizeCallback(cfl = cfl)
+
+glm_speed_callback = GlmSpeedCallback(glm_scale = 0.5, cfl = cfl)
+
+callbacks = CallbackSet(summary_callback,
+ analysis_callback,
+ alive_callback,
+ amr_callback,
+ stepsize_callback,
+ glm_speed_callback)
+
+###############################################################################
+# run the simulation
+
+sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false),
+ dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
+ save_everystep = false, callback = callbacks);
+summary_callback() # print the timer summary
diff --git a/examples/t8code_2d_dgsem/elixir_shallowwater_source_terms.jl b/examples/t8code_2d_dgsem/elixir_shallowwater_source_terms.jl
new file mode 100644
index 00000000000..c19f440ebc7
--- /dev/null
+++ b/examples/t8code_2d_dgsem/elixir_shallowwater_source_terms.jl
@@ -0,0 +1,60 @@
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# Semidiscretization of the shallow water equations.
+
+equations = ShallowWaterEquations2D(gravity_constant=9.81)
+
+initial_condition = initial_condition_convergence_test # MMS EOC test
+
+
+###############################################################################
+# Get the DG approximation space
+
+volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal)
+solver = DGSEM(polydeg=3, surface_flux=(flux_lax_friedrichs, flux_nonconservative_fjordholm_etal),
+ volume_integral=VolumeIntegralFluxDifferencing(volume_flux))
+
+###############################################################################
+# Get the T8codeMesh and set up a periodic mesh
+
+coordinates_min = (0.0, 0.0) # minimum coordinates (min(x), min(y))
+coordinates_max = (sqrt(2.0), sqrt(2.0)) # maximum coordinates (max(x), max(y))
+
+mapping = Trixi.coordinates2mapping(coordinates_min, coordinates_max)
+
+trees_per_dimension = (8, 8)
+
+mesh = T8codeMesh(trees_per_dimension, polydeg=3,
+ mapping=mapping,
+ initial_refinement_level=1)
+
+# A semidiscretization collects data structures and functions for the spatial discretization
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver,
+ source_terms=source_terms_convergence_test)
+
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+# Create ODE problem with time span from 0.0 to 1.0
+tspan = (0.0, 1.0)
+ode = semidiscretize(semi, tspan)
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 500
+analysis_callback = AnalysisCallback(semi, interval=analysis_interval)
+
+alive_callback = AliveCallback(analysis_interval=analysis_interval)
+
+callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback)
+
+###############################################################################
+# run the simulation
+
+# use a Runge-Kutta method with automatic (error based) time step size control
+sol = solve(ode, RDPK3SpFSAL49(); abstol=1.0e-8, reltol=1.0e-8,
+ ode_default_options()..., callback=callbacks);
+summary_callback() # print the timer summary
diff --git a/src/Trixi.jl b/src/Trixi.jl
index b0c872b1904..990c33f3c94 100644
--- a/src/Trixi.jl
+++ b/src/Trixi.jl
@@ -54,6 +54,7 @@ using Octavian: Octavian, matmul!
using Polyester: Polyester, @batch # You know, the cheapest threads you can find...
using OffsetArrays: OffsetArray, OffsetVector
using P4est
+using T8code
using Setfield: @set
using RecipesBase: RecipesBase
using Requires: @require
@@ -110,6 +111,7 @@ include("basic_types.jl")
include("auxiliary/auxiliary.jl")
include("auxiliary/mpi.jl")
include("auxiliary/p4est.jl")
+include("auxiliary/t8code.jl")
include("equations/equations.jl")
include("meshes/meshes.jl")
include("solvers/solvers.jl")
@@ -210,7 +212,7 @@ export entropy, energy_total, energy_kinetic, energy_internal, energy_magnetic,
export lake_at_rest_error
export ncomponents, eachcomponent
-export TreeMesh, StructuredMesh, UnstructuredMesh2D, P4estMesh
+export TreeMesh, StructuredMesh, UnstructuredMesh2D, P4estMesh, T8codeMesh
export DG,
DGSEM, LobattoLegendreBasis,
@@ -277,6 +279,7 @@ function __init__()
init_mpi()
init_p4est()
+ init_t8code()
register_error_hints()
diff --git a/src/auxiliary/t8code.jl b/src/auxiliary/t8code.jl
new file mode 100644
index 00000000000..37cb782bb93
--- /dev/null
+++ b/src/auxiliary/t8code.jl
@@ -0,0 +1,486 @@
+"""
+ init_t8code()
+
+Initialize `t8code` by calling `sc_init`, `p4est_init`, and `t8_init` while
+setting the log level to `SC_LP_ERROR`. If `t8code` is already initialized,
+this function does nothing, so it is safe to call it multiple times.
+"""
+function init_t8code()
+ t8code_package_id = t8_get_package_id()
+ if t8code_package_id >= 0
+ return nothing
+ end
+
+    # Initialize the sc library; this has to happen before we initialize t8code.
+ let catch_signals = 0, print_backtrace = 0, log_handler = C_NULL
+ T8code.Libt8.sc_init(mpi_comm(), catch_signals, print_backtrace, log_handler,
+ T8code.Libt8.SC_LP_ERROR)
+ end
+
+ if T8code.Libt8.p4est_is_initialized() == 0
+ # Initialize `p4est` with log level ERROR to prevent a lot of output in AMR simulations
+ T8code.Libt8.p4est_init(C_NULL, T8code.Libt8.SC_LP_ERROR)
+ end
+
+ # Initialize t8code with log level ERROR to prevent a lot of output in AMR simulations.
+ t8_init(T8code.Libt8.SC_LP_ERROR)
+
+ if haskey(ENV, "TRIXI_T8CODE_SC_FINALIZE")
+        # Normally, `sc_finalize` should always be called during shutdown of an
+        # application. It checks whether there is still memory that has not been
+        # freed by t8code and/or T8code.jl and throws an exception if this is the
+        # case. This is not mandatory for production runs, but it is helpful during
+        # development. Hence, this option is only activated when the environment
+        # variable TRIXI_T8CODE_SC_FINALIZE exists.
+ @warn "T8code.jl: sc_finalize will be called during shutdown of Trixi.jl."
+ MPI.add_finalize_hook!(T8code.Libt8.sc_finalize)
+ end
+
+ return nothing
+end
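+
+# A minimal usage sketch (illustrative, not part of the public API): `init_t8code`
+# is called automatically from `Trixi.__init__()` (see `src/Trixi.jl` above), but
+# it may also be invoked manually, e.g. when embedding Trixi.jl in another
+# application:
+#
+#   using Trixi
+#   Trixi.init_t8code()  # safe to call repeatedly; returns `nothing`
+#
+# Setting the environment variable `TRIXI_T8CODE_SC_FINALIZE` additionally
+# registers `sc_finalize` as an MPI finalize hook (intended for development only).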
+
+function trixi_t8_unref_forest(forest)
+ t8_forest_unref(Ref(forest))
+end
+
+function t8_free(ptr)
+ T8code.Libt8.sc_free(t8_get_package_id(), ptr)
+end
+
+function trixi_t8_count_interfaces(forest)
+    # Check that the forest is committed, i.e., valid and usable.
+ @assert t8_forest_is_committed(forest) != 0
+
+ # Get the number of local elements of forest.
+ num_local_elements = t8_forest_get_local_num_elements(forest)
+ # Get the number of ghost elements of forest.
+ num_ghost_elements = t8_forest_get_num_ghosts(forest)
+    # Get the number of trees that have elements on this process.
+ num_local_trees = t8_forest_get_num_local_trees(forest)
+
+ current_index = t8_locidx_t(0)
+
+ local_num_conform = 0
+ local_num_mortars = 0
+ local_num_boundary = 0
+
+ for itree in 0:(num_local_trees - 1)
+ tree_class = t8_forest_get_tree_class(forest, itree)
+ eclass_scheme = t8_forest_get_eclass_scheme(forest, tree_class)
+
+ # Get the number of elements of this tree.
+ num_elements_in_tree = t8_forest_get_tree_num_elements(forest, itree)
+
+ for ielement in 0:(num_elements_in_tree - 1)
+ element = t8_forest_get_element_in_tree(forest, itree, ielement)
+
+ level = t8_element_level(eclass_scheme, element)
+
+ num_faces = t8_element_num_faces(eclass_scheme, element)
+
+ for iface in 0:(num_faces - 1)
+ pelement_indices_ref = Ref{Ptr{t8_locidx_t}}()
+ pneighbor_leafs_ref = Ref{Ptr{Ptr{t8_element}}}()
+ pneigh_scheme_ref = Ref{Ptr{t8_eclass_scheme}}()
+
+ dual_faces_ref = Ref{Ptr{Cint}}()
+ num_neighbors_ref = Ref{Cint}()
+
+ forest_is_balanced = Cint(1)
+
+ t8_forest_leaf_face_neighbors(forest, itree, element,
+ pneighbor_leafs_ref, iface, dual_faces_ref,
+ num_neighbors_ref,
+ pelement_indices_ref, pneigh_scheme_ref,
+ forest_is_balanced)
+
+ num_neighbors = num_neighbors_ref[]
+ neighbor_ielements = unsafe_wrap(Array, pelement_indices_ref[],
+ num_neighbors)
+ neighbor_leafs = unsafe_wrap(Array, pneighbor_leafs_ref[], num_neighbors)
+ neighbor_scheme = pneigh_scheme_ref[]
+
+ if num_neighbors > 0
+ neighbor_level = t8_element_level(neighbor_scheme, neighbor_leafs[1])
+
+ # Conforming interface: The second condition ensures we only visit the interface once.
+ if level == neighbor_level && current_index <= neighbor_ielements[1]
+ local_num_conform += 1
+ elseif level < neighbor_level
+ local_num_mortars += 1
+ end
+
+ else
+ local_num_boundary += 1
+ end
+
+ t8_free(dual_faces_ref[])
+ t8_free(pneighbor_leafs_ref[])
+ t8_free(pelement_indices_ref[])
+ end # for
+
+ current_index += 1
+ end # for
+ end # for
+
+ return (interfaces = local_num_conform,
+ mortars = local_num_mortars,
+ boundaries = local_num_boundary)
+end
+
+function trixi_t8_fill_mesh_info(forest, elements, interfaces, mortars, boundaries,
+ boundary_names)
+    # Check that the forest is committed, i.e., valid and usable.
+ @assert t8_forest_is_committed(forest) != 0
+
+ # Get the number of local elements of forest.
+ num_local_elements = t8_forest_get_local_num_elements(forest)
+ # Get the number of ghost elements of forest.
+ num_ghost_elements = t8_forest_get_num_ghosts(forest)
+    # Get the number of trees that have elements on this process.
+ num_local_trees = t8_forest_get_num_local_trees(forest)
+
+ current_index = t8_locidx_t(0)
+
+ local_num_conform = 0
+ local_num_mortars = 0
+ local_num_boundary = 0
+
+ for itree in 0:(num_local_trees - 1)
+ tree_class = t8_forest_get_tree_class(forest, itree)
+ eclass_scheme = t8_forest_get_eclass_scheme(forest, tree_class)
+
+ # Get the number of elements of this tree.
+ num_elements_in_tree = t8_forest_get_tree_num_elements(forest, itree)
+
+ for ielement in 0:(num_elements_in_tree - 1)
+ element = t8_forest_get_element_in_tree(forest, itree, ielement)
+
+ level = t8_element_level(eclass_scheme, element)
+
+ num_faces = t8_element_num_faces(eclass_scheme, element)
+
+ for iface in 0:(num_faces - 1)
+
+ # Compute the `orientation` of the touching faces.
+ if t8_element_is_root_boundary(eclass_scheme, element, iface) == 1
+ cmesh = t8_forest_get_cmesh(forest)
+ itree_in_cmesh = t8_forest_ltreeid_to_cmesh_ltreeid(forest, itree)
+ iface_in_tree = t8_element_tree_face(eclass_scheme, element, iface)
+ orientation_ref = Ref{Cint}()
+
+ t8_cmesh_get_face_neighbor(cmesh, itree_in_cmesh, iface_in_tree, C_NULL,
+ orientation_ref)
+ orientation = orientation_ref[]
+ else
+ orientation = zero(Cint)
+ end
+
+ pelement_indices_ref = Ref{Ptr{t8_locidx_t}}()
+ pneighbor_leafs_ref = Ref{Ptr{Ptr{t8_element}}}()
+ pneigh_scheme_ref = Ref{Ptr{t8_eclass_scheme}}()
+
+ dual_faces_ref = Ref{Ptr{Cint}}()
+ num_neighbors_ref = Ref{Cint}()
+
+ forest_is_balanced = Cint(1)
+
+ t8_forest_leaf_face_neighbors(forest, itree, element,
+ pneighbor_leafs_ref, iface, dual_faces_ref,
+ num_neighbors_ref,
+ pelement_indices_ref, pneigh_scheme_ref,
+ forest_is_balanced)
+
+ num_neighbors = num_neighbors_ref[]
+ dual_faces = unsafe_wrap(Array, dual_faces_ref[], num_neighbors)
+ neighbor_ielements = unsafe_wrap(Array, pelement_indices_ref[],
+ num_neighbors)
+ neighbor_leafs = unsafe_wrap(Array, pneighbor_leafs_ref[], num_neighbors)
+ neighbor_scheme = pneigh_scheme_ref[]
+
+ if num_neighbors > 0
+ neighbor_level = t8_element_level(neighbor_scheme, neighbor_leafs[1])
+
+ # Conforming interface: The second condition ensures we only visit the interface once.
+ if level == neighbor_level && current_index <= neighbor_ielements[1]
+ local_num_conform += 1
+
+ faces = (iface, dual_faces[1])
+ interface_id = local_num_conform
+
+ # Write data to interfaces container.
+ interfaces.neighbor_ids[1, interface_id] = current_index + 1
+ interfaces.neighbor_ids[2, interface_id] = neighbor_ielements[1] + 1
+
+ # Iterate over primary and secondary element.
+ for side in 1:2
+ # Align interface in positive coordinate direction of primary element.
+ # For orientation == 1, the secondary element needs to be indexed backwards
+ # relative to the interface.
+ if side == 1 || orientation == 0
+ # Forward indexing
+ indexing = :i_forward
+ else
+ # Backward indexing
+ indexing = :i_backward
+ end
+
+ if faces[side] == 0
+ # Index face in negative x-direction
+ interfaces.node_indices[side, interface_id] = (:begin,
+ indexing)
+ elseif faces[side] == 1
+ # Index face in positive x-direction
+ interfaces.node_indices[side, interface_id] = (:end,
+ indexing)
+ elseif faces[side] == 2
+ # Index face in negative y-direction
+ interfaces.node_indices[side, interface_id] = (indexing,
+ :begin)
+ else # faces[side] == 3
+ # Index face in positive y-direction
+ interfaces.node_indices[side, interface_id] = (indexing,
+ :end)
+ end
+ end
+
+ # Non-conforming interface.
+ elseif level < neighbor_level
+ local_num_mortars += 1
+
+ faces = (dual_faces[1], iface)
+
+ mortar_id = local_num_mortars
+
+ # Last entry is the large element.
+ mortars.neighbor_ids[end, mortar_id] = current_index + 1
+
+ # First `1:end-1` entries are the smaller elements.
+ mortars.neighbor_ids[1:(end - 1), mortar_id] .= neighbor_ielements .+
+ 1
+
+ for side in 1:2
+ # Align mortar in positive coordinate direction of small side.
+ # For orientation == 1, the large side needs to be indexed backwards
+ # relative to the mortar.
+ if side == 1 || orientation == 0
+ # Forward indexing for small side or orientation == 0.
+ indexing = :i_forward
+ else
+ # Backward indexing for large side with reversed orientation.
+ indexing = :i_backward
+ # Since the orientation is reversed we have to account for this
+ # when filling the `neighbor_ids` array.
+ mortars.neighbor_ids[1, mortar_id] = neighbor_ielements[2] +
+ 1
+ mortars.neighbor_ids[2, mortar_id] = neighbor_ielements[1] +
+ 1
+ end
+
+ if faces[side] == 0
+ # Index face in negative x-direction
+ mortars.node_indices[side, mortar_id] = (:begin, indexing)
+ elseif faces[side] == 1
+ # Index face in positive x-direction
+ mortars.node_indices[side, mortar_id] = (:end, indexing)
+ elseif faces[side] == 2
+ # Index face in negative y-direction
+ mortars.node_indices[side, mortar_id] = (indexing, :begin)
+ else # faces[side] == 3
+ # Index face in positive y-direction
+ mortars.node_indices[side, mortar_id] = (indexing, :end)
+ end
+ end
+
+ # else: "level > neighbor_level" is skipped since we visit the mortar interface only once.
+ end
+
+ # Domain boundary.
+ else
+ local_num_boundary += 1
+ boundary_id = local_num_boundary
+
+ boundaries.neighbor_ids[boundary_id] = current_index + 1
+
+ if iface == 0
+ # Index face in negative x-direction.
+ boundaries.node_indices[boundary_id] = (:begin, :i_forward)
+ elseif iface == 1
+ # Index face in positive x-direction.
+ boundaries.node_indices[boundary_id] = (:end, :i_forward)
+ elseif iface == 2
+ # Index face in negative y-direction.
+ boundaries.node_indices[boundary_id] = (:i_forward, :begin)
+ else # iface == 3
+ # Index face in positive y-direction.
+ boundaries.node_indices[boundary_id] = (:i_forward, :end)
+ end
+
+ # One-based indexing.
+ boundaries.name[boundary_id] = boundary_names[iface + 1, itree + 1]
+ end
+
+ t8_free(dual_faces_ref[])
+ t8_free(pneighbor_leafs_ref[])
+ t8_free(pelement_indices_ref[])
+ end # for iface = ...
+
+ current_index += 1
+ end # for
+ end # for
+
+ return (interfaces = local_num_conform,
+ mortars = local_num_mortars,
+ boundaries = local_num_boundary)
+end
+
+function trixi_t8_get_local_element_levels(forest)
+    # Check that the forest is committed, i.e., valid and usable.
+ @assert t8_forest_is_committed(forest) != 0
+
+ levels = Vector{Int}(undef, t8_forest_get_local_num_elements(forest))
+
+    # Get the number of trees that have elements on this process.
+ num_local_trees = t8_forest_get_num_local_trees(forest)
+
+ current_index = 0
+
+ for itree in 0:(num_local_trees - 1)
+ tree_class = t8_forest_get_tree_class(forest, itree)
+ eclass_scheme = t8_forest_get_eclass_scheme(forest, tree_class)
+
+ # Get the number of elements of this tree.
+ num_elements_in_tree = t8_forest_get_tree_num_elements(forest, itree)
+
+ for ielement in 0:(num_elements_in_tree - 1)
+ element = t8_forest_get_element_in_tree(forest, itree, ielement)
+ current_index += 1
+ levels[current_index] = t8_element_level(eclass_scheme, element)
+ end # for
+ end # for
+
+ return levels
+end
+
+# Callback function prototype to decide on refinement and coarsening.
+# If `is_family` equals 1, the first `num_elements` entries in `elements`
+# form a family and we decide whether this family should be coarsened
+# or only the first element should be refined.
+# Otherwise `is_family` must equal zero and we consider the first entry
+# of the element array for refinement.
+# Entries of the element array beyond the first `num_elements` are undefined.
+# \param [in] forest the forest to which the new elements belong
+# \param [in] forest_from the forest that is adapted.
+# \param [in] which_tree the local tree containing `elements`
+# \param [in] lelement_id the local element id in `forest_old` in the tree of the current element
+# \param [in] ts the eclass scheme of the tree
+# \param [in] is_family if 1, the first `num_elements` entries in `elements` form a family. If 0, they do not.
+# \param [in] num_elements the number of entries in `elements` that are defined
+# \param [in] elements Pointers to a family or, if `is_family` is zero,
+# pointer to one element.
+# \return greater than zero if the first entry in `elements` should be refined,
+#         less than zero if the family `elements` shall be coarsened,
+#         zero otherwise.
+function adapt_callback(forest,
+ forest_from,
+ which_tree,
+ lelement_id,
+ ts,
+ is_family,
+ num_elements,
+ elements)::Cint
+ num_levels = t8_forest_get_local_num_elements(forest_from)
+
+ indicator_ptr = Ptr{Int}(t8_forest_get_user_data(forest))
+ indicators = unsafe_wrap(Array, indicator_ptr, num_levels)
+
+ offset = t8_forest_get_tree_element_offset(forest_from, which_tree)
+
+ # Only allow coarsening for complete families.
+ if indicators[offset + lelement_id + 1] < 0 && is_family == 0
+ return Cint(0)
+ end
+
+ return Cint(indicators[offset + lelement_id + 1])
+end
+
+function trixi_t8_adapt_new(old_forest, indicators)
+    # Check that the forest is committed, i.e., valid and usable.
+ @assert t8_forest_is_committed(old_forest) != 0
+
+ # Init new forest.
+ new_forest_ref = Ref{t8_forest_t}()
+ t8_forest_init(new_forest_ref)
+ new_forest = new_forest_ref[]
+
+ let set_from = C_NULL, recursive = 0, set_for_coarsening = 0, no_repartition = 0
+ t8_forest_set_user_data(new_forest, pointer(indicators))
+ t8_forest_set_adapt(new_forest, old_forest, @t8_adapt_callback(adapt_callback),
+ recursive)
+ t8_forest_set_balance(new_forest, set_from, no_repartition)
+ t8_forest_set_partition(new_forest, set_from, set_for_coarsening)
+        t8_forest_set_ghost(new_forest, 1, T8_GHOST_FACES) # Note: MPI support is not available yet, so this is a dummy call.
+ t8_forest_commit(new_forest)
+ end
+
+ return new_forest
+end
+
+function trixi_t8_get_difference(old_levels, new_levels, num_children)
+ old_nelems = length(old_levels)
+ new_nelems = length(new_levels)
+
+ changes = Vector{Int}(undef, old_nelems)
+
+ # Local element indices.
+ old_index = 1
+ new_index = 1
+
+ while old_index <= old_nelems && new_index <= new_nelems
+ if old_levels[old_index] < new_levels[new_index]
+ # Refined.
+
+ changes[old_index] = 1
+
+ old_index += 1
+ new_index += num_children
+
+ elseif old_levels[old_index] > new_levels[new_index]
+            # Coarsened.
+
+ for child_index in old_index:(old_index + num_children - 1)
+ changes[child_index] = -1
+ end
+
+ old_index += num_children
+ new_index += 1
+
+ else
+ # No changes.
+
+ changes[old_index] = 0
+
+ old_index += 1
+ new_index += 1
+ end
+ end
+
+ return changes
+end
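+
+# Worked example (illustrative, for a quad mesh with `num_children = 4`):
+# `old_levels = [1, 2, 2, 2, 2, 1]` and `new_levels = [2, 2, 2, 2, 1, 1]` yield
+# `changes = [1, -1, -1, -1, -1, 0]`, i.e. the first old element was refined,
+# the following family of four was coarsened into one element, and the last
+# element is unchanged.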
+
+# Coarsen or refine marked cells and rebalance the forest. Return the difference
+# between the old and the new mesh.
+function trixi_t8_adapt!(mesh, indicators)
+ old_levels = trixi_t8_get_local_element_levels(mesh.forest)
+
+ forest_cached = trixi_t8_adapt_new(mesh.forest, indicators)
+
+ new_levels = trixi_t8_get_local_element_levels(forest_cached)
+
+ differences = trixi_t8_get_difference(old_levels, new_levels, 2^ndims(mesh))
+
+ mesh.forest = forest_cached
+
+ return differences
+end
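+
+# Sketch of the overall adaptation flow (illustrative): the AMR controller
+# produces an `indicators` vector with one entry per local element
+# (> 0: refine, < 0: coarsen, 0: keep). Then
+#
+#   differences = trixi_t8_adapt!(mesh, indicators)
+#
+# returns the per-element changes that `adapt!` in `callbacks_step/amr_dg2d.jl`
+# uses to transfer the solution data to the new forest.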
diff --git a/src/callbacks_step/amr.jl b/src/callbacks_step/amr.jl
index bef49b4c482..4d80e6e1139 100644
--- a/src/callbacks_step/amr.jl
+++ b/src/callbacks_step/amr.jl
@@ -471,6 +471,65 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::P4estMesh,
return has_changed
end
+function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::SerialT8codeMesh,
+ equations, dg::DG, cache, semi,
+ t, iter;
+ only_refine = false, only_coarsen = false,
+ passive_args = ())
+ has_changed = false
+
+ @unpack controller, adaptor = amr_callback
+
+ u = wrap_array(u_ode, mesh, equations, dg, cache)
+ indicators = @trixi_timeit timer() "indicator" controller(u, mesh, equations, dg,
+ cache, t = t, iter = iter)
+
+ if only_coarsen
+ indicators[indicators .> 0] .= 0
+ end
+
+ if only_refine
+ indicators[indicators .< 0] .= 0
+ end
+
+ @boundscheck begin
+ @assert axes(indicators)==(Base.OneTo(ncells(mesh)),) ("Indicator array (axes = $(axes(indicators))) and mesh cells (axes = $(Base.OneTo(ncells(mesh)))) have different axes")
+ end
+
+ @trixi_timeit timer() "adapt" begin
+ difference = @trixi_timeit timer() "mesh" trixi_t8_adapt!(mesh, indicators)
+
+ @trixi_timeit timer() "solver" adapt!(u_ode, adaptor, mesh, equations, dg,
+ cache, difference)
+ end
+
+ # Store whether there were any cells coarsened or refined and perform load balancing.
+ has_changed = any(difference .!= 0)
+
+ # TODO: T8codeMesh for MPI not implemented yet.
+ # Check if mesh changed on other processes
+ # if mpi_isparallel()
+ # has_changed = MPI.Allreduce!(Ref(has_changed), |, mpi_comm())[]
+ # end
+
+ if has_changed
+ # TODO: T8codeMesh for MPI not implemented yet.
+ # if mpi_isparallel() && amr_callback.dynamic_load_balancing
+ # @trixi_timeit timer() "dynamic load balancing" begin
+ # global_first_quadrant = unsafe_wrap(Array, mesh.p4est.global_first_quadrant, mpi_nranks() + 1)
+ # old_global_first_quadrant = copy(global_first_quadrant)
+ # partition!(mesh)
+ # rebalance_solver!(u_ode, mesh, equations, dg, cache, old_global_first_quadrant)
+ # end
+ # end
+
+ reinitialize_boundaries!(semi.boundary_conditions, cache)
+ end
+
+ # Return true if there were any cells coarsened or refined, otherwise false.
+ return has_changed
+end
+
function reinitialize_boundaries!(boundary_conditions::UnstructuredSortedBoundaryTypes,
cache)
# Reinitialize boundary types container because boundaries may have changed.
@@ -639,6 +698,10 @@ function current_element_levels(mesh::P4estMesh, solver, cache)
return current_levels
end
+function current_element_levels(mesh::T8codeMesh, solver, cache)
+ return trixi_t8_get_local_element_levels(mesh.forest)
+end
+
# TODO: Taal refactor, merge the two loops of ControllerThreeLevel and IndicatorLöhner etc.?
# But that would remove the simplest possibility to write that stuff to a file...
# We could of course implement some additional logic and workarounds, but is it worth the effort?
diff --git a/src/callbacks_step/amr_dg2d.jl b/src/callbacks_step/amr_dg2d.jl
index 400d16347d5..1d37dfce034 100644
--- a/src/callbacks_step/amr_dg2d.jl
+++ b/src/callbacks_step/amr_dg2d.jl
@@ -333,9 +333,79 @@ function coarsen_elements!(u::AbstractArray{<:Any, 4}, element_id,
end
end
+# Coarsen and refine elements in the DG solver based on a difference list.
+function adapt!(u_ode::AbstractVector, adaptor, mesh::T8codeMesh{2}, equations,
+ dg::DGSEM, cache, difference)
+
+ # Return early if there is nothing to do.
+ if !any(difference .!= 0)
+ return nothing
+ end
+
+ # Number of (local) cells/elements.
+ old_nelems = nelements(dg, cache)
+ new_nelems = ncells(mesh)
+
+ # Local element indices.
+ old_index = 1
+ new_index = 1
+
+ # Note: This is true for `quads` only.
+ T8_CHILDREN = 4
+
+ # Retain current solution data.
+ old_u_ode = copy(u_ode)
+
+ GC.@preserve old_u_ode begin
+ old_u = wrap_array(old_u_ode, mesh, equations, dg, cache)
+
+ reinitialize_containers!(mesh, equations, dg, cache)
+
+ resize!(u_ode,
+ nvariables(equations) * nnodes(dg)^ndims(mesh) * nelements(dg, cache))
+ u = wrap_array(u_ode, mesh, equations, dg, cache)
+
+ while old_index <= old_nelems && new_index <= new_nelems
+ if difference[old_index] > 0 # Refine.
+
+ # Refine element and store solution directly in new data structure.
+ refine_element!(u, new_index, old_u, old_index, adaptor, equations, dg)
+
+ old_index += 1
+ new_index += T8_CHILDREN
+
+ elseif difference[old_index] < 0 # Coarsen.
+
+                # If an element is to be removed, check that the following elements
+                # are also marked for coarsening - otherwise there would be an error
+                # in the way the cells/elements are sorted.
+ @assert all(difference[old_index:(old_index + T8_CHILDREN - 1)] .< 0) "bad cell/element order"
+
+ # Coarsen elements and store solution directly in new data structure.
+ coarsen_elements!(u, new_index, old_u, old_index, adaptor, equations,
+ dg)
+
+ old_index += T8_CHILDREN
+ new_index += 1
+
+ else # No changes.
+
+ # Copy old element data to new element container.
+ @views u[:, .., new_index] .= old_u[:, .., old_index]
+
+ old_index += 1
+ new_index += 1
+ end
+ end # while
+ end # GC.@preserve old_u_ode
+
+ return nothing
+end
+
# this method is called when an `ControllerThreeLevel` is constructed
function create_cache(::Type{ControllerThreeLevel},
- mesh::Union{TreeMesh{2}, P4estMesh{2}}, equations, dg::DG, cache)
+ mesh::Union{TreeMesh{2}, P4estMesh{2}, T8codeMesh{2}}, equations,
+ dg::DG, cache)
controller_value = Vector{Int}(undef, nelements(dg, cache))
return (; controller_value)
end
diff --git a/src/callbacks_step/analysis.jl b/src/callbacks_step/analysis.jl
index 7c453aab633..fad42b11098 100644
--- a/src/callbacks_step/analysis.jl
+++ b/src/callbacks_step/analysis.jl
@@ -534,6 +534,36 @@ function print_amr_information(callbacks, mesh::P4estMesh, solver, cache)
return nothing
end
+# Print level information only if AMR is enabled
+function print_amr_information(callbacks, mesh::T8codeMesh, solver, cache)
+
+ # Return early if there is nothing to print
+ uses_amr(callbacks) || return nothing
+
+    # TODO: Switch to a global element levels array once MPI is supported, or
+    # find another solution.
+ levels = trixi_t8_get_local_element_levels(mesh.forest)
+
+ min_level = minimum(levels)
+ max_level = maximum(levels)
+
+ mpi_println(" minlevel = $min_level")
+ mpi_println(" maxlevel = $max_level")
+
+ if min_level > 0
+ elements_per_level = [count(==(l), levels) for l in 1:max_level]
+
+ for level in max_level:-1:(min_level + 1)
+ mpi_println(" ├── level $level: " *
+ @sprintf("% 14d", elements_per_level[level]))
+ end
+ mpi_println(" └── level $min_level: " *
+ @sprintf("% 14d", elements_per_level[min_level]))
+ end
+
+ return nothing
+end
+
# Iterate over tuples of analysis integrals in a type-stable way using "lispy tuple programming".
function analyze_integrals(analysis_integrals::NTuple{N, Any}, io, du, u, t,
semi) where {N}
diff --git a/src/callbacks_step/analysis_dg2d.jl b/src/callbacks_step/analysis_dg2d.jl
index 6c74e172e46..4e456f79872 100644
--- a/src/callbacks_step/analysis_dg2d.jl
+++ b/src/callbacks_step/analysis_dg2d.jl
@@ -31,7 +31,7 @@ end
function create_cache_analysis(analyzer,
mesh::Union{StructuredMesh{2}, UnstructuredMesh2D,
- P4estMesh{2}},
+ P4estMesh{2}, T8codeMesh{2}},
equations, dg::DG, cache,
RealT, uEltype)
@@ -108,7 +108,7 @@ end
function calc_error_norms(func, u, t, analyzer,
mesh::Union{StructuredMesh{2}, UnstructuredMesh2D,
- P4estMesh{2}}, equations,
+ P4estMesh{2}, T8codeMesh{2}}, equations,
initial_condition, dg::DGSEM, cache, cache_analysis)
@unpack vandermonde, weights = analyzer
@unpack node_coordinates, inverse_jacobian = cache.elements
@@ -176,7 +176,7 @@ end
function integrate_via_indices(func::Func, u,
mesh::Union{StructuredMesh{2}, UnstructuredMesh2D,
- P4estMesh{2}}, equations,
+ P4estMesh{2}, T8codeMesh{2}}, equations,
dg::DGSEM, cache, args...; normalize = true) where {Func}
@unpack weights = dg.basis
@@ -204,7 +204,7 @@ end
function integrate(func::Func, u,
mesh::Union{TreeMesh{2}, StructuredMesh{2}, UnstructuredMesh2D,
- P4estMesh{2}},
+ P4estMesh{2}, T8codeMesh{2}},
equations, dg::DG, cache; normalize = true) where {Func}
integrate_via_indices(u, mesh, equations, dg, cache;
normalize = normalize) do u, i, j, element, equations, dg
@@ -215,7 +215,7 @@ end
function analyze(::typeof(entropy_timederivative), du, u, t,
mesh::Union{TreeMesh{2}, StructuredMesh{2}, UnstructuredMesh2D,
- P4estMesh{2}},
+ P4estMesh{2}, T8codeMesh{2}},
equations, dg::DG, cache)
# Calculate ∫(∂S/∂u ⋅ ∂u/∂t)dΩ
integrate_via_indices(u, mesh, equations, dg, cache,
@@ -259,7 +259,8 @@ function analyze(::Val{:l2_divb}, du, u, t,
end
function analyze(::Val{:l2_divb}, du, u, t,
- mesh::Union{StructuredMesh{2}, UnstructuredMesh2D, P4estMesh{2}},
+ mesh::Union{StructuredMesh{2}, UnstructuredMesh2D, P4estMesh{2},
+ T8codeMesh{2}},
equations::IdealGlmMhdEquations2D, dg::DGSEM, cache)
@unpack contravariant_vectors = cache.elements
integrate_via_indices(u, mesh, equations, dg, cache, cache,
@@ -326,7 +327,8 @@ function analyze(::Val{:linf_divb}, du, u, t,
end
function analyze(::Val{:linf_divb}, du, u, t,
- mesh::Union{StructuredMesh{2}, UnstructuredMesh2D, P4estMesh{2}},
+ mesh::Union{StructuredMesh{2}, UnstructuredMesh2D, P4estMesh{2},
+ T8codeMesh{2}},
equations::IdealGlmMhdEquations2D, dg::DGSEM, cache)
@unpack derivative_matrix, weights = dg.basis
@unpack contravariant_vectors = cache.elements
diff --git a/src/callbacks_step/save_restart_dg.jl b/src/callbacks_step/save_restart_dg.jl
index 5695eb8bede..8db6db2d2b8 100644
--- a/src/callbacks_step/save_restart_dg.jl
+++ b/src/callbacks_step/save_restart_dg.jl
@@ -7,7 +7,8 @@
function save_restart_file(u, time, dt, timestep,
mesh::Union{SerialTreeMesh, StructuredMesh,
- UnstructuredMesh2D, SerialP4estMesh},
+ UnstructuredMesh2D, SerialP4estMesh,
+ SerialT8codeMesh},
equations, dg::DG, cache,
restart_callback)
@unpack output_directory = restart_callback
diff --git a/src/callbacks_step/save_solution_dg.jl b/src/callbacks_step/save_solution_dg.jl
index 6cd4a0ec9c1..6d5004ff65f 100644
--- a/src/callbacks_step/save_solution_dg.jl
+++ b/src/callbacks_step/save_solution_dg.jl
@@ -7,7 +7,8 @@
function save_solution_file(u, time, dt, timestep,
mesh::Union{SerialTreeMesh, StructuredMesh,
- UnstructuredMesh2D, SerialP4estMesh},
+ UnstructuredMesh2D, SerialP4estMesh,
+ SerialT8codeMesh},
equations, dg::DG, cache,
solution_callback, element_variables = Dict{Symbol, Any}();
system = "")
diff --git a/src/callbacks_step/stepsize_dg2d.jl b/src/callbacks_step/stepsize_dg2d.jl
index 89a2b2b8350..673c3ba6aa6 100644
--- a/src/callbacks_step/stepsize_dg2d.jl
+++ b/src/callbacks_step/stepsize_dg2d.jl
@@ -75,7 +75,9 @@ function max_dt(u, t, mesh::ParallelTreeMesh{2},
return dt
end
-function max_dt(u, t, mesh::Union{StructuredMesh{2}, UnstructuredMesh2D, P4estMesh{2}},
+function max_dt(u, t,
+ mesh::Union{StructuredMesh{2}, UnstructuredMesh2D, P4estMesh{2},
+ T8codeMesh{2}},
constant_speed::False, equations, dg::DG, cache)
# to avoid a division by zero if the speed vanishes everywhere,
# e.g. for steady-state linear advection
@@ -109,7 +111,9 @@ function max_dt(u, t, mesh::Union{StructuredMesh{2}, UnstructuredMesh2D, P4estMe
return 2 / (nnodes(dg) * max_scaled_speed)
end
-function max_dt(u, t, mesh::Union{StructuredMesh{2}, UnstructuredMesh2D, P4estMesh{2}},
+function max_dt(u, t,
+ mesh::Union{StructuredMesh{2}, UnstructuredMesh2D, P4estMesh{2},
+ T8codeMesh{2}},
constant_speed::True, equations, dg::DG, cache)
@unpack contravariant_vectors, inverse_jacobian = cache.elements
diff --git a/src/meshes/mesh_io.jl b/src/meshes/mesh_io.jl
index da67fe23e0e..b9895e7d454 100644
--- a/src/meshes/mesh_io.jl
+++ b/src/meshes/mesh_io.jl
@@ -6,7 +6,7 @@
#! format: noindent
# Save current mesh with some context information as an HDF5 file.
-function save_mesh_file(mesh::Union{TreeMesh, P4estMesh}, output_directory,
+function save_mesh_file(mesh::Union{TreeMesh, P4estMesh, T8codeMesh}, output_directory,
timestep = 0)
save_mesh_file(mesh, output_directory, timestep, mpi_parallel(mesh))
end
@@ -220,6 +220,13 @@ function save_mesh_file(mesh::P4estMesh, output_directory, timestep, mpi_paralle
return filename
end
+# TODO: Implement this function as soon as there is support for this in `t8code`.
+function save_mesh_file(mesh::T8codeMesh, output_directory, timestep, mpi_parallel)
+ error("Mesh file output not supported yet for `T8codeMesh`.")
+
+ return joinpath(output_directory, "dummy_mesh.h5")
+end
+
"""
load_mesh(restart_file::AbstractString; n_cells_max)
diff --git a/src/meshes/meshes.jl b/src/meshes/meshes.jl
index 2716aa2007b..ed2158b169a 100644
--- a/src/meshes/meshes.jl
+++ b/src/meshes/meshes.jl
@@ -12,6 +12,7 @@ include("unstructured_mesh.jl")
include("face_interpolant.jl")
include("transfinite_mappings_3d.jl")
include("p4est_mesh.jl")
+include("t8code_mesh.jl")
include("mesh_io.jl")
include("dgmulti_meshes.jl")
end # @muladd
diff --git a/src/meshes/t8code_mesh.jl b/src/meshes/t8code_mesh.jl
new file mode 100644
index 00000000000..13edcc29711
--- /dev/null
+++ b/src/meshes/t8code_mesh.jl
@@ -0,0 +1,345 @@
+"""
+ T8codeMesh{NDIMS} <: AbstractMesh{NDIMS}
+
+An unstructured curved mesh based on trees that uses the C library
+[`t8code`](https://github.com/DLR-AMR/t8code)
+to manage trees and mesh refinement.
+"""
+mutable struct T8codeMesh{NDIMS, RealT <: Real, IsParallel, NDIMSP2, NNODES} <:
+ AbstractMesh{NDIMS}
+ cmesh :: Ptr{t8_cmesh} # cpointer to coarse mesh
+ scheme :: Ptr{t8_eclass_scheme} # cpointer to element scheme
+ forest :: Ptr{t8_forest} # cpointer to forest
+ is_parallel :: IsParallel
+
+ # This specifies the geometry interpolation for each tree.
+ tree_node_coordinates::Array{RealT, NDIMSP2} # [dimension, i, j, k, tree]
+
+ # Stores the quadrature nodes.
+ nodes::SVector{NNODES, RealT}
+
+ boundary_names :: Array{Symbol, 2} # [face direction, tree]
+ current_filename :: String
+
+ ninterfaces :: Int
+ nmortars :: Int
+ nboundaries :: Int
+
+ function T8codeMesh{NDIMS}(cmesh, scheme, forest, tree_node_coordinates, nodes,
+ boundary_names,
+ current_filename) where {NDIMS}
+ is_parallel = False()
+
+ mesh = new{NDIMS, Float64, typeof(is_parallel), NDIMS + 2, length(nodes)}(cmesh,
+ scheme,
+ forest,
+ is_parallel)
+
+ mesh.nodes = nodes
+ mesh.boundary_names = boundary_names
+ mesh.current_filename = current_filename
+ mesh.tree_node_coordinates = tree_node_coordinates
+
+ finalizer(mesh) do mesh
+ # When finalizing `mesh.forest`, `mesh.scheme` and `mesh.cmesh` are
+ # also cleaned up from within `t8code`. The cleanup code for
+ # `cmesh` does some MPI calls for deallocating shared memory
+            # arrays. Due to garbage collection in Julia, the order of shutdown
+            # is not deterministic. Hence, the following code might run after
+            # MPI has already been finalized.
+ # If the environment variable `TRIXI_T8CODE_SC_FINALIZE` is set the
+ # `finalize_hook` of the MPI module takes care of the cleanup. See
+ # further down. However, this might cause a pile-up of `mesh`
+ # objects during long-running sessions.
+ if !MPI.Finalized()
+ trixi_t8_unref_forest(mesh.forest)
+ end
+ end
+
+        # This finalizer call is only recommended during development and not for
+        # production runs, especially long-running sessions, since a reference to
+        # the `mesh` object will be kept throughout the lifetime of the session.
+ # See comments in `init_t8code()` in file `src/auxiliary/t8code.jl` for
+ # more information.
+ if haskey(ENV, "TRIXI_T8CODE_SC_FINALIZE")
+ MPI.add_finalize_hook!() do
+ trixi_t8_unref_forest(mesh.forest)
+ end
+ end
+
+ return mesh
+ end
+end
+
+const SerialT8codeMesh{NDIMS} = T8codeMesh{NDIMS, <:Real, <:False}
+@inline mpi_parallel(mesh::SerialT8codeMesh) = False()
+
+@inline Base.ndims(::T8codeMesh{NDIMS}) where {NDIMS} = NDIMS
+@inline Base.real(::T8codeMesh{NDIMS, RealT}) where {NDIMS, RealT} = RealT
+
+@inline ntrees(mesh::T8codeMesh) = Int(t8_forest_get_num_local_trees(mesh.forest))
+@inline ncells(mesh::T8codeMesh) = Int(t8_forest_get_local_num_elements(mesh.forest))
+@inline ninterfaces(mesh::T8codeMesh) = mesh.ninterfaces
+@inline nmortars(mesh::T8codeMesh) = mesh.nmortars
+@inline nboundaries(mesh::T8codeMesh) = mesh.nboundaries
+
+function Base.show(io::IO, mesh::T8codeMesh)
+ print(io, "T8codeMesh{", ndims(mesh), ", ", real(mesh), "}")
+end
+
+function Base.show(io::IO, ::MIME"text/plain", mesh::T8codeMesh)
+ if get(io, :compact, false)
+ show(io, mesh)
+ else
+ setup = [
+ "#trees" => ntrees(mesh),
+ "current #cells" => ncells(mesh),
+ "polydeg" => length(mesh.nodes) - 1,
+ ]
+ summary_box(io,
+ "T8codeMesh{" * string(ndims(mesh)) * ", " * string(real(mesh)) * "}",
+ setup)
+ end
+end
+
+"""
+ T8codeMesh(trees_per_dimension; polydeg, mapping=identity,
+ RealT=Float64, initial_refinement_level=0, periodicity=true)
+
+Create a structured, potentially curved `T8codeMesh` of the specified size.
+
+Non-periodic boundaries will be called `:x_neg`, `:x_pos`, `:y_neg`, `:y_pos`, `:z_neg`, `:z_pos`.
+
+# Arguments
+- `trees_per_dimension::NTuple{NDIMS, Int}`: the number of trees in each dimension.
+- `polydeg::Integer`: polynomial degree used to store the geometry of the mesh.
+   The mapping will be approximated by an interpolation polynomial
+   of the specified degree for each tree.
+- `mapping`: a function of `NDIMS` variables to describe the mapping that transforms
+   the reference mesh (`[-1, 1]^n`) to the physical domain.
+- `RealT::Type`: the type that should be used for coordinates.
+- `initial_refinement_level::Integer`: refine the mesh uniformly to this level before the simulation starts.
+- `periodicity`: either a `Bool` deciding if all of the boundaries are periodic or an `NTuple{NDIMS, Bool}`
+   deciding for each dimension if the boundaries in this dimension are periodic.
+"""
+function T8codeMesh(trees_per_dimension; polydeg,
+ mapping = coordinates2mapping((-1.0, -1.0), (1.0, 1.0)),
+ RealT = Float64, initial_refinement_level = 0, periodicity = true)
+ NDIMS = length(trees_per_dimension)
+
+ @assert NDIMS == 2 # Only support for NDIMS = 2 yet.
+
+ # Convert periodicity to a Tuple of a Bool for every dimension
+ if all(periodicity)
+ # Also catches case where periodicity = true
+ periodicity = ntuple(_ -> true, NDIMS)
+ elseif !any(periodicity)
+ # Also catches case where periodicity = false
+ periodicity = ntuple(_ -> false, NDIMS)
+ else
+ # Default case if periodicity is an iterable
+ periodicity = Tuple(periodicity)
+ end
+
+ conn = T8code.Libt8.p4est_connectivity_new_brick(trees_per_dimension..., periodicity...)
+ do_partition = 0
+ cmesh = t8_cmesh_new_from_p4est(conn, mpi_comm(), do_partition)
+ T8code.Libt8.p4est_connectivity_destroy(conn)
+
+ scheme = t8_scheme_new_default_cxx()
+ forest = t8_forest_new_uniform(cmesh, scheme, initial_refinement_level, 0, mpi_comm())
+
+ basis = LobattoLegendreBasis(RealT, polydeg)
+ nodes = basis.nodes
+
+ tree_node_coordinates = Array{RealT, NDIMS + 2}(undef, NDIMS,
+ ntuple(_ -> length(nodes), NDIMS)...,
+ prod(trees_per_dimension))
+
+ # Get cell length in reference mesh: Omega_ref = [-1,1]^2.
+ dx = 2 / trees_per_dimension[1]
+ dy = 2 / trees_per_dimension[2]
+
+ num_local_trees = t8_cmesh_get_num_local_trees(cmesh)
+
+ # Non-periodic boundaries.
+ boundary_names = fill(Symbol("---"), 2 * NDIMS, prod(trees_per_dimension))
+
+ for itree in 1:num_local_trees
+ veptr = t8_cmesh_get_tree_vertices(cmesh, itree - 1)
+ verts = unsafe_wrap(Array, veptr, (3, 1 << NDIMS))
+
+ # Calculate node coordinates of reference mesh.
+ cell_x_offset = (verts[1, 1] - 1 / 2 * (trees_per_dimension[1] - 1)) * dx
+ cell_y_offset = (verts[2, 1] - 1 / 2 * (trees_per_dimension[2] - 1)) * dy
+
+ for j in eachindex(nodes), i in eachindex(nodes)
+ tree_node_coordinates[:, i, j, itree] .= mapping(cell_x_offset +
+ dx * nodes[i] / 2,
+ cell_y_offset +
+ dy * nodes[j] / 2)
+ end
+
+ if !periodicity[1]
+ boundary_names[1, itree] = :x_neg
+ boundary_names[2, itree] = :x_pos
+ end
+
+ if !periodicity[2]
+ boundary_names[3, itree] = :y_neg
+ boundary_names[4, itree] = :y_pos
+ end
+ end
+
+ return T8codeMesh{NDIMS}(cmesh, scheme, forest, tree_node_coordinates, nodes,
+ boundary_names, "")
+end
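+
+# Usage sketch (values are illustrative; compare the elixirs above):
+#
+#   mapping = Trixi.coordinates2mapping((0.0, 0.0), (1.0, 1.0))
+#   mesh = T8codeMesh((8, 8), polydeg = 3, mapping = mapping,
+#                     initial_refinement_level = 1, periodicity = true)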
+
+"""
+ T8codeMesh{NDIMS}(cmesh::Ptr{t8_cmesh},
+ mapping=nothing, polydeg=1, RealT=Float64,
+ initial_refinement_level=0)
+
+Main mesh constructor for the `T8codeMesh` that imports an unstructured,
+conforming mesh from a `t8_cmesh` data structure.
+
+# Arguments
+- `cmesh::Ptr{t8_cmesh}`: Pointer to a cmesh object.
+- `mapping`: a function of `NDIMS` variables to describe the mapping that transforms
+ the imported mesh to the physical domain. Use `nothing` for the identity map.
+- `polydeg::Integer`: polynomial degree used to store the geometry of the mesh.
+ The mapping will be approximated by an interpolation polynomial
+ of the specified degree for each tree.
+ The default of `1` creates an uncurved geometry. Use a higher value if the mapping
+ will curve the imported uncurved mesh.
+- `RealT::Type`: the type that should be used for coordinates.
+- `initial_refinement_level::Integer`: refine the mesh uniformly to this level before the simulation starts.
+"""
+function T8codeMesh{NDIMS}(cmesh::Ptr{t8_cmesh};
+ mapping = nothing, polydeg = 1, RealT = Float64,
+ initial_refinement_level = 0) where {NDIMS}
+ @assert NDIMS == 2 # Only support for NDIMS = 2 yet.
+
+ scheme = t8_scheme_new_default_cxx()
+ forest = t8_forest_new_uniform(cmesh, scheme, initial_refinement_level, 0, mpi_comm())
+
+ basis = LobattoLegendreBasis(RealT, polydeg)
+ nodes = basis.nodes
+
+ num_local_trees = t8_cmesh_get_num_local_trees(cmesh)
+
+ tree_node_coordinates = Array{RealT, NDIMS + 2}(undef, NDIMS,
+ ntuple(_ -> length(nodes), NDIMS)...,
+ num_local_trees)
+
+ nodes_in = [-1.0, 1.0]
+ matrix = polynomial_interpolation_matrix(nodes_in, nodes)
+ data_in = Array{RealT, 3}(undef, 2, 2, 2)
+ tmp1 = zeros(RealT, 2, length(nodes), length(nodes_in))
+
+ for itree in 0:(num_local_trees - 1)
+ veptr = t8_cmesh_get_tree_vertices(cmesh, itree)
+ verts = unsafe_wrap(Array, veptr, (3, 1 << NDIMS))
+
+ u = verts[:, 2] - verts[:, 1]
+ v = verts[:, 3] - verts[:, 1]
+ w = [0.0, 0.0, 1.0]
+
+ vol = dot(cross(u, v), w)
+
+ if vol < 0.0
+ @warn "Discovered negative volumes in `cmesh`: vol = $vol"
+ end
+
+ # Tree vertices are stored in z-order.
+ @views data_in[:, 1, 1] .= verts[1:2, 1]
+ @views data_in[:, 2, 1] .= verts[1:2, 2]
+ @views data_in[:, 1, 2] .= verts[1:2, 3]
+ @views data_in[:, 2, 2] .= verts[1:2, 4]
+
+ # Interpolate corner coordinates to specified nodes.
+ multiply_dimensionwise!(view(tree_node_coordinates, :, :, :, itree + 1),
+ matrix, matrix,
+ data_in,
+ tmp1)
+ end
+
+ map_node_coordinates!(tree_node_coordinates, mapping)
+
+ # There's no simple and generic way to distinguish boundaries. Name all of them :all.
+ boundary_names = fill(:all, 2 * NDIMS, num_local_trees)
+
+ return T8codeMesh{NDIMS}(cmesh, scheme, forest, tree_node_coordinates, nodes,
+ boundary_names, "")
+end
+
+"""
+ T8codeMesh{NDIMS}(conn::Ptr{p4est_connectivity},
+ mapping=nothing, polydeg=1, RealT=Float64,
+ initial_refinement_level=0)
+
+Main mesh constructor for the `T8codeMesh` that imports an unstructured,
+conforming mesh from a `p4est_connectivity` data structure.
+
+# Arguments
+- `conn::Ptr{p4est_connectivity}`: Pointer to a P4est connectivity object.
+- `mapping`: a function of `NDIMS` variables to describe the mapping that transforms
+ the imported mesh to the physical domain. Use `nothing` for the identity map.
+- `polydeg::Integer`: polynomial degree used to store the geometry of the mesh.
+ The mapping will be approximated by an interpolation polynomial
+ of the specified degree for each tree.
+ The default of `1` creates an uncurved geometry. Use a higher value if the mapping
+ will curve the imported uncurved mesh.
+- `RealT::Type`: the type that should be used for coordinates.
+- `initial_refinement_level::Integer`: refine the mesh uniformly to this level before the simulation starts.
+"""
+function T8codeMesh{NDIMS}(conn::Ptr{p4est_connectivity}; kwargs...) where {NDIMS}
+ @assert NDIMS == 2 # Only support for NDIMS = 2 yet.
+
+ cmesh = t8_cmesh_new_from_p4est(conn, mpi_comm(), 0)
+
+ return T8codeMesh{NDIMS}(cmesh; kwargs...)
+end
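+
+# Usage sketch (mirrors `elixir_mhd_rotor.jl` above): build a p4est connectivity
+# from an ABAQUS (`.inp`) mesh file and construct a `T8codeMesh` from it:
+#
+#   conn = Trixi.read_inp_p4est(mesh_file, Val(2))
+#   mesh = T8codeMesh{2}(conn, polydeg = 4,
+#                        mapping = mapping_twist,
+#                        initial_refinement_level = 1)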
+
+"""
+ T8codeMesh{NDIMS}(meshfile::String;
+ mapping=nothing, polydeg=1, RealT=Float64,
+ initial_refinement_level=0)
+
+Main mesh constructor for the `T8codeMesh` that imports an unstructured, conforming
+mesh from a Gmsh mesh file (`.msh`).
+
+# Arguments
+- `meshfile::String`: path to a Gmsh mesh file.
+- `mapping`: a function of `NDIMS` variables to describe the mapping that transforms
+ the imported mesh to the physical domain. Use `nothing` for the identity map.
+- `polydeg::Integer`: polynomial degree used to store the geometry of the mesh.
+ The mapping will be approximated by an interpolation polynomial
+ of the specified degree for each tree.
+ The default of `1` creates an uncurved geometry. Use a higher value if the mapping
+ will curve the imported uncurved mesh.
+- `RealT::Type`: the type that should be used for coordinates.
+- `initial_refinement_level::Integer`: refine the mesh uniformly to this level before the simulation starts.
+"""
+function T8codeMesh{NDIMS}(meshfile::String; kwargs...) where {NDIMS}
+ @assert NDIMS == 2 # Only support for NDIMS = 2 yet.
+
+ # Prevent `t8code` from crashing Julia if the file doesn't exist.
+ @assert isfile(meshfile)
+
+ meshfile_prefix, meshfile_suffix = splitext(meshfile)
+
+ cmesh = t8_cmesh_from_msh_file(meshfile_prefix, 0, mpi_comm(), NDIMS, 0, 0)
+
+ return T8codeMesh{NDIMS}(cmesh; kwargs...)
+end
+
+# TODO: Just a placeholder. Will be implemented later when MPI is supported.
+function balance!(mesh::T8codeMesh, init_fn = C_NULL)
+ return nothing
+end
+
+# TODO: Just a placeholder. Will be implemented later when MPI is supported.
+function partition!(mesh::T8codeMesh; allow_coarsening = true, weight_fn = C_NULL)
+ return nothing
+end
diff --git a/src/solvers/dg.jl b/src/solvers/dg.jl
index 2536cfe0bf2..495e0ffc4a4 100644
--- a/src/solvers/dg.jl
+++ b/src/solvers/dg.jl
@@ -363,7 +363,8 @@ function get_element_variables!(element_variables, u, mesh, equations, dg::DG, c
dg, cache)
end
-const MeshesDGSEM = Union{TreeMesh, StructuredMesh, UnstructuredMesh2D, P4estMesh}
+const MeshesDGSEM = Union{TreeMesh, StructuredMesh, UnstructuredMesh2D, P4estMesh,
+ T8codeMesh}
@inline function ndofs(mesh::MeshesDGSEM, dg::DG, cache)
nelements(cache.elements) * nnodes(dg)^ndims(mesh)
@@ -679,4 +680,5 @@ include("dgsem_tree/dg.jl")
include("dgsem_structured/dg.jl")
include("dgsem_unstructured/dg.jl")
include("dgsem_p4est/dg.jl")
+include("dgsem_t8code/dg.jl")
end # @muladd
diff --git a/src/solvers/dgsem_p4est/containers.jl b/src/solvers/dgsem_p4est/containers.jl
index 2b9c6987d24..0176f5c6346 100644
--- a/src/solvers/dgsem_p4est/containers.jl
+++ b/src/solvers/dgsem_p4est/containers.jl
@@ -81,7 +81,8 @@ function Base.resize!(elements::P4estElementContainer, capacity)
end
# Create element container and initialize element data
-function init_elements(mesh::P4estMesh{NDIMS, RealT}, equations,
+function init_elements(mesh::Union{P4estMesh{NDIMS, RealT}, T8codeMesh{NDIMS, RealT}},
+ equations,
basis,
::Type{uEltype}) where {NDIMS, RealT <: Real, uEltype <: Real}
nelements = ncells(mesh)
@@ -165,7 +166,7 @@ function Base.resize!(interfaces::P4estInterfaceContainer, capacity)
end
# Create interface container and initialize interface data.
-function init_interfaces(mesh::P4estMesh, equations, basis, elements)
+function init_interfaces(mesh::Union{P4estMesh, T8codeMesh}, equations, basis, elements)
NDIMS = ndims(elements)
uEltype = eltype(elements)
@@ -240,7 +241,7 @@ function Base.resize!(boundaries::P4estBoundaryContainer, capacity)
end
# Create interface container and initialize interface data in `elements`.
-function init_boundaries(mesh::P4estMesh, equations, basis, elements)
+function init_boundaries(mesh::Union{P4estMesh, T8codeMesh}, equations, basis, elements)
NDIMS = ndims(elements)
uEltype = eltype(elements)
@@ -371,7 +372,7 @@ function Base.resize!(mortars::P4estMortarContainer, capacity)
end
# Create mortar container and initialize mortar data.
-function init_mortars(mesh::P4estMesh, equations, basis, elements)
+function init_mortars(mesh::Union{P4estMesh, T8codeMesh}, equations, basis, elements)
NDIMS = ndims(elements)
uEltype = eltype(elements)
diff --git a/src/solvers/dgsem_p4est/containers_2d.jl b/src/solvers/dgsem_p4est/containers_2d.jl
index 11747f1f175..236d7d24c06 100644
--- a/src/solvers/dgsem_p4est/containers_2d.jl
+++ b/src/solvers/dgsem_p4est/containers_2d.jl
@@ -6,7 +6,8 @@
#! format: noindent
# Initialize data structures in element container
-function init_elements!(elements, mesh::P4estMesh{2}, basis::LobattoLegendreBasis)
+function init_elements!(elements, mesh::Union{P4estMesh{2}, T8codeMesh{2}},
+ basis::LobattoLegendreBasis)
@unpack node_coordinates, jacobian_matrix,
contravariant_vectors, inverse_jacobian = elements
@@ -25,7 +26,7 @@ end
# Interpolate tree_node_coordinates to each quadrant at the nodes of the specified basis
function calc_node_coordinates!(node_coordinates,
- mesh::P4estMesh{2},
+ mesh::Union{P4estMesh{2}, T8codeMesh{2}},
basis::LobattoLegendreBasis)
# Hanging nodes will cause holes in the mesh if its polydeg is higher
# than the polydeg of the solver.
diff --git a/src/solvers/dgsem_p4est/dg_2d.jl b/src/solvers/dgsem_p4est/dg_2d.jl
index bc7d9edb6ef..97b931fa325 100644
--- a/src/solvers/dgsem_p4est/dg_2d.jl
+++ b/src/solvers/dgsem_p4est/dg_2d.jl
@@ -7,8 +7,8 @@
# The methods below are specialized on the mortar type
# and called from the basic `create_cache` method at the top.
-function create_cache(mesh::P4estMesh{2}, equations, mortar_l2::LobattoLegendreMortarL2,
- uEltype)
+function create_cache(mesh::Union{P4estMesh{2}, T8codeMesh{2}}, equations,
+ mortar_l2::LobattoLegendreMortarL2, uEltype)
# TODO: Taal performance using different types
MA2d = MArray{Tuple{nvariables(equations), nnodes(mortar_l2)},
uEltype, 2,
@@ -58,7 +58,7 @@ end
# We pass the `surface_integral` argument solely for dispatch
function prolong2interfaces!(cache, u,
- mesh::P4estMesh{2},
+ mesh::Union{P4estMesh{2}, T8codeMesh{2}},
equations, surface_integral, dg::DG)
@unpack interfaces = cache
index_range = eachnode(dg)
@@ -114,7 +114,7 @@ function prolong2interfaces!(cache, u,
end
function calc_interface_flux!(surface_flux_values,
- mesh::P4estMesh{2},
+ mesh::Union{P4estMesh{2}, T8codeMesh{2}},
nonconservative_terms,
equations, surface_integral, dg::DG, cache)
@unpack neighbor_ids, node_indices = cache.interfaces
@@ -182,7 +182,7 @@ end
# Inlined version of the interface flux computation for conservation laws
@inline function calc_interface_flux!(surface_flux_values,
- mesh::P4estMesh{2},
+ mesh::Union{P4estMesh{2}, T8codeMesh{2}},
nonconservative_terms::False, equations,
surface_integral, dg::DG, cache,
interface_index, normal_direction,
@@ -206,7 +206,7 @@ end
# Inlined version of the interface flux computation for equations with conservative and nonconservative terms
@inline function calc_interface_flux!(surface_flux_values,
- mesh::P4estMesh{2},
+ mesh::Union{P4estMesh{2}, T8codeMesh{2}},
nonconservative_terms::True, equations,
surface_integral, dg::DG, cache,
interface_index, normal_direction,
@@ -247,7 +247,7 @@ end
end
function prolong2boundaries!(cache, u,
- mesh::P4estMesh{2},
+ mesh::Union{P4estMesh{2}, T8codeMesh{2}},
equations, surface_integral, dg::DG)
@unpack boundaries = cache
index_range = eachnode(dg)
@@ -276,7 +276,7 @@ function prolong2boundaries!(cache, u,
end
function calc_boundary_flux!(cache, t, boundary_condition, boundary_indexing,
- mesh::P4estMesh{2},
+ mesh::Union{P4estMesh{2}, T8codeMesh{2}},
equations, surface_integral, dg::DG)
@unpack boundaries = cache
@unpack surface_flux_values = cache.elements
@@ -312,7 +312,7 @@ end
# inlined version of the boundary flux calculation along a physical interface
@inline function calc_boundary_flux!(surface_flux_values, t, boundary_condition,
- mesh::P4estMesh{2},
+ mesh::Union{P4estMesh{2}, T8codeMesh{2}},
nonconservative_terms::False, equations,
surface_integral, dg::DG, cache,
i_index, j_index,
@@ -343,7 +343,7 @@ end
# inlined version of the boundary flux with nonconservative terms calculation along a physical interface
@inline function calc_boundary_flux!(surface_flux_values, t, boundary_condition,
- mesh::P4estMesh{2},
+ mesh::Union{P4estMesh{2}, T8codeMesh{2}},
nonconservative_terms::True, equations,
surface_integral, dg::DG, cache,
i_index, j_index,
@@ -385,7 +385,7 @@ end
end
function prolong2mortars!(cache, u,
- mesh::P4estMesh{2}, equations,
+ mesh::Union{P4estMesh{2}, T8codeMesh{2}}, equations,
mortar_l2::LobattoLegendreMortarL2,
surface_integral, dg::DGSEM)
@unpack neighbor_ids, node_indices = cache.mortars
@@ -452,7 +452,7 @@ function prolong2mortars!(cache, u,
end
function calc_mortar_flux!(surface_flux_values,
- mesh::P4estMesh{2},
+ mesh::Union{P4estMesh{2}, T8codeMesh{2}},
nonconservative_terms, equations,
mortar_l2::LobattoLegendreMortarL2,
surface_integral, dg::DG, cache)
@@ -511,7 +511,7 @@ end
# Inlined version of the mortar flux computation on small elements for conservation laws
@inline function calc_mortar_flux!(fstar,
- mesh::P4estMesh{2},
+ mesh::Union{P4estMesh{2}, T8codeMesh{2}},
nonconservative_terms::False, equations,
surface_integral, dg::DG, cache,
mortar_index, position_index, normal_direction,
@@ -531,7 +531,7 @@ end
# Inlined version of the mortar flux computation on small elements for equations with conservative and
# nonconservative terms
@inline function calc_mortar_flux!(fstar,
- mesh::P4estMesh{2},
+ mesh::Union{P4estMesh{2}, T8codeMesh{2}},
nonconservative_terms::True, equations,
surface_integral, dg::DG, cache,
mortar_index, position_index, normal_direction,
@@ -559,7 +559,8 @@ end
end
@inline function mortar_fluxes_to_elements!(surface_flux_values,
- mesh::P4estMesh{2}, equations,
+ mesh::Union{P4estMesh{2}, T8codeMesh{2}},
+ equations,
mortar_l2::LobattoLegendreMortarL2,
dg::DGSEM, cache, mortar, fstar, u_buffer)
@unpack neighbor_ids, node_indices = cache.mortars
@@ -620,7 +621,7 @@ end
end
function calc_surface_integral!(du, u,
- mesh::P4estMesh{2},
+ mesh::Union{P4estMesh{2}, T8codeMesh{2}},
equations,
surface_integral::SurfaceIntegralWeakForm,
dg::DGSEM, cache)
diff --git a/src/solvers/dgsem_structured/dg_2d.jl b/src/solvers/dgsem_structured/dg_2d.jl
index c013bf62d98..3e8ce759b30 100644
--- a/src/solvers/dgsem_structured/dg_2d.jl
+++ b/src/solvers/dgsem_structured/dg_2d.jl
@@ -52,7 +52,7 @@ end
@inline function weak_form_kernel!(du, u,
element,
mesh::Union{StructuredMesh{2}, UnstructuredMesh2D,
- P4estMesh{2}},
+ P4estMesh{2}, T8codeMesh{2}},
nonconservative_terms::False, equations,
dg::DGSEM, cache, alpha = true)
# true * [some floating point value] == [exactly the same floating point value]
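The `alpha = true` default works because `true` acts as a multiplicative "strong one" in Julia, exactly as the comment above states; a minimal check (illustrative only, not part of the patch):

# Multiplying by `true` returns the other operand bit-for-bit, so the same kernel
# serves both the pure DG path (`alpha = true`) and the blended shock-capturing path.
@assert true * 0.123456 === 0.123456
@assert true * (-2.5f0) === -2.5f0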
@@ -93,8 +93,8 @@ end
@inline function flux_differencing_kernel!(du, u,
element,
mesh::Union{StructuredMesh{2},
- UnstructuredMesh2D, P4estMesh{2}
- },
+ UnstructuredMesh2D, P4estMesh{2},
+ T8codeMesh{2}},
nonconservative_terms::False, equations,
volume_flux, dg::DGSEM, cache, alpha = true)
@unpack derivative_split = dg.basis
@@ -150,8 +150,8 @@ end
@inline function flux_differencing_kernel!(du, u,
element,
mesh::Union{StructuredMesh{2},
- UnstructuredMesh2D, P4estMesh{2}
- },
+ UnstructuredMesh2D, P4estMesh{2},
+ T8codeMesh{2}},
nonconservative_terms::True, equations,
volume_flux, dg::DGSEM, cache, alpha = true)
@unpack derivative_split = dg.basis
@@ -219,7 +219,7 @@ end
# [arXiv: 2008.12044v2](https://arxiv.org/pdf/2008.12044)
@inline function calcflux_fv!(fstar1_L, fstar1_R, fstar2_L, fstar2_R, u,
mesh::Union{StructuredMesh{2}, UnstructuredMesh2D,
- P4estMesh{2}},
+ P4estMesh{2}, T8codeMesh{2}},
nonconservative_terms::False, equations,
volume_flux_fv, dg::DGSEM, element, cache)
@unpack contravariant_vectors = cache.elements
@@ -289,7 +289,7 @@ end
@inline function calcflux_fv!(fstar1_L, fstar1_R, fstar2_L, fstar2_R,
u::AbstractArray{<:Any, 4},
mesh::Union{StructuredMesh{2}, UnstructuredMesh2D,
- P4estMesh{2}},
+ P4estMesh{2}, T8codeMesh{2}},
nonconservative_terms::True, equations,
volume_flux_fv, dg::DGSEM, element, cache)
@unpack contravariant_vectors = cache.elements
@@ -609,9 +609,8 @@ function calc_boundary_flux!(cache, u, t, boundary_conditions::NamedTuple,
end
function apply_jacobian!(du,
- mesh::Union{StructuredMesh{2}, UnstructuredMesh2D, P4estMesh{2
- }
- },
+ mesh::Union{StructuredMesh{2}, UnstructuredMesh2D,
+ P4estMesh{2}, T8codeMesh{2}},
equations, dg::DG, cache)
@unpack inverse_jacobian = cache.elements
diff --git a/src/solvers/dgsem_t8code/containers.jl b/src/solvers/dgsem_t8code/containers.jl
new file mode 100644
index 00000000000..093feb2985a
--- /dev/null
+++ b/src/solvers/dgsem_t8code/containers.jl
@@ -0,0 +1,60 @@
+function reinitialize_containers!(mesh::T8codeMesh, equations, dg::DGSEM, cache)
+ # Re-initialize elements container.
+ @unpack elements = cache
+ resize!(elements, ncells(mesh))
+ init_elements!(elements, mesh, dg.basis)
+
+ count_required_surfaces!(mesh)
+
+ # Resize interfaces container.
+ @unpack interfaces = cache
+ resize!(interfaces, mesh.ninterfaces)
+
+ # Resize mortars container.
+ @unpack mortars = cache
+ resize!(mortars, mesh.nmortars)
+
+ # Resize boundaries container.
+ @unpack boundaries = cache
+ resize!(boundaries, mesh.nboundaries)
+
+ trixi_t8_fill_mesh_info(mesh.forest, elements, interfaces, mortars, boundaries,
+ mesh.boundary_names)
+
+ return nothing
+end
+
+function count_required_surfaces!(mesh::T8codeMesh)
+ counts = trixi_t8_count_interfaces(mesh.forest)
+
+ mesh.nmortars = counts.mortars
+ mesh.ninterfaces = counts.interfaces
+ mesh.nboundaries = counts.boundaries
+
+ return counts
+end
+
+# Compatibility to `dgsem_p4est/containers.jl`.
+function count_required_surfaces(mesh::T8codeMesh)
+ return (interfaces = mesh.ninterfaces,
+ mortars = mesh.nmortars,
+ boundaries = mesh.nboundaries)
+end
+
+# Compatibility to `dgsem_p4est/containers.jl`.
+function init_interfaces!(interfaces, mesh::T8codeMesh)
+ # Already computed. Do nothing.
+ return nothing
+end
+
+# Compatibility to `dgsem_p4est/containers.jl`.
+function init_mortars!(mortars, mesh::T8codeMesh)
+ # Already computed. Do nothing.
+ return nothing
+end
+
+# Compatibility to `dgsem_p4est/containers.jl`.
+function init_boundaries!(boundaries, mesh::T8codeMesh)
+ # Already computed. Do nothing.
+ return nothing
+end
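A short sketch of how these helpers interact (illustrative only; `mesh` is assumed to be a `T8codeMesh` constructed as elsewhere in this patch): the mutating `count_required_surfaces!` caches the surface counts on the mesh, and the bang-less compatibility variant just reads the cached values back.

# Illustration, not part of the patch.
counts = Trixi.count_required_surfaces!(mesh)
@assert counts.interfaces == mesh.ninterfaces
@assert counts.mortars == mesh.nmortars
@assert counts.boundaries == mesh.nboundaries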
diff --git a/src/solvers/dgsem_t8code/containers_2d.jl b/src/solvers/dgsem_t8code/containers_2d.jl
new file mode 100644
index 00000000000..029e6674afb
--- /dev/null
+++ b/src/solvers/dgsem_t8code/containers_2d.jl
@@ -0,0 +1,58 @@
+@muladd begin
+#! format: noindent
+
+# Interpolate tree_node_coordinates to each quadrant at the specified nodes.
+function calc_node_coordinates!(node_coordinates,
+ mesh::T8codeMesh{2},
+ nodes::AbstractVector)
+ # We use `StrideArray`s here since these buffers are used in performance-critical
+ # places and the additional information passed to the compiler makes them faster
+ # than native `Array`s.
+ tmp1 = StrideArray(undef, real(mesh),
+ StaticInt(2), static_length(nodes), static_length(mesh.nodes))
+ matrix1 = StrideArray(undef, real(mesh),
+ static_length(nodes), static_length(mesh.nodes))
+ matrix2 = similar(matrix1)
+ baryweights_in = barycentric_weights(mesh.nodes)
+
+ num_local_trees = t8_forest_get_num_local_trees(mesh.forest)
+
+ current_index = 0
+ for itree in 0:(num_local_trees - 1)
+ tree_class = t8_forest_get_tree_class(mesh.forest, itree)
+ eclass_scheme = t8_forest_get_eclass_scheme(mesh.forest, tree_class)
+ num_elements_in_tree = t8_forest_get_tree_num_elements(mesh.forest, itree)
+
+ for ielement in 0:(num_elements_in_tree - 1)
+ element = t8_forest_get_element_in_tree(mesh.forest, itree, ielement)
+ element_level = t8_element_level(eclass_scheme, element)
+
+ element_length = t8_quad_len(element_level) / t8_quad_root_len
+
+ element_coords = Array{Float64}(undef, 3)
+ t8_element_vertex_reference_coords(eclass_scheme, element, 0,
+ pointer(element_coords))
+
+ nodes_out_x = 2 *
+ (element_length * 1 / 2 * (nodes .+ 1) .+ element_coords[1]) .-
+ 1
+ nodes_out_y = 2 *
+ (element_length * 1 / 2 * (nodes .+ 1) .+ element_coords[2]) .-
+ 1
+
+ polynomial_interpolation_matrix!(matrix1, mesh.nodes, nodes_out_x,
+ baryweights_in)
+ polynomial_interpolation_matrix!(matrix2, mesh.nodes, nodes_out_y,
+ baryweights_in)
+
+ multiply_dimensionwise!(view(node_coordinates, :, :, :, current_index += 1),
+ matrix1, matrix2,
+ view(mesh.tree_node_coordinates, :, :, :,
+ itree + 1),
+ tmp1)
+ end
+ end
+
+ return node_coordinates
+end
+end # @muladd
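The `nodes_out_x`/`nodes_out_y` expressions above place each solver node inside the element's sub-square of the tree's unit reference square and rescale back to [-1, 1] so it can be fed to the polynomial interpolation of the tree node coordinates. With h the relative element length and x_0 the element's first vertex reference coordinate, this is (a restatement of the code, not an addition):

$$\xi_{\mathrm{out}} = 2\left(\frac{h}{2}(\xi + 1) + x_0\right) - 1, \qquad \xi \in [-1, 1].$$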
diff --git a/src/solvers/dgsem_t8code/dg.jl b/src/solvers/dgsem_t8code/dg.jl
new file mode 100644
index 00000000000..16a9d7d35b1
--- /dev/null
+++ b/src/solvers/dgsem_t8code/dg.jl
@@ -0,0 +1,31 @@
+@muladd begin
+#! format: noindent
+
+# This method is called when a SemidiscretizationHyperbolic is constructed.
+# It constructs the basic `cache` used throughout the simulation to compute
+# the RHS etc.
+function create_cache(mesh::T8codeMesh, equations::AbstractEquations, dg::DG, ::Any,
+ ::Type{uEltype}) where {uEltype <: Real}
+ count_required_surfaces!(mesh)
+
+ elements = init_elements(mesh, equations, dg.basis, uEltype)
+ interfaces = init_interfaces(mesh, equations, dg.basis, elements)
+ boundaries = init_boundaries(mesh, equations, dg.basis, elements)
+ mortars = init_mortars(mesh, equations, dg.basis, elements)
+
+ trixi_t8_fill_mesh_info(mesh.forest, elements, interfaces, mortars, boundaries,
+ mesh.boundary_names)
+
+ cache = (; elements, interfaces, boundaries, mortars)
+
+ # Add specialized parts of the cache required to compute the volume integral etc.
+ cache = (; cache...,
+ create_cache(mesh, equations, dg.volume_integral, dg, uEltype)...)
+ cache = (; cache..., create_cache(mesh, equations, dg.mortar, uEltype)...)
+
+ return cache
+end
+
+include("containers.jl")
+include("containers_2d.jl")
+end # @muladd
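As the comment at the top of this new `dg.jl` notes, `create_cache` runs when a semidiscretization is built. A hedged usage sketch (the mesh construction mirrors the dummy mesh in the new test file below; the advection equations, initial condition, and solver choices are illustrative, not part of this patch):

using Trixi

advection_velocity = (0.2, -0.7)
equations = LinearScalarAdvectionEquation2D(advection_velocity)
solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs)
mesh = T8codeMesh((1, 1), polydeg = 1,
                  mapping = Trixi.coordinates2mapping((-1.0, -1.0), (1.0, 1.0)),
                  initial_refinement_level = 2)

# Building the semidiscretization calls the `create_cache` defined above, which counts
# the required surfaces and fills the element/interface/mortar/boundary containers
# via `trixi_t8_fill_mesh_info`.
semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition_convergence_test,
                                    solver)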
diff --git a/src/solvers/dgsem_tree/dg_2d.jl b/src/solvers/dgsem_tree/dg_2d.jl
index 6c5e0cee0cf..c30d0a8e01a 100644
--- a/src/solvers/dgsem_tree/dg_2d.jl
+++ b/src/solvers/dgsem_tree/dg_2d.jl
@@ -37,14 +37,14 @@ end
# The methods below are specialized on the volume integral type
# and called from the basic `create_cache` method at the top.
function create_cache(mesh::Union{TreeMesh{2}, StructuredMesh{2}, UnstructuredMesh2D,
- P4estMesh{2}},
+ P4estMesh{2}, T8codeMesh{2}},
equations, volume_integral::VolumeIntegralFluxDifferencing,
dg::DG, uEltype)
NamedTuple()
end
function create_cache(mesh::Union{TreeMesh{2}, StructuredMesh{2}, UnstructuredMesh2D,
- P4estMesh{2}}, equations,
+ P4estMesh{2}, T8codeMesh{2}}, equations,
volume_integral::VolumeIntegralShockCapturingHG, dg::DG, uEltype)
element_ids_dg = Int[]
element_ids_dgfv = Int[]
@@ -70,7 +70,7 @@ function create_cache(mesh::Union{TreeMesh{2}, StructuredMesh{2}, UnstructuredMe
end
function create_cache(mesh::Union{TreeMesh{2}, StructuredMesh{2}, UnstructuredMesh2D,
- P4estMesh{2}}, equations,
+ P4estMesh{2}, T8codeMesh{2}}, equations,
volume_integral::VolumeIntegralPureLGLFiniteVolume, dg::DG,
uEltype)
A3dp1_x = Array{uEltype, 3}
@@ -92,7 +92,7 @@ end
# The methods below are specialized on the mortar type
# and called from the basic `create_cache` method at the top.
function create_cache(mesh::Union{TreeMesh{2}, StructuredMesh{2}, UnstructuredMesh2D,
- P4estMesh{2}},
+ P4estMesh{2}, T8codeMesh{2}},
equations, mortar_l2::LobattoLegendreMortarL2, uEltype)
# TODO: Taal performance using different types
MA2d = MArray{Tuple{nvariables(equations), nnodes(mortar_l2)}, uEltype, 2,
@@ -110,7 +110,7 @@ end
# TODO: Taal discuss/refactor timer, allowing users to pass a custom timer?
function rhs!(du, u, t,
- mesh::Union{TreeMesh{2}, P4estMesh{2}}, equations,
+ mesh::Union{TreeMesh{2}, P4estMesh{2}, T8codeMesh{2}}, equations,
initial_condition, boundary_conditions, source_terms::Source,
dg::DG, cache) where {Source}
# Reset du
@@ -180,7 +180,8 @@ end
function calc_volume_integral!(du, u,
mesh::Union{TreeMesh{2}, StructuredMesh{2},
- UnstructuredMesh2D, P4estMesh{2}},
+ UnstructuredMesh2D, P4estMesh{2},
+ T8codeMesh{2}},
nonconservative_terms, equations,
volume_integral::VolumeIntegralWeakForm,
dg::DGSEM, cache)
@@ -226,7 +227,8 @@ end
# from the evaluation of the physical fluxes in each Cartesian direction
function calc_volume_integral!(du, u,
mesh::Union{TreeMesh{2}, StructuredMesh{2},
- UnstructuredMesh2D, P4estMesh{2}},
+ UnstructuredMesh2D, P4estMesh{2},
+ T8codeMesh{2}},
nonconservative_terms, equations,
volume_integral::VolumeIntegralFluxDifferencing,
dg::DGSEM, cache)
@@ -322,7 +324,8 @@ end
# TODO: Taal dimension agnostic
function calc_volume_integral!(du, u,
mesh::Union{TreeMesh{2}, StructuredMesh{2},
- UnstructuredMesh2D, P4estMesh{2}},
+ UnstructuredMesh2D, P4estMesh{2},
+ T8codeMesh{2}},
nonconservative_terms, equations,
volume_integral::VolumeIntegralShockCapturingHG,
dg::DGSEM, cache)
@@ -381,7 +384,8 @@ end
@inline function fv_kernel!(du, u,
mesh::Union{TreeMesh{2}, StructuredMesh{2},
- UnstructuredMesh2D, P4estMesh{2}},
+ UnstructuredMesh2D, P4estMesh{2}, T8codeMesh{2}
+ },
nonconservative_terms, equations,
volume_flux_fv, dg::DGSEM, cache, element, alpha = true)
@unpack fstar1_L_threaded, fstar1_R_threaded, fstar2_L_threaded, fstar2_R_threaded = cache
diff --git a/src/solvers/dgsem_tree/indicators_2d.jl b/src/solvers/dgsem_tree/indicators_2d.jl
index f7c78547174..2f34e0eb661 100644
--- a/src/solvers/dgsem_tree/indicators_2d.jl
+++ b/src/solvers/dgsem_tree/indicators_2d.jl
@@ -208,7 +208,8 @@ end
end
# Diffuse alpha values by setting each alpha to at least 50% of neighboring elements' alpha
-function apply_smoothing!(mesh::Union{TreeMesh{2}, P4estMesh{2}}, alpha, alpha_tmp, dg,
+function apply_smoothing!(mesh::Union{TreeMesh{2}, P4estMesh{2}, T8codeMesh{2}}, alpha,
+ alpha_tmp, dg,
cache)
# Copy alpha values such that smoothing is independent of the element access order
alpha_tmp .= alpha
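The "at least 50% of each neighbor" rule in `apply_smoothing!` reduces to a simple clamp; a simplified 1D sketch with a plain vector of indicator values (not the mesh-aware implementation):

# Every element keeps at least half of each neighbor's (pre-smoothing) alpha.
function smooth_alpha_1d!(alpha::AbstractVector)
    alpha_tmp = copy(alpha)  # smoothing must not depend on the traversal order
    for i in eachindex(alpha)
        i > firstindex(alpha) && (alpha[i] = max(alpha[i], 0.5 * alpha_tmp[i - 1]))
        i < lastindex(alpha) && (alpha[i] = max(alpha[i], 0.5 * alpha_tmp[i + 1]))
    end
    return alpha
end

smooth_alpha_1d!([0.0, 1.0, 0.0, 0.0])  # -> [0.5, 1.0, 0.5, 0.0]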
diff --git a/src/solvers/dgsem_unstructured/dg_2d.jl b/src/solvers/dgsem_unstructured/dg_2d.jl
index 95dec027a82..7b8dafdddd2 100644
--- a/src/solvers/dgsem_unstructured/dg_2d.jl
+++ b/src/solvers/dgsem_unstructured/dg_2d.jl
@@ -307,14 +307,14 @@ end
# TODO: Taal dimension agnostic
function calc_boundary_flux!(cache, t, boundary_condition::BoundaryConditionPeriodic,
- mesh::Union{UnstructuredMesh2D, P4estMesh},
+ mesh::Union{UnstructuredMesh2D, P4estMesh, T8codeMesh},
equations, surface_integral, dg::DG)
@assert isempty(eachboundary(dg, cache))
end
# Function barrier for type stability
function calc_boundary_flux!(cache, t, boundary_conditions,
- mesh::Union{UnstructuredMesh2D, P4estMesh},
+ mesh::Union{UnstructuredMesh2D, P4estMesh, T8codeMesh},
equations, surface_integral, dg::DG)
@unpack boundary_condition_types, boundary_indices = boundary_conditions
@@ -327,7 +327,8 @@ end
# in a type-stable way using "lispy tuple programming".
function calc_boundary_flux_by_type!(cache, t, BCs::NTuple{N, Any},
BC_indices::NTuple{N, Vector{Int}},
- mesh::Union{UnstructuredMesh2D, P4estMesh},
+ mesh::Union{UnstructuredMesh2D, P4estMesh,
+ T8codeMesh},
equations, surface_integral, dg::DG) where {N}
# Extract the boundary condition type and index vector
boundary_condition = first(BCs)
@@ -350,7 +351,8 @@ end
# terminate the type-stable iteration over tuples
function calc_boundary_flux_by_type!(cache, t, BCs::Tuple{}, BC_indices::Tuple{},
- mesh::Union{UnstructuredMesh2D, P4estMesh},
+ mesh::Union{UnstructuredMesh2D, P4estMesh,
+ T8codeMesh},
equations, surface_integral, dg::DG)
nothing
end
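The "lispy tuple programming" referenced in the comments above is the usual peel-and-recurse pattern over heterogeneous tuples; a minimal standalone sketch (not Trixi code):

# Handle the first entry, recurse on the tail, terminate on the empty tuple.
# Because the tuple length and element types are known to the compiler, the
# recursion is unrolled and each `handle` call is dispatched statically.
handle(bc) = println("applying ", bc)

apply_all(bcs::Tuple{}) = nothing
function apply_all(bcs::NTuple{N, Any}) where {N}
    handle(first(bcs))
    apply_all(Base.tail(bcs))
    return nothing
end

apply_all((:periodic, sin, 1.0))  # three entries of three different types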
diff --git a/test/runtests.jl b/test/runtests.jl
index f76811dddbf..1d7eefe1fcb 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -4,113 +4,119 @@ using MPI: mpiexec
# run tests on Travis CI in parallel
const TRIXI_TEST = get(ENV, "TRIXI_TEST", "all")
const TRIXI_MPI_NPROCS = clamp(Sys.CPU_THREADS, 2, 3)
-const TRIXI_NTHREADS = clamp(Sys.CPU_THREADS, 2, 3)
+const TRIXI_NTHREADS = clamp(Sys.CPU_THREADS, 2, 3)
@time @testset "Trixi.jl tests" begin
- # This is placed first since tests error out otherwise if `TRIXI_TEST == "all"`,
- # at least on some systems.
- @time if TRIXI_TEST == "all" || TRIXI_TEST == "mpi"
- # Do a dummy `@test true`:
- # If the process errors out the testset would error out as well,
- # cf. https://github.com/JuliaParallel/MPI.jl/pull/391
- @test true
-
- # There are spurious test failures of Trixi.jl with MPI on Windows, see
- # https://github.com/trixi-framework/Trixi.jl/issues/901
- # To reduce their impact, we do not test MPI with coverage on Windows.
- # This reduces the chance to hit a spurious test failure by one half.
- # In addition, it looks like the Linux GitHub runners run out of memory during the 3D tests
- # with coverage, so we currently do not test MPI with coverage on Linux. For more details,
- # see the discussion at https://github.com/trixi-framework/Trixi.jl/pull/1062#issuecomment-1035901020
- cmd = string(Base.julia_cmd())
- coverage = occursin("--code-coverage", cmd) && !occursin("--code-coverage=none", cmd)
- if !(coverage && Sys.iswindows()) && !(coverage && Sys.islinux())
- # We provide a `--heap-size-hint` to avoid/reduce out-of-memory errors during CI testing
- mpiexec() do cmd
- run(`$cmd -n $TRIXI_MPI_NPROCS $(Base.julia_cmd()) --threads=1 --check-bounds=yes --heap-size-hint=1G $(abspath("test_mpi.jl"))`)
- end
- end
- end
-
- @time if TRIXI_TEST == "all" || TRIXI_TEST == "threaded" || TRIXI_TEST == "threaded_legacy"
- # Do a dummy `@test true`:
- # If the process errors out the testset would error out as well,
- # cf. https://github.com/JuliaParallel/MPI.jl/pull/391
- @test true
-
- run(`$(Base.julia_cmd()) --threads=$TRIXI_NTHREADS --check-bounds=yes --code-coverage=none $(abspath("test_threaded.jl"))`)
- end
-
- @time if TRIXI_TEST == "all" || TRIXI_TEST == "tree_part1"
- include("test_tree_1d.jl")
- include("test_tree_2d_part1.jl")
- end
-
- @time if TRIXI_TEST == "all" || TRIXI_TEST == "tree_part2"
- include("test_tree_2d_part2.jl")
- end
-
- @time if TRIXI_TEST == "all" || TRIXI_TEST == "tree_part3"
- include("test_tree_2d_part3.jl")
- end
-
- @time if TRIXI_TEST == "all" || TRIXI_TEST == "tree_part4"
- include("test_tree_3d_part1.jl")
- end
-
- @time if TRIXI_TEST == "all" || TRIXI_TEST == "tree_part5"
- include("test_tree_3d_part2.jl")
- end
-
- @time if TRIXI_TEST == "all" || TRIXI_TEST == "tree_part6"
- include("test_tree_3d_part3.jl")
- end
-
- @time if TRIXI_TEST == "all" || TRIXI_TEST == "structured"
- include("test_structured_1d.jl")
- include("test_structured_2d.jl")
- include("test_structured_3d.jl")
- end
-
- @time if TRIXI_TEST == "all" || TRIXI_TEST == "p4est_part1"
- include("test_p4est_2d.jl")
- end
-
- @time if TRIXI_TEST == "all" || TRIXI_TEST == "p4est_part2"
- include("test_p4est_3d.jl")
- end
-
- @time if TRIXI_TEST == "all" || TRIXI_TEST == "unstructured_dgmulti"
- include("test_unstructured_2d.jl")
- include("test_dgmulti_1d.jl")
- include("test_dgmulti_2d.jl")
- include("test_dgmulti_3d.jl")
- end
-
- @time if TRIXI_TEST == "all" || TRIXI_TEST == "parabolic"
- include("test_parabolic_1d.jl")
- include("test_parabolic_2d.jl")
- include("test_parabolic_3d.jl")
- end
-
- @time if TRIXI_TEST == "all" || TRIXI_TEST == "misc_part1"
- include("test_unit.jl")
- include("test_visualization.jl")
- end
-
- @time if TRIXI_TEST == "all" || TRIXI_TEST == "misc_part2"
- include("test_special_elixirs.jl")
- end
-
- @time if TRIXI_TEST == "all" || TRIXI_TEST == "performance_specializations_part1"
- include("test_performance_specializations_2d.jl")
- end
-
- @time if TRIXI_TEST == "all" || TRIXI_TEST == "performance_specializations_part2"
- include("test_performance_specializations_3d.jl")
- end
-
- @time if TRIXI_TEST == "all" || TRIXI_TEST == "paper_self_gravitating_gas_dynamics"
- include("test_paper_self_gravitating_gas_dynamics.jl")
- end
+ # This is placed first since tests error out otherwise if `TRIXI_TEST == "all"`,
+ # at least on some systems.
+ @time if TRIXI_TEST == "all" || TRIXI_TEST == "mpi"
+ # Do a dummy `@test true`:
+ # If the process errors out the testset would error out as well,
+ # cf. https://github.com/JuliaParallel/MPI.jl/pull/391
+ @test true
+
+ # There are spurious test failures of Trixi.jl with MPI on Windows, see
+ # https://github.com/trixi-framework/Trixi.jl/issues/901
+ # To reduce their impact, we do not test MPI with coverage on Windows.
+ # This reduces the chance to hit a spurious test failure by one half.
+ # In addition, it looks like the Linux GitHub runners run out of memory during the 3D tests
+ # with coverage, so we currently do not test MPI with coverage on Linux. For more details,
+ # see the discussion at https://github.com/trixi-framework/Trixi.jl/pull/1062#issuecomment-1035901020
+ cmd = string(Base.julia_cmd())
+ coverage = occursin("--code-coverage", cmd) &&
+ !occursin("--code-coverage=none", cmd)
+ if !(coverage && Sys.iswindows()) && !(coverage && Sys.islinux())
+ # We provide a `--heap-size-hint` to avoid/reduce out-of-memory errors during CI testing
+ mpiexec() do cmd
+ run(`$cmd -n $TRIXI_MPI_NPROCS $(Base.julia_cmd()) --threads=1 --check-bounds=yes --heap-size-hint=1G $(abspath("test_mpi.jl"))`)
+ end
+ end
+ end
+
+ @time if TRIXI_TEST == "all" || TRIXI_TEST == "threaded" ||
+ TRIXI_TEST == "threaded_legacy"
+ # Do a dummy `@test true`:
+ # If the process errors out the testset would error out as well,
+ # cf. https://github.com/JuliaParallel/MPI.jl/pull/391
+ @test true
+
+ run(`$(Base.julia_cmd()) --threads=$TRIXI_NTHREADS --check-bounds=yes --code-coverage=none $(abspath("test_threaded.jl"))`)
+ end
+
+ @time if TRIXI_TEST == "all" || TRIXI_TEST == "tree_part1"
+ include("test_tree_1d.jl")
+ include("test_tree_2d_part1.jl")
+ end
+
+ @time if TRIXI_TEST == "all" || TRIXI_TEST == "tree_part2"
+ include("test_tree_2d_part2.jl")
+ end
+
+ @time if TRIXI_TEST == "all" || TRIXI_TEST == "tree_part3"
+ include("test_tree_2d_part3.jl")
+ end
+
+ @time if TRIXI_TEST == "all" || TRIXI_TEST == "tree_part4"
+ include("test_tree_3d_part1.jl")
+ end
+
+ @time if TRIXI_TEST == "all" || TRIXI_TEST == "tree_part5"
+ include("test_tree_3d_part2.jl")
+ end
+
+ @time if TRIXI_TEST == "all" || TRIXI_TEST == "tree_part6"
+ include("test_tree_3d_part3.jl")
+ end
+
+ @time if TRIXI_TEST == "all" || TRIXI_TEST == "structured"
+ include("test_structured_1d.jl")
+ include("test_structured_2d.jl")
+ include("test_structured_3d.jl")
+ end
+
+ @time if TRIXI_TEST == "all" || TRIXI_TEST == "p4est_part1"
+ include("test_p4est_2d.jl")
+ end
+
+ @time if TRIXI_TEST == "all" || TRIXI_TEST == "p4est_part2"
+ include("test_p4est_3d.jl")
+ end
+
+ @time if TRIXI_TEST == "all" || TRIXI_TEST == "t8code_part1"
+ include("test_t8code_2d.jl")
+ end
+
+ @time if TRIXI_TEST == "all" || TRIXI_TEST == "unstructured_dgmulti"
+ include("test_unstructured_2d.jl")
+ include("test_dgmulti_1d.jl")
+ include("test_dgmulti_2d.jl")
+ include("test_dgmulti_3d.jl")
+ end
+
+ @time if TRIXI_TEST == "all" || TRIXI_TEST == "parabolic"
+ include("test_parabolic_1d.jl")
+ include("test_parabolic_2d.jl")
+ include("test_parabolic_3d.jl")
+ end
+
+ @time if TRIXI_TEST == "all" || TRIXI_TEST == "misc_part1"
+ include("test_unit.jl")
+ include("test_visualization.jl")
+ end
+
+ @time if TRIXI_TEST == "all" || TRIXI_TEST == "misc_part2"
+ include("test_special_elixirs.jl")
+ end
+
+ @time if TRIXI_TEST == "all" || TRIXI_TEST == "performance_specializations_part1"
+ include("test_performance_specializations_2d.jl")
+ end
+
+ @time if TRIXI_TEST == "all" || TRIXI_TEST == "performance_specializations_part2"
+ include("test_performance_specializations_3d.jl")
+ end
+
+ @time if TRIXI_TEST == "all" || TRIXI_TEST == "paper_self_gravitating_gas_dynamics"
+ include("test_paper_self_gravitating_gas_dynamics.jl")
+ end
end
diff --git a/test/test_t8code_2d.jl b/test/test_t8code_2d.jl
new file mode 100644
index 00000000000..a424c9df84b
--- /dev/null
+++ b/test/test_t8code_2d.jl
@@ -0,0 +1,182 @@
+module TestExamplesT8codeMesh2D
+
+using Test
+using Trixi
+
+include("test_trixi.jl")
+
+EXAMPLES_DIR = joinpath(examples_dir(), "t8code_2d_dgsem")
+
+# Start with a clean environment: remove Trixi.jl output directory if it exists
+outdir = "out"
+isdir(outdir) && rm(outdir, recursive = true)
+mkdir(outdir)
+
+@testset "T8codeMesh2D" begin
+
+ @trixi_testset "test save_mesh_file" begin
+ @test_throws Exception begin
+ # Save mesh file support will be added in the future. The following
+ # lines of code are here for satisfying code coverage.
+
+ # Create dummy mesh.
+ mesh = T8codeMesh((1, 1), polydeg = 1,
+ mapping = Trixi.coordinates2mapping((-1.0, -1.0), ( 1.0, 1.0)),
+ initial_refinement_level = 1)
+
+ # This call throws an error.
+ Trixi.save_mesh_file(mesh, "dummy")
+ end
+ end
+
+ @trixi_testset "elixir_advection_basic.jl" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_basic.jl"),
+ # Expected errors are exactly the same as with TreeMesh!
+ l2=[8.311947673061856e-6],
+ linf=[6.627000273229378e-5])
+ end
+
+ @trixi_testset "elixir_advection_nonconforming_flag.jl" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR,
+ "elixir_advection_nonconforming_flag.jl"),
+ l2=[3.198940059144588e-5],
+ linf=[0.00030636069494005547])
+ end
+
+ @trixi_testset "elixir_advection_unstructured_flag.jl" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_unstructured_flag.jl"),
+ l2=[0.0005379687442422346],
+ linf=[0.007438525029884735])
+ end
+
+ @trixi_testset "elixir_advection_amr_unstructured_flag.jl" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR,
+ "elixir_advection_amr_unstructured_flag.jl"),
+ l2=[0.001993165013217687],
+ linf=[0.032891018571625796],
+ coverage_override=(maxiters = 6,))
+ end
+
+ @trixi_testset "elixir_advection_amr_solution_independent.jl" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR,
+ "elixir_advection_amr_solution_independent.jl"),
+ # Expected errors are exactly the same as with StructuredMesh!
+ l2=[4.949660644033807e-5],
+ linf=[0.0004867846262313763],
+ coverage_override=(maxiters = 6,))
+ end
+
+ @trixi_testset "elixir_euler_source_terms_nonconforming_unstructured_flag.jl" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR,
+ "elixir_euler_source_terms_nonconforming_unstructured_flag.jl"),
+ l2=[
+ 0.0034516244508588046,
+ 0.0023420334036925493,
+ 0.0024261923964557187,
+ 0.004731710454271893,
+ ],
+ linf=[
+ 0.04155789011775046,
+ 0.024772109862748914,
+ 0.03759938693042297,
+ 0.08039824959535657,
+ ])
+ end
+
+ @trixi_testset "elixir_euler_free_stream.jl" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_free_stream.jl"),
+ l2=[
+ 2.063350241405049e-15,
+ 1.8571016296925367e-14,
+ 3.1769447886391905e-14,
+ 1.4104095258528071e-14,
+ ],
+ linf=[1.9539925233402755e-14, 2e-12, 4.8e-12, 4e-12],
+ atol=2.0e-12,)
+ end
+
+ @trixi_testset "elixir_euler_shockcapturing_ec.jl" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_shockcapturing_ec.jl"),
+ l2=[
+ 9.53984675e-02,
+ 1.05633455e-01,
+ 1.05636158e-01,
+ 3.50747237e-01,
+ ],
+ linf=[
+ 2.94357464e-01,
+ 4.07893014e-01,
+ 3.97334516e-01,
+ 1.08142520e+00,
+ ],
+ tspan=(0.0, 1.0))
+ end
+
+ @trixi_testset "elixir_euler_sedov.jl" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_sedov.jl"),
+ l2=[
+ 3.76149952e-01,
+ 2.46970327e-01,
+ 2.46970327e-01,
+ 1.28889042e+00,
+ ],
+ linf=[
+ 1.22139001e+00,
+ 1.17742626e+00,
+ 1.17742626e+00,
+ 6.20638482e+00,
+ ],
+ tspan=(0.0, 0.3))
+ end
+
+ @trixi_testset "elixir_shallowwater_source_terms.jl" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_source_terms.jl"),
+ l2=[
+ 9.168126407325352e-5,
+ 0.0009795410115453788,
+ 0.002546408320320785,
+ 3.941189812642317e-6,
+ ],
+ linf=[
+ 0.0009903782521019089,
+ 0.0059752684687262025,
+ 0.010941106525454103,
+ 1.2129488214718265e-5,
+ ],
+ tspan=(0.0, 0.1))
+ end
+
+ @trixi_testset "elixir_mhd_alfven_wave.jl" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_mhd_alfven_wave.jl"),
+ l2=[1.0513414461545583e-5, 1.0517900957166411e-6,
+ 1.0517900957304043e-6, 1.511816606372376e-6,
+ 1.0443997728645063e-6, 7.879639064990798e-7,
+ 7.879639065049896e-7, 1.0628631669056271e-6,
+ 4.3382328912336153e-7],
+ linf=[4.255466285174592e-5, 1.0029706745823264e-5,
+ 1.0029706747467781e-5, 1.2122265939010224e-5,
+ 5.4791097160444835e-6, 5.18922042269665e-6,
+ 5.189220422141538e-6, 9.552667261422676e-6,
+ 1.4237578427628152e-6])
+ end
+
+ @trixi_testset "elixir_mhd_rotor.jl" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_mhd_rotor.jl"),
+ l2=[0.44211360369891683, 0.8805178316216257, 0.8262710688468049,
+ 0.0,
+ 0.9616090460973586, 0.10386643568745411,
+ 0.15403457366543802, 0.0,
+ 2.8399715649715473e-5],
+ linf=[10.04369305341599, 17.995640564998403, 9.576041548174265,
+ 0.0,
+ 19.429658884314534, 1.3821395681242314, 1.818559351543182,
+ 0.0,
+ 0.002261930217575465],
+ tspan=(0.0, 0.02))
+ end
+end
+
+# Clean up afterwards: delete Trixi.jl output directory
+@test_nowarn rm(outdir, recursive = true)
+
+end # module
diff --git a/test/test_threaded.jl b/test/test_threaded.jl
index 77fa16ad33e..9b30836d0ed 100644
--- a/test/test_threaded.jl
+++ b/test/test_threaded.jl
@@ -235,6 +235,22 @@ Trixi.mpi_isroot() && isdir(outdir) && rm(outdir, recursive=true)
end
+ @testset "T8codeMesh" begin
+ @trixi_testset "elixir_euler_source_terms_nonconforming_unstructured_flag.jl" begin
+ @test_trixi_include(joinpath(examples_dir(), "t8code_2d_dgsem", "elixir_euler_source_terms_nonconforming_unstructured_flag.jl"),
+ l2 = [0.0034516244508588046, 0.0023420334036925493, 0.0024261923964557187, 0.004731710454271893],
+ linf = [0.04155789011775046, 0.024772109862748914, 0.03759938693042297, 0.08039824959535657])
+ end
+
+ @trixi_testset "elixir_eulergravity_convergence.jl" begin
+ @test_trixi_include(joinpath(examples_dir(), "t8code_2d_dgsem", "elixir_eulergravity_convergence.jl"),
+ l2 = [0.00024871265138964204, 0.0003370077102132591, 0.0003370077102131964, 0.0007231525513793697],
+ linf = [0.0015813032944647087, 0.0020494288423820173, 0.0020494288423824614, 0.004793821195083758],
+ tspan = (0.0, 0.1))
+ end
+ end
+
+
@testset "DGMulti" begin
@trixi_testset "elixir_euler_weakform.jl (SBP, EC)" begin
@test_trixi_include(joinpath(examples_dir(), "dgmulti_2d", "elixir_euler_weakform.jl"),
From e1e680ca8574acd10daa2e5bc5e1f49e1ce008f9 Mon Sep 17 00:00:00 2001
From: Daniel Doehring
Date: Fri, 28 Jul 2023 04:35:33 +0200
Subject: [PATCH 34/40] Enstrophy for 2D Navier-Stokes (#1591)
* Doubly periodic shear layer
* test if Project.toml shows up in git diff
* remove changes
* Enstrophy for 2D Navier-Stokes
---
Project.toml | 2 +-
.../elixir_navierstokes_shear_layer.jl | 71 +++++++++++++++++++
src/callbacks_step/analysis_dg2d.jl | 18 +++++
.../compressible_navier_stokes_2d.jl | 15 ++++
test/test_parabolic_2d.jl | 4 ++
5 files changed, 109 insertions(+), 1 deletion(-)
create mode 100644 examples/tree_2d_dgsem/elixir_navierstokes_shear_layer.jl
diff --git a/Project.toml b/Project.toml
index db410317851..b3ca99be9ec 100644
--- a/Project.toml
+++ b/Project.toml
@@ -60,8 +60,8 @@ HDF5 = "0.14, 0.15, 0.16"
IfElse = "0.1"
LinearMaps = "2.7, 3.0"
LoopVectorization = "0.12.118"
-Makie = "0.19"
MPI = "0.20"
+Makie = "0.19"
MuladdMacro = "0.2.2"
Octavian = "0.3.5"
OffsetArrays = "1.3"
diff --git a/examples/tree_2d_dgsem/elixir_navierstokes_shear_layer.jl b/examples/tree_2d_dgsem/elixir_navierstokes_shear_layer.jl
new file mode 100644
index 00000000000..a7cb2fc89f1
--- /dev/null
+++ b/examples/tree_2d_dgsem/elixir_navierstokes_shear_layer.jl
@@ -0,0 +1,71 @@
+
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# semidiscretization of the compressible Navier-Stokes equations
+
+# TODO: parabolic; unify names of these accessor functions
+prandtl_number() = 0.72
+mu() = 1.0/3.0 * 10^(-3) # equivalent to Re = 3000
+
+equations = CompressibleEulerEquations2D(1.4)
+equations_parabolic = CompressibleNavierStokesDiffusion2D(equations, mu=mu(),
+ Prandtl=prandtl_number())
+
+function initial_condition_shear_layer(x, t, equations::CompressibleEulerEquations2D)
+ k = 80
+ delta = 0.05
+ u0 = 1.0
+ Ms = 0.1 # maximum Mach number
+
+ rho = 1.0
+ v1 = x[2] <= 0.5 ? u0*tanh(k*(x[2]*0.5 - 0.25)) : tanh(k*(0.75 -x[2]*0.5))
+ v2 = u0*delta * sin(2*pi*(x[1]*0.5 + 0.25))
+ p = (u0 / Ms)^2 * rho / equations.gamma # scaling to get Ms
+
+ return prim2cons(SVector(rho, v1, v2, p), equations)
+end
+initial_condition = initial_condition_shear_layer
+
+volume_flux = flux_ranocha
+solver = DGSEM(polydeg=3, surface_flux=flux_hllc,
+ volume_integral=VolumeIntegralFluxDifferencing(volume_flux))
+
+coordinates_min = (0.0, 0.0)
+coordinates_max = (1.0, 1.0)
+mesh = TreeMesh(coordinates_min, coordinates_max,
+ initial_refinement_level=4,
+ n_cells_max=100_000)
+
+
+semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, equations_parabolic),
+ initial_condition, solver)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+tspan = (0.0, 2.0)
+ode = semidiscretize(semi, tspan)
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 50
+analysis_callback = AnalysisCallback(semi, interval=analysis_interval, save_analysis=true,
+ extra_analysis_integrals=(energy_kinetic,
+ energy_internal,
+ enstrophy))
+
+alive_callback = AliveCallback(analysis_interval=analysis_interval,)
+
+callbacks = CallbackSet(summary_callback,
+ analysis_callback,
+ alive_callback)
+
+###############################################################################
+# run the simulation
+
+time_int_tol = 1e-8
+sol = solve(ode, RDPK3SpFSAL49(); abstol=time_int_tol, reltol=time_int_tol,
+ ode_default_options()..., callback=callbacks)
+summary_callback() # print the timer summary
\ No newline at end of file
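The `# equivalent to Re = 3000` comment follows from the usual definition with the values set in this elixir (rho = 1, u0 = 1) and the unit domain length L = 1 as the reference length (the latter is an assumption about the intended scaling):

$$\mathrm{Re} = \frac{\rho\, u_0\, L}{\mu} = \frac{1 \cdot 1 \cdot 1}{\tfrac{1}{3} \cdot 10^{-3}} = 3000.$$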
diff --git a/src/callbacks_step/analysis_dg2d.jl b/src/callbacks_step/analysis_dg2d.jl
index 4e456f79872..aecabf0e4b7 100644
--- a/src/callbacks_step/analysis_dg2d.jl
+++ b/src/callbacks_step/analysis_dg2d.jl
@@ -213,6 +213,24 @@ function integrate(func::Func, u,
end
end
+function integrate(func::Func, u,
+ mesh::Union{TreeMesh{2}, P4estMesh{2}},
+ equations, equations_parabolic,
+ dg::DGSEM,
+ cache, cache_parabolic; normalize = true) where {Func}
+ gradients_x, gradients_y = cache_parabolic.gradients
+ integrate_via_indices(u, mesh, equations, dg, cache;
+ normalize = normalize) do u, i, j, element, equations, dg
+ u_local = get_node_vars(u, equations, dg, i, j, element)
+ gradients_1_local = get_node_vars(gradients_x, equations_parabolic, dg, i, j,
+ element)
+ gradients_2_local = get_node_vars(gradients_y, equations_parabolic, dg, i, j,
+ element)
+ return func(u_local, (gradients_1_local, gradients_2_local),
+ equations_parabolic)
+ end
+end
+
function analyze(::typeof(entropy_timederivative), du, u, t,
mesh::Union{TreeMesh{2}, StructuredMesh{2}, UnstructuredMesh2D,
P4estMesh{2}, T8codeMesh{2}},
diff --git a/src/equations/compressible_navier_stokes_2d.jl b/src/equations/compressible_navier_stokes_2d.jl
index 9b06e0b5abf..a1f11717e69 100644
--- a/src/equations/compressible_navier_stokes_2d.jl
+++ b/src/equations/compressible_navier_stokes_2d.jl
@@ -300,6 +300,21 @@ end
return T
end
+@inline function enstrophy(u, gradients, equations::CompressibleNavierStokesDiffusion2D)
+ # Enstrophy is 0.5 rho ω⋅ω where ω = ∇ × v
+
+ omega = vorticity(u, gradients, equations)
+ return 0.5 * u[1] * omega^2
+end
+
+@inline function vorticity(u, gradients, equations::CompressibleNavierStokesDiffusion2D)
+ # Ensure that we have velocity `gradients` by way of the `convert_gradient_variables` function.
+ _, dv1dx, dv2dx, _ = convert_derivative_to_primitive(u, gradients[1], equations)
+ _, dv1dy, dv2dy, _ = convert_derivative_to_primitive(u, gradients[2], equations)
+
+ return dv2dx - dv1dy
+end
+
# TODO: can we generalize this to MHD?
"""
struct BoundaryConditionNavierStokesWall
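In 2D, the new `vorticity` function returns the scalar out-of-plane component of the curl of the velocity, and the enstrophy integrand follows directly, matching `dv2dx - dv1dy` and `0.5 * u[1] * omega^2` in the code above:

$$\omega = \frac{\partial v_2}{\partial x} - \frac{\partial v_1}{\partial y}, \qquad \mathcal{E} = \frac{1}{2}\, \rho\, \omega^2.$$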
diff --git a/test/test_parabolic_2d.jl b/test/test_parabolic_2d.jl
index 471b976e990..57f296b55fe 100644
--- a/test/test_parabolic_2d.jl
+++ b/test/test_parabolic_2d.jl
@@ -136,6 +136,10 @@ isdir(outdir) && rm(outdir, recursive=true)
@trixi_testset "TreeMesh2D: elixir_navierstokes_convergence.jl" begin
@test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", "elixir_navierstokes_convergence.jl"),
initial_refinement_level = 2, tspan=(0.0, 0.1),
+ analysis_callback = AnalysisCallback(semi, interval=analysis_interval,
+ extra_analysis_integrals=(energy_kinetic,
+ energy_internal,
+ enstrophy)),
l2 = [0.002111672530658797, 0.0034322351490857846, 0.0038742528195910416, 0.012469246082568561],
linf = [0.012006418939223495, 0.035520871209746126, 0.024512747492231427, 0.11191122588756564]
)
From 73e58dc59ad0e06616507b9338c8fe0bee5b99b4 Mon Sep 17 00:00:00 2001
From: Jesse Chan <1156048+jlchan@users.noreply.github.com>
Date: Fri, 28 Jul 2023 23:28:53 -0500
Subject: [PATCH 35/40] remove CI functions that cause precompilation errors
(#1593)
---
src/solvers/dgsem_p4est/dg_3d_parabolic.jl | 54 ----------------------
1 file changed, 54 deletions(-)
diff --git a/src/solvers/dgsem_p4est/dg_3d_parabolic.jl b/src/solvers/dgsem_p4est/dg_3d_parabolic.jl
index 5370c927e05..6439cad69bb 100644
--- a/src/solvers/dgsem_p4est/dg_3d_parabolic.jl
+++ b/src/solvers/dgsem_p4est/dg_3d_parabolic.jl
@@ -563,60 +563,6 @@ function prolong2boundaries!(cache_parabolic, flux_viscous,
return nothing
end
-# # Function barrier for type stability
-# !!! TODO: Figure out why this cannot removed eventhough it exists in the dg_2d_parabolic.jl file
-function calc_boundary_flux_gradients!(cache, t, boundary_conditions, mesh::P4estMesh,
- equations, surface_integral, dg::DG)
- (; boundary_condition_types, boundary_indices) = boundary_conditions
-
- calc_boundary_flux_by_type!(cache, t, boundary_condition_types, boundary_indices,
- Gradient(), mesh, equations, surface_integral, dg)
- return nothing
-end
-
-function calc_boundary_flux_divergence!(cache, t, boundary_conditions, mesh::P4estMesh,
- equations, surface_integral, dg::DG)
- (; boundary_condition_types, boundary_indices) = boundary_conditions
-
- calc_boundary_flux_by_type!(cache, t, boundary_condition_types, boundary_indices,
- Divergence(), mesh, equations, surface_integral, dg)
- return nothing
-end
-
-# Iterate over tuples of boundary condition types and associated indices
-# in a type-stable way using "lispy tuple programming".
-function calc_boundary_flux_by_type!(cache, t, BCs::NTuple{N, Any},
- BC_indices::NTuple{N, Vector{Int}},
- operator_type,
- mesh::P4estMesh,
- equations, surface_integral, dg::DG) where {N}
- # Extract the boundary condition type and index vector
- boundary_condition = first(BCs)
- boundary_condition_indices = first(BC_indices)
- # Extract the remaining types and indices to be processed later
- remaining_boundary_conditions = Base.tail(BCs)
- remaining_boundary_condition_indices = Base.tail(BC_indices)
-
- # process the first boundary condition type
- calc_boundary_flux!(cache, t, boundary_condition, boundary_condition_indices,
- operator_type, mesh, equations, surface_integral, dg)
-
- # recursively call this method with the unprocessed boundary types
- calc_boundary_flux_by_type!(cache, t, remaining_boundary_conditions,
- remaining_boundary_condition_indices,
- operator_type,
- mesh, equations, surface_integral, dg)
-
- return nothing
-end
-
-# terminate the type-stable iteration over tuples
-function calc_boundary_flux_by_type!(cache, t, BCs::Tuple{}, BC_indices::Tuple{},
- operator_type, mesh::P4estMesh, equations,
- surface_integral, dg::DG)
- nothing
-end
-
function calc_boundary_flux!(cache, t,
boundary_condition_parabolic, # works with Dict types
boundary_condition_indices,
From d05f9c5bfc329db3448a7af18bb1c24cfb75deb2 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Sun, 30 Jul 2023 08:19:37 +0200
Subject: [PATCH 36/40] run only threaded tests by default (#1592)
---
test/runtests.jl | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/test/runtests.jl b/test/runtests.jl
index 1d7eefe1fcb..1b0c745dbfd 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -1,8 +1,11 @@
using Test
using MPI: mpiexec
-# run tests on Travis CI in parallel
-const TRIXI_TEST = get(ENV, "TRIXI_TEST", "all")
+# We run tests in parallel with CI jobs setting the `TRIXI_TEST` environment
+# variable to determine the subset of tests to execute.
+# By default, we just run the threaded tests since they are relatively cheap
+# and test a good amount of different functionality.
+const TRIXI_TEST = get(ENV, "TRIXI_TEST", "threaded")
const TRIXI_MPI_NPROCS = clamp(Sys.CPU_THREADS, 2, 3)
const TRIXI_NTHREADS = clamp(Sys.CPU_THREADS, 2, 3)
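A hedged example of selecting a different subset locally: the environment variable is inherited by the test process spawned by `Pkg.test`, and `"t8code_part1"` is one of the subset names handled in `runtests.jl`.

import Pkg

# Run only the t8code 2D tests instead of the default "threaded" subset.
withenv("TRIXI_TEST" => "t8code_part1") do
    Pkg.test("Trixi")
end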
From d208cee2690fb5b1d63a0511ad5f73967d340205 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Sun, 30 Jul 2023 09:47:44 +0200
Subject: [PATCH 37/40] set version to v0.5.37
---
Project.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Project.toml b/Project.toml
index b3ca99be9ec..1d06317f53a 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "Trixi"
uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb"
authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "]
-version = "0.5.37-pre"
+version = "0.5.37"
[deps]
CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
From e76ea3932d875774220811ff0c14c8e966c312bf Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Sun, 30 Jul 2023 09:47:57 +0200
Subject: [PATCH 38/40] set development version to v0.5.38-pre
---
Project.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Project.toml b/Project.toml
index 1d06317f53a..c22d4b90642 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "Trixi"
uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb"
authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "]
-version = "0.5.37"
+version = "0.5.38-pre"
[deps]
CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
From 6c97c48e53feb9fe372dc020cbd5f3e1e8fef458 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 8 Aug 2023 07:15:15 +0200
Subject: [PATCH 39/40] Bump crate-ci/typos from 1.16.1 to 1.16.2 (#1598)
* Bump crate-ci/typos from 1.16.1 to 1.16.2
Bumps [crate-ci/typos](https://github.com/crate-ci/typos) from 1.16.1 to 1.16.2.
- [Release notes](https://github.com/crate-ci/typos/releases)
- [Changelog](https://github.com/crate-ci/typos/blob/master/CHANGELOG.md)
- [Commits](https://github.com/crate-ci/typos/compare/v1.16.1...v1.16.2)
---
updated-dependencies:
- dependency-name: crate-ci/typos
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
* f_sur -> f_surface
---------
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Hendrik Ranocha
---
.github/workflows/SpellCheck.yml | 2 +-
docs/literate/src/files/DGSEM_FluxDiff.jl | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/.github/workflows/SpellCheck.yml b/.github/workflows/SpellCheck.yml
index f72c3b0947b..a1a429cad97 100644
--- a/.github/workflows/SpellCheck.yml
+++ b/.github/workflows/SpellCheck.yml
@@ -10,4 +10,4 @@ jobs:
- name: Checkout Actions Repository
uses: actions/checkout@v3
- name: Check spelling
- uses: crate-ci/typos@v1.16.1
+ uses: crate-ci/typos@v1.16.2
diff --git a/docs/literate/src/files/DGSEM_FluxDiff.jl b/docs/literate/src/files/DGSEM_FluxDiff.jl
index cf3b0a1dbd4..5ec156ebbe3 100644
--- a/docs/literate/src/files/DGSEM_FluxDiff.jl
+++ b/docs/literate/src/files/DGSEM_FluxDiff.jl
@@ -96,13 +96,13 @@
# \begin{align*}
# J \underline{\dot{u}}(t) &= - M^{-1} B (\underline{f}^* - \underline{f}) - 2D \underline{f}_{vol}(u^-, u^+)\\[5pt]
# &= - M^{-1} B (\underline{f}^* - \underline{f}_{vol}(\underline{u}, \underline{u})) - 2D \underline{f}_{vol}(u^-, u^+)\\[5pt]
-# &= - M^{-1} B \underline{f}_{sur}^* - (2D - M^{-1} B) \underline{f}_{vol}\\[5pt]
-# &= - M^{-1} B \underline{f}_{sur}^* - D_{split} \underline{f}_{vol}
+# &= - M^{-1} B \underline{f}_{surface}^* - (2D - M^{-1} B) \underline{f}_{vol}\\[5pt]
+# &= - M^{-1} B \underline{f}_{surface}^* - D_{split} \underline{f}_{vol}
# \end{align*}
# ```
# This formulation is in a weak form type formulation and can be implemented by using the derivative
# split matrix $D_{split}=(2D-M^{-1}B)$ and two different fluxes. We divide between the surface
-# flux $f=f_{sur}$ used for the numerical flux $f_{sur}^*$ and the already mentioned volume
+# flux $f=f_{surface}$ used for the numerical flux $f_{surface}^*$ and the already mentioned volume
# flux $f_{vol}$ especially for this formulation.
From ddf089271c65d82b466711e59b5f791c0bd21021 Mon Sep 17 00:00:00 2001
From: Daniel Doehring
Date: Tue, 8 Aug 2023 10:17:31 +0200
Subject: [PATCH 40/40] Avoid allocations in `boundary flux` for parabolic RHS
(#1594)
* Remove doubled implementations
* keep main updated with true main
* Avoid allocations in parabolic boundary fluxes
* Correct shear layer IC
* Whitespaces
* Update examples/tree_2d_dgsem/elixir_navierstokes_convergence.jl
Co-authored-by: Hendrik Ranocha
* Update examples/tree_3d_dgsem/elixir_navierstokes_convergence.jl
Co-authored-by: Hendrik Ranocha
---------
Co-authored-by: Hendrik Ranocha
---
examples/tree_2d_dgsem/elixir_navierstokes_convergence.jl | 5 ++++-
examples/tree_2d_dgsem/elixir_navierstokes_shear_layer.jl | 6 ++++--
examples/tree_3d_dgsem/elixir_navierstokes_convergence.jl | 5 ++++-
src/solvers/dgsem_tree/containers_2d.jl | 6 +++---
src/solvers/dgsem_tree/containers_3d.jl | 8 ++++----
5 files changed, 19 insertions(+), 11 deletions(-)
diff --git a/examples/tree_2d_dgsem/elixir_navierstokes_convergence.jl b/examples/tree_2d_dgsem/elixir_navierstokes_convergence.jl
index 36a9f52e39d..b68e9e6c97e 100644
--- a/examples/tree_2d_dgsem/elixir_navierstokes_convergence.jl
+++ b/examples/tree_2d_dgsem/elixir_navierstokes_convergence.jl
@@ -170,7 +170,10 @@ end
initial_condition = initial_condition_navier_stokes_convergence_test
# BC types
-velocity_bc_top_bottom = NoSlip((x, t, equations) -> initial_condition_navier_stokes_convergence_test(x, t, equations)[2:3])
+velocity_bc_top_bottom = NoSlip() do x, t, equations
+ u = initial_condition_navier_stokes_convergence_test(x, t, equations)
+ return SVector(u[2], u[3])
+end
heat_bc_top_bottom = Adiabatic((x, t, equations) -> 0.0)
boundary_condition_top_bottom = BoundaryConditionNavierStokesWall(velocity_bc_top_bottom, heat_bc_top_bottom)
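The do-block rewrite above matches the patch title: indexing the returned solution with a range yields a heap-allocated `Vector`, while building an `SVector` from the needed components stays on the stack. A tiny illustration of that motivation (an assumption stated independently of Trixi):

using StaticArrays

u = SVector(1.0, 2.0, 3.0, 4.0)
v_sliced = u[2:3]               # dynamic range index -> heap-allocated Vector{Float64}
v_static = SVector(u[2], u[3])  # stack-allocated SVector, no heap allocation
@assert v_sliced == v_static                  # same values ...
@assert typeof(v_sliced) != typeof(v_static)  # ... different container types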
diff --git a/examples/tree_2d_dgsem/elixir_navierstokes_shear_layer.jl b/examples/tree_2d_dgsem/elixir_navierstokes_shear_layer.jl
index a7cb2fc89f1..dd26fd8097b 100644
--- a/examples/tree_2d_dgsem/elixir_navierstokes_shear_layer.jl
+++ b/examples/tree_2d_dgsem/elixir_navierstokes_shear_layer.jl
@@ -14,14 +14,16 @@ equations_parabolic = CompressibleNavierStokesDiffusion2D(equations, mu=mu(),
Prandtl=prandtl_number())
function initial_condition_shear_layer(x, t, equations::CompressibleEulerEquations2D)
+ # Shear layer parameters
k = 80
delta = 0.05
u0 = 1.0
+
Ms = 0.1 # maximum Mach number
rho = 1.0
- v1 = x[2] <= 0.5 ? u0*tanh(k*(x[2]*0.5 - 0.25)) : tanh(k*(0.75 -x[2]*0.5))
- v2 = u0*delta * sin(2*pi*(x[1]*0.5 + 0.25))
+ v1 = x[2] <= 0.5 ? u0 * tanh(k*(x[2]*0.5 - 0.25)) : u0 * tanh(k*(0.75 -x[2]*0.5))
+ v2 = u0 * delta * sin(2*pi*(x[1]*0.5 + 0.25))
p = (u0 / Ms)^2 * rho / equations.gamma # scaling to get Ms
return prim2cons(SVector(rho, v1, v2, p), equations)
diff --git a/examples/tree_3d_dgsem/elixir_navierstokes_convergence.jl b/examples/tree_3d_dgsem/elixir_navierstokes_convergence.jl
index b32355c48df..ebb0137a1bb 100644
--- a/examples/tree_3d_dgsem/elixir_navierstokes_convergence.jl
+++ b/examples/tree_3d_dgsem/elixir_navierstokes_convergence.jl
@@ -220,7 +220,10 @@ end
initial_condition = initial_condition_navier_stokes_convergence_test
# BC types
-velocity_bc_top_bottom = NoSlip((x, t, equations) -> initial_condition_navier_stokes_convergence_test(x, t, equations)[2:4])
+velocity_bc_top_bottom = NoSlip() do x, t, equations
+ u = initial_condition_navier_stokes_convergence_test(x, t, equations)
+ return SVector(u[2], u[3], u[4])
+end
heat_bc_top_bottom = Adiabatic((x, t, equations) -> 0.0)
boundary_condition_top_bottom = BoundaryConditionNavierStokesWall(velocity_bc_top_bottom, heat_bc_top_bottom)
diff --git a/src/solvers/dgsem_tree/containers_2d.jl b/src/solvers/dgsem_tree/containers_2d.jl
index 5cf256d3499..d80522d42fd 100644
--- a/src/solvers/dgsem_tree/containers_2d.jl
+++ b/src/solvers/dgsem_tree/containers_2d.jl
@@ -764,10 +764,10 @@ end
# Container data structure (structure-of-arrays style) for DG MPI interfaces
mutable struct MPIInterfaceContainer2D{uEltype <: Real} <: AbstractContainer
- u::Array{uEltype, 4} # [leftright, variables, i, interfaces]
+ u::Array{uEltype, 4} # [leftright, variables, i, interfaces]
local_neighbor_ids::Vector{Int} # [interfaces]
- orientations::Vector{Int} # [interfaces]
- remote_sides::Vector{Int} # [interfaces]
+ orientations::Vector{Int} # [interfaces]
+ remote_sides::Vector{Int} # [interfaces]
# internal `resize!`able storage
_u::Vector{uEltype}
end
diff --git a/src/solvers/dgsem_tree/containers_3d.jl b/src/solvers/dgsem_tree/containers_3d.jl
index 0318946e34d..5fc027ad001 100644
--- a/src/solvers/dgsem_tree/containers_3d.jl
+++ b/src/solvers/dgsem_tree/containers_3d.jl
@@ -520,14 +520,14 @@ end
# Left and right are used *both* for the numbering of the mortar faces *and* for the position of the
# elements with respect to the axis orthogonal to the mortar.
mutable struct L2MortarContainer3D{uEltype <: Real} <: AbstractContainer
- u_upper_left::Array{uEltype, 5} # [leftright, variables, i, j, mortars]
+ u_upper_left::Array{uEltype, 5} # [leftright, variables, i, j, mortars]
u_upper_right::Array{uEltype, 5} # [leftright, variables, i, j, mortars]
- u_lower_left::Array{uEltype, 5} # [leftright, variables, i, j, mortars]
+ u_lower_left::Array{uEltype, 5} # [leftright, variables, i, j, mortars]
u_lower_right::Array{uEltype, 5} # [leftright, variables, i, j, mortars]
- neighbor_ids::Array{Int, 2} # [position, mortars]
+ neighbor_ids::Array{Int, 2} # [position, mortars]
# Large sides: left -> 1, right -> 2
large_sides::Vector{Int} # [mortars]
- orientations::Vector{Int} # [mortars]
+ orientations::Vector{Int} # [mortars]
# internal `resize!`able storage
_u_upper_left::Vector{uEltype}
_u_upper_right::Vector{uEltype}