From 9ac9b3e968b66ba9c28b06391a69a86fd6dd1a5d Mon Sep 17 00:00:00 2001
From: Benjamin Bolm <74359358+bennibolm@users.noreply.github.com>
Date: Tue, 7 May 2024 14:43:26 +0200
Subject: [PATCH 01/44] Add tutorial for subcell IDP limiting (#1882)
* First version of tutorial for subcell IDP limiting
* Add paragraph about time integration method
* Redo code as julia comments
* Hopefully fix reference
* Add section about Visualization
* Fix typos
* Small changes
* Add explanation about limiter, vol integral and solver
* Add bounds checking section
* fix typos
* Add description in introduction
* Clear out before hohq mesh tutorial
* Adapt tutorial
* Implement suggestions.
* Add section about newton method and correction factor
* Implement suggestion; Fix typos
* Implement suggestions
---
docs/literate/src/files/hohqmesh_tutorial.jl | 1 +
docs/literate/src/files/index.jl | 12 +
.../src/files/subcell_shock_capturing.jl | 287 ++++++++++++++++++
docs/make.jl | 1 +
4 files changed, 301 insertions(+)
create mode 100644 docs/literate/src/files/subcell_shock_capturing.jl
diff --git a/docs/literate/src/files/hohqmesh_tutorial.jl b/docs/literate/src/files/hohqmesh_tutorial.jl
index b19d363c4bf..dd81f47951e 100644
--- a/docs/literate/src/files/hohqmesh_tutorial.jl
+++ b/docs/literate/src/files/hohqmesh_tutorial.jl
@@ -35,6 +35,7 @@
# There is a default example for this mesh type that can be executed by
using Trixi
+rm("out", force = true, recursive = true) #hide #md
redirect_stdio(stdout=devnull, stderr=devnull) do # code that prints annoying stuff we don't want to see here #hide #md
trixi_include(default_example_unstructured())
end #hide #md
diff --git a/docs/literate/src/files/index.jl b/docs/literate/src/files/index.jl
index 5605803db22..fd1d884d47d 100644
--- a/docs/literate/src/files/index.jl
+++ b/docs/literate/src/files/index.jl
@@ -56,6 +56,18 @@
# explained and added to an exemplary simulation of the Sedov blast wave with the 2D compressible Euler
# equations.
+#src Note to developers: Use "{ index }" (but without spaces, see next line) to enable automatic indexing
+# ### [{index} Subcell limiting with the IDP Limiter](@ref subcell_shock_capturing)
+#-
+# Trixi.jl features a subcell-wise limiting strategy utilizing an Invariant Domain-Preserving (IDP)
+# approach. This IDP approach computes a blending factor that balances the high-order
+# discontinuous Galerkin (DG) method with a low-order subcell finite volume (FV) method for each
+# node within an element. This localized approach minimizes the application of dissipation,
+# resulting in less limiting compared to the element-wise strategy. Additionally, the framework
+# supports both local bounds, which are primarily used for shock capturing, and global bounds.
+# The application of global bounds ensures the minimal necessary limiting to meet physical
+# admissibility conditions, such as ensuring the non-negativity of variables.
+
#src Note to developers: Use "{ index }" (but without spaces, see next line) to enable automatic indexing
# ### [{index} Non-periodic boundary conditions](@ref non_periodic_boundaries)
#-
diff --git a/docs/literate/src/files/subcell_shock_capturing.jl b/docs/literate/src/files/subcell_shock_capturing.jl
new file mode 100644
index 00000000000..8b8e49a28a8
--- /dev/null
+++ b/docs/literate/src/files/subcell_shock_capturing.jl
@@ -0,0 +1,287 @@
+#src # Subcell limiting with the IDP Limiter
+
+# In the previous tutorial, the element-wise limiting with [`IndicatorHennemannGassner`](@ref)
+# and [`VolumeIntegralShockCapturingHG`](@ref) was explained. This tutorial contains a short
+# introduction to the idea and implementation of subcell shock capturing approaches in Trixi.jl,
+# which are also based on the DGSEM scheme in flux differencing formulation.
+# Trixi.jl contains the a-posteriori invariant domain-preserving (IDP) limiter which was
+# introduced by [Pazner (2020)](https://doi.org/10.1016/j.cma.2021.113876) and
+# [Rueda-Ramírez, Pazner, Gassner (2022)](https://doi.org/10.1016/j.compfluid.2022.105627).
+# It is a flux-corrected transport-type (FCT) limiter and is implemented using [`SubcellLimiterIDP`](@ref)
+# and [`VolumeIntegralSubcellLimiting`](@ref).
+# Since it is an a-posteriori limiter you have to apply a correction stage after each Runge-Kutta
+# stage. This is done by passing the stage callback [`SubcellLimiterIDPCorrection`](@ref) to the
+# time integration method.
+
+# ## Time integration method
+# As mentioned before, the IDP limiting is an a-posteriori limiter. Its limiting process
+# guarantees the target bounds for an explicit (forward) Euler time step. To still achieve a
+# high-order approximation, the implementation uses strong-stability preserving (SSP) Runge-Kutta
+# methods, which can be written as convex combinations of forward Euler steps.
+# As such, they preserve convex constraints that are satisfied by a single forward Euler step,
+# such as a bound on the TVD semi-norm or the maximum principle in 1D.
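+# For illustration, the canonical three-stage, third-order SSPRK method of Shu and Osher
+# (mentioned below) can be written in this form, where every stage is a forward Euler step that
+# is combined convexly with previously computed values,
+# ```math
+# \begin{aligned}
+# u^{(1)} &= u^n + \Delta t \, L(u^n), \\
+# u^{(2)} &= \tfrac{3}{4} u^n + \tfrac{1}{4} \left(u^{(1)} + \Delta t \, L(u^{(1)})\right), \\
+# u^{n+1} &= \tfrac{1}{3} u^n + \tfrac{2}{3} \left(u^{(2)} + \Delta t \, L(u^{(2)})\right),
+# \end{aligned}
+# ```
+# with $L$ denoting the semidiscrete spatial operator and $\Delta t$ the time-step size.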
+#-
+# Since the IDP/FCT limiting procedure operates on independent forward Euler steps, its
+# a-posteriori correction stage is implemented as a stage callback that is triggered after each
+# forward Euler step in an SSP Runge-Kutta method. Unfortunately, the `solve(...)` routines in
+# [OrdinaryDiffEq.jl](https://github.com/SciML/OrdinaryDiffEq.jl), typically employed for time
+# integration in Trixi.jl, do not support this type of stage callback.
+#-
+# Therefore, subcell limiting with the IDP limiter requires the use of a Trixi-internal
+# SSPRK time integration method, which is called with
+# ````julia
+# Trixi.solve(ode, method(stage_callbacks = stage_callbacks); ...)
+# ````
+#-
+# Right now, only the canonical three-stage, third-order SSPRK method (Shu-Osher)
+# [`Trixi.SimpleSSPRK33`](@ref) is implemented.
+
+# # [IDP Limiting](@id IDPLimiter)
+# The implementation of the invariant domain preserving (IDP) limiting approach ([`SubcellLimiterIDP`](@ref))
+# is based on [Pazner (2020)](https://doi.org/10.1016/j.cma.2021.113876) and
+# [Rueda-Ramírez, Pazner, Gassner (2022)](https://doi.org/10.1016/j.compfluid.2022.105627).
+# It supports several types of limiting which are enabled by passing parameters individually.
+
+# ### [Global bounds](@id global_bounds)
+# If enabled, the global bounds enforce physical admissibility conditions, such as non-negativity
+# of variables. This can be done for conservative variables, where the limiter is of one-sided
+# Zalesak type ([Zalesak, 1979](https://doi.org/10.1016/0021-9991(79)90051-2)), and for general
+# non-linear variables, where a Newton-bisection algorithm is used to enforce the bounds.
+
+# The Newton-bisection algorithm is an iterative method and requires some parameters.
+# It uses a fixed maximum number of iteration steps (`max_iterations_newton = 10`) and
+# relative/absolute tolerances (`newton_tolerances = (1.0e-12, 1.0e-14)`). The given values are
+# sufficient in most cases and therefore used as default. Additionally, there is the parameter
+# `gamma_constant_newton`, which can be used to scale the antidiffusive flux for the computation
+# of the blending coefficients of nonlinear variables. The default value is `2 * ndims(equations)`,
+# as it was shown by [Pazner (2020)](https://doi.org/10.1016/j.cma.2021.113876) [Section 4.2.2.]
+# that this value guarantees the fulfillment of bounds for a forward-Euler increment.
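+# As a sketch (with `equations` and `basis` defined as in the example further below, and simply
+# repeating the default values), these parameters are passed as keyword arguments when
+# constructing the limiter:
+# ````julia
+# limiter_idp = SubcellLimiterIDP(equations, basis;
+#                                 positivity_variables_nonlinear = [pressure],
+#                                 max_iterations_newton = 10,
+#                                 newton_tolerances = (1.0e-12, 1.0e-14),
+#                                 gamma_constant_newton = 2 * ndims(equations))
+# ````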
+
+# Very small positive values close to zero can be an issue as well. That's why we use an
+# additional correction factor in the calculation of the global bounds,
+# ```math
+# u^{new} \geq \beta * u^{FV}.
+# ```
+# By default, $\beta$ (named `positivity_correction_factor`) is set to `0.1`, which works well
+# in most of the tested setups.
+
+# #### Conservative variables
+# The procedure to enforce global bounds for conservative variables is as follows:
+# If you want to guarantee non-negativity for the density of the compressible Euler equations,
+# you pass the specific quantity name of the conservative variable.
+using Trixi
+equations = CompressibleEulerEquations2D(1.4)
+
+# The quantity name of the density is `rho`, which is how we enable its limiting.
+positivity_variables_cons = ["rho"]
+
+# The quantity names are passed as a vector to allow several quantities.
+# This is used, for instance, if you want to limit the density of two different components using
+# the multicomponent compressible Euler equations.
+equations = CompressibleEulerMulticomponentEquations2D(gammas = (1.4, 1.648),
+ gas_constants = (0.287, 1.578))
+
+# Then, we just pass both quantity names.
+positivity_variables_cons = ["rho1", "rho2"]
+
+# Alternatively, it is possible to limit all density variables with a general command using
+positivity_variables_cons = ["rho" * string(i) for i in eachcomponent(equations)]
+
+# #### Non-linear variables
+# To allow the limiting of all possible non-linear variables, including variables defined
+# on-the-fly, you can directly pass the function that computes the quantity for which you want
+# to enforce positivity. For instance, if you want to enforce non-negativity for the pressure,
+# proceed as follows.
+positivity_variables_nonlinear = [pressure]
+
+# ### Local bounds
+# Second, Trixi.jl supports the limiting with local bounds for conservative variables using a
+# two-sided Zalesak-type limiter ([Zalesak, 1979](https://doi.org/10.1016/0021-9991(79)90051-2)).
+# They make it possible to avoid spurious oscillations within the global bounds and to improve the
+# shock-capturing capabilities of the method. The corresponding numerical admissibility conditions
+# are frequently formulated as local maximum or minimum principles. The local bounds are computed
+# using the maximum and minimum values of all local neighboring nodes. Within this calculation we
+# use the low-order FV solution values for each node.
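+# Schematically, for a node $i$ with $\mathcal{N}(i)$ denoting the node itself and its direct
+# neighbors, the local bounds of a quantity $u$ read
+# ```math
+# u_i^{\min} = \min_{j \in \mathcal{N}(i)} u_j^{FV}, \qquad
+# u_i^{\max} = \max_{j \in \mathcal{N}(i)} u_j^{FV}.
+# ```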
+
+# As for the limiting with global bounds, you pass the quantity names of the conservative
+# variables you want to limit. So, to limit the density with lower and upper local bounds, pass
+# the following.
+local_minmax_variables_cons = ["rho"]
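+
+# Local and global bounds can also be combined within a single limiter. As a sketch, enabling
+# lower and upper local bounds for the density together with global positivity of the pressure
+# could look like this:
+# ````julia
+# limiter_idp = SubcellLimiterIDP(equations, basis;
+#                                 local_minmax_variables_cons = ["rho"],
+#                                 positivity_variables_nonlinear = [pressure])
+# ````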
+
+# ## Exemplary simulation
+# How to set up a simulation using the IDP limiting becomes clearer when looking at an exemplary
+# setup. This will be a simplified version of `tree_2d_dgsem/elixir_euler_blast_wave_sc_subcell.jl`.
+# Since the setup is very similar to a pure DGSEM setup as in
+# `tree_2d_dgsem/elixir_euler_blast_wave.jl`, the equivalent parts are used without any explanation
+# here.
+using OrdinaryDiffEq
+using Trixi
+
+equations = CompressibleEulerEquations2D(1.4)
+
+function initial_condition_blast_wave(x, t, equations::CompressibleEulerEquations2D)
+ ## Modified From Hennemann & Gassner JCP paper 2020 (Sec. 6.3) -> "medium blast wave"
+ ## Set up polar coordinates
+ inicenter = SVector(0.0, 0.0)
+ x_norm = x[1] - inicenter[1]
+ y_norm = x[2] - inicenter[2]
+ r = sqrt(x_norm^2 + y_norm^2)
+ phi = atan(y_norm, x_norm)
+ sin_phi, cos_phi = sincos(phi)
+
+ ## Calculate primitive variables
+ rho = r > 0.5 ? 1.0 : 1.1691
+ v1 = r > 0.5 ? 0.0 : 0.1882 * cos_phi
+ v2 = r > 0.5 ? 0.0 : 0.1882 * sin_phi
+ p = r > 0.5 ? 1.0E-3 : 1.245
+
+ return prim2cons(SVector(rho, v1, v2, p), equations)
+end
+initial_condition = initial_condition_blast_wave;
+
+# Since the surface integral is equal for both the DG and the subcell FV method, the limiting is
+# applied only in the volume integral.
+#-
+# Note that the DG method is based on the flux differencing formulation. Hence, you have to use a
+# two-point flux, such as [`flux_ranocha`](@ref), [`flux_shima_etal`](@ref), [`flux_chandrashekar`](@ref)
+# or [`flux_kennedy_gruber`](@ref), for the DG volume flux.
+surface_flux = flux_lax_friedrichs
+volume_flux = flux_ranocha
+
+# The limiter is implemented within [`SubcellLimiterIDP`](@ref). It always requires the
+# parameters `equations` and `basis`. With additional parameters (described [above](@ref IDPLimiter)
+# or listed in the docstring) you can specify and enable additional limiting options.
+# Here, the simulation should contain local limiting for the density using lower and upper bounds.
+basis = LobattoLegendreBasis(3)
+limiter_idp = SubcellLimiterIDP(equations, basis;
+ local_minmax_variables_cons = ["rho"])
+
+# The initialized limiter is passed to `VolumeIntegralSubcellLimiting` in addition to the volume
+# fluxes of the low-order and high-order scheme.
+volume_integral = VolumeIntegralSubcellLimiting(limiter_idp;
+ volume_flux_dg = volume_flux,
+ volume_flux_fv = surface_flux)
+
+# Then, the volume integral is passed to `solver` as it is done for the standard flux-differencing
+# DG scheme or the element-wise limiting.
+solver = DGSEM(basis, surface_flux, volume_integral)
+#-
+coordinates_min = (-2.0, -2.0)
+coordinates_max = (2.0, 2.0)
+mesh = TreeMesh(coordinates_min, coordinates_max,
+ initial_refinement_level = 5,
+ n_cells_max = 10_000)
+
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver)
+
+tspan = (0.0, 2.0)
+ode = semidiscretize(semi, tspan)
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 1000
+analysis_callback = AnalysisCallback(semi, interval = analysis_interval)
+
+alive_callback = AliveCallback(analysis_interval = analysis_interval)
+
+save_solution = SaveSolutionCallback(interval = 1000,
+ save_initial_solution = true,
+ save_final_solution = true,
+ solution_variables = cons2prim)
+
+stepsize_callback = StepsizeCallback(cfl = 0.3)
+
+callbacks = CallbackSet(summary_callback,
+ analysis_callback, alive_callback,
+ save_solution,
+ stepsize_callback);
+
+# As explained above, the IDP limiter works a-posteriori and requires the additional use of a
+# correction stage implemented with the stage callback [`SubcellLimiterIDPCorrection`](@ref).
+# This callback is passed within a tuple to the time integration method.
+stage_callbacks = (SubcellLimiterIDPCorrection(),)
+
+# Moreover, as mentioned before, simulations with subcell limiting require a Trixi-internal
+# SSPRK time integration method with the stage callbacks passed to it and the Trixi-internal
+# `Trixi.solve(...)` routine.
+sol = Trixi.solve(ode, Trixi.SimpleSSPRK33(stage_callbacks = stage_callbacks);
+ dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
+ callback = callbacks);
+summary_callback() # print the timer summary
+
+
+# ## Visualization
+# As for a standard simulation in Trixi.jl, it is possible to visualize the solution using the
+# `plot` routine from Plots.jl.
+using Plots
+plot(sol)
+
+# To additionally inspect the amount of limiting that is applied, you can use the visualization
+# workflow based on the [`SaveSolutionCallback`](@ref), [`Trixi2Vtk`](https://github.com/trixi-framework/Trixi2Vtk.jl)
+# and [ParaView](https://www.paraview.org/download/). More details about this procedure
+# can be found in the [visualization documentation](@ref visualization).
+# Unfortunately, the support for subcell limiting data is not yet merged into the main branch
+# of Trixi2Vtk but lies in the branch [`bennibolm/node-variables`](https://github.com/bennibolm/Trixi2Vtk.jl/tree/node-variables).
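+#-
+# The conversion itself follows the standard Trixi2Vtk workflow (with the branch mentioned above
+# installed). A minimal sketch, assuming the solution files of the simulation above were written
+# to the default output directory `out`, could look like
+# ````julia
+# using Trixi2Vtk
+# trixi2vtk("out/solution_*.h5", output_directory = "out")
+# ````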
+#-
+# With that branch and the standard Trixi2Vtk procedure, you get the following
+# dropdown menu in ParaView.
+#-
+# ![ParaView_Dropdownmenu](https://github.com/trixi-framework/Trixi.jl/assets/74359358/70d15f6a-059b-4349-8291-68d9ab3af43e)
+
+# The resulting visualization of the density and the limiting parameter then looks like this.
+# ![blast_wave_paraview](https://github.com/trixi-framework/Trixi.jl/assets/74359358/e5808bed-c8ab-43bf-af7a-050fe43dd630)
+
+# You can see that the limiting coefficient does not lie in the interval [0,1] because Trixi2Vtk
+# interpolates all quantities to regular nodes by default.
+# You can disable this behavior with `reinterpolate=false` in the call to `trixi2vtk(...)`
+# and get the following visualization.
+# ![blast_wave_paraview_reinterpolate=false](https://github.com/trixi-framework/Trixi.jl/assets/74359358/39274f18-0064-469c-b4da-bac4b843e116)
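+#-
+# For reference, a sketch of the corresponding call with disabled reinterpolation could look like
+# ````julia
+# trixi2vtk("out/solution_*.h5", output_directory = "out", reinterpolate = false)
+# ````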
+
+
+# ## Bounds checking
+# Subcell limiting is based on the fulfillment of target bounds - either global or local.
+# Although the implementation works and has been thoroughly tested, there are some cases where
+# these bounds are not met.
+# For instance, the deviations could be on the order of machine precision, which is not problematic.
+# Larger deviations can be caused by too-large time-step sizes (which can easily be fixed by
+# reducing the CFL number), specific boundary conditions, or source terms. Unsuitable parameters
+# for the Newton-bisection algorithm can also be a reason when limiting non-linear variables.
+# These are described [above](@ref global_bounds).
+#-
+# In many cases, it is reasonable to monitor the bounds deviations.
+# Because of that, Trixi.jl supports a bounds checking routine implemented using the stage
+# callback [`BoundsCheckCallback`](@ref). It checks all target bounds for fulfillment
+# in every RK stage. If added to the tuple of stage callbacks like
+# ````julia
+# stage_callbacks = (SubcellLimiterIDPCorrection(), BoundsCheckCallback())
+# ````
+# and passed to the time integration method, a summary is added to the final console output.
+# For the given example, this summary shows that all bounds are met at all times.
+# ````
+# ────────────────────────────────────────────────────────────────────────────────────────────────────
+# Maximum deviation from bounds:
+# ────────────────────────────────────────────────────────────────────────────────────────────────────
+# rho:
+# - lower bound: 0.0
+# - upper bound: 0.0
+# ────────────────────────────────────────────────────────────────────────────────────────────────────
+# ````
+
+# Moreover, it is also possible to monitor the bounds deviations incurred during the simulation.
+# To do that, use the parameter `save_errors = true`, such that the instantaneous deviations are
+# written to `deviations.txt` in `output_directory` every `interval` time steps.
+# ````julia
+# BoundsCheckCallback(save_errors = true, output_directory = "out", interval = 100)
+# ````
+# Then, for the given example, the deviations file contains all deviations for the current
+# time step and simulation time.
+# ````
+# iter, simu_time, rho_min, rho_max
+# 1, 0.0, 0.0, 0.0
+# 101, 0.29394033217556337, 0.0, 0.0
+# 201, 0.6012597465597065, 0.0, 0.0
+# 301, 0.9559096690030839, 0.0, 0.0
+# 401, 1.3674274981949077, 0.0, 0.0
+# 501, 1.8395301696603052, 0.0, 0.0
+# 532, 1.9974179806990118, 0.0, 0.0
+# ````
diff --git a/docs/make.jl b/docs/make.jl
index 30213238d31..50251024262 100644
--- a/docs/make.jl
+++ b/docs/make.jl
@@ -77,6 +77,7 @@ files = [
"Introduction to DG methods" => "scalar_linear_advection_1d.jl",
"DGSEM with flux differencing" => "DGSEM_FluxDiff.jl",
"Shock capturing with flux differencing and stage limiter" => "shock_capturing.jl",
+ "Subcell limiting with the IDP Limiter" => "subcell_shock_capturing.jl",
"Non-periodic boundaries" => "non_periodic_boundaries.jl",
"DG schemes via `DGMulti` solver" => "DGMulti_1.jl",
"Other SBP schemes (FD, CGSEM) via `DGMulti` solver" => "DGMulti_2.jl",
From 206c3a6c287009a612b639e33ef4faa7d8bdd2fd Mon Sep 17 00:00:00 2001
From: Jesse Chan <1156048+jlchan@users.noreply.github.com>
Date: Tue, 7 May 2024 11:11:38 -0500
Subject: [PATCH 02/44] Intermediate PR for #1930 (#1931) (#1932)
* add 1D Gauss tensor product functionality
* add 1D kronecker product fallback
* add Burgers' shock capturing example
* add test
* format
* Update examples/dgmulti_1d/elixir_burgers_gauss_shock_capturing.jl
* add DOI
---------
Co-authored-by: Hendrik Ranocha
Co-authored-by: Hendrik Ranocha
---
.../elixir_burgers_gauss_shock_capturing.jl | 68 +++++++++++++++++++
src/equations/inviscid_burgers_1d.jl | 1 +
.../dgmulti/flux_differencing_gauss_sbp.jl | 40 ++++++++++-
src/solvers/dgmulti/types.jl | 3 +
test/test_dgmulti_1d.jl | 16 +++++
5 files changed, 127 insertions(+), 1 deletion(-)
create mode 100644 examples/dgmulti_1d/elixir_burgers_gauss_shock_capturing.jl
diff --git a/examples/dgmulti_1d/elixir_burgers_gauss_shock_capturing.jl b/examples/dgmulti_1d/elixir_burgers_gauss_shock_capturing.jl
new file mode 100644
index 00000000000..b0632b1f978
--- /dev/null
+++ b/examples/dgmulti_1d/elixir_burgers_gauss_shock_capturing.jl
@@ -0,0 +1,68 @@
+using Trixi
+using OrdinaryDiffEq
+
+equations = InviscidBurgersEquation1D()
+
+###############################################################################
+# setup the GSBP DG discretization that uses the Gauss operators from
+# Chan, Del Rey Fernandez, Carpenter (2019).
+# [https://doi.org/10.1137/18M1209234](https://doi.org/10.1137/18M1209234)
+
+surface_flux = flux_lax_friedrichs
+volume_flux = flux_ec
+
+polydeg = 3
+basis = DGMultiBasis(Line(), polydeg, approximation_type = GaussSBP())
+
+indicator_sc = IndicatorHennemannGassner(equations, basis,
+ alpha_max = 0.5,
+ alpha_min = 0.001,
+ alpha_smooth = true,
+ variable = first)
+volume_integral = VolumeIntegralShockCapturingHG(indicator_sc;
+ volume_flux_dg = volume_flux,
+ volume_flux_fv = surface_flux)
+
+dg = DGMulti(basis,
+ surface_integral = SurfaceIntegralWeakForm(surface_flux),
+ volume_integral = volume_integral)
+
+###############################################################################
+# setup the 1D mesh
+
+cells_per_dimension = (32,)
+mesh = DGMultiMesh(dg, cells_per_dimension,
+ coordinates_min = (-1.0,), coordinates_max = (1.0,),
+ periodicity = true)
+
+###############################################################################
+# setup the semidiscretization and ODE problem
+
+semi = SemidiscretizationHyperbolic(mesh,
+ equations,
+ initial_condition_convergence_test,
+ dg)
+
+tspan = (0.0, 2.0)
+ode = semidiscretize(semi, tspan)
+
+###############################################################################
+# setup the callbacks
+
+# prints a summary of the simulation setup and resets the timers
+summary_callback = SummaryCallback()
+
+# analyses the solution at regular intervals and prints the results
+analysis_callback = AnalysisCallback(semi, interval = 100, uEltype = real(dg))
+
+# handles the re-calculation of the maximum Δt after each time step
+stepsize_callback = StepsizeCallback(cfl = 0.5)
+
+# collect all callbacks such that they can be passed to the ODE solver
+callbacks = CallbackSet(summary_callback, analysis_callback, stepsize_callback)
+
+###############################################################################
+# run the simulation
+
+sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false),
+ dt = 1.0, save_everystep = false, callback = callbacks);
diff --git a/src/equations/inviscid_burgers_1d.jl b/src/equations/inviscid_burgers_1d.jl
index f2387f26ba7..130196a4929 100644
--- a/src/equations/inviscid_burgers_1d.jl
+++ b/src/equations/inviscid_burgers_1d.jl
@@ -168,6 +168,7 @@ end
# Convert conservative variables to entropy variables
@inline cons2entropy(u, equation::InviscidBurgersEquation1D) = u
+@inline entropy2cons(u, equation::InviscidBurgersEquation1D) = u
# Calculate entropy for a conservative state `cons`
@inline entropy(u::Real, ::InviscidBurgersEquation1D) = 0.5 * u^2
diff --git a/src/solvers/dgmulti/flux_differencing_gauss_sbp.jl b/src/solvers/dgmulti/flux_differencing_gauss_sbp.jl
index 9059caf87f6..63a37f6780b 100644
--- a/src/solvers/dgmulti/flux_differencing_gauss_sbp.jl
+++ b/src/solvers/dgmulti/flux_differencing_gauss_sbp.jl
@@ -51,6 +51,29 @@ struct TensorProductGaussFaceOperator{NDIMS, OperatorType <: AbstractGaussOperat
nfaces::Int
end
+function TensorProductGaussFaceOperator(operator::AbstractGaussOperator,
+ dg::DGMulti{1, Line, GaussSBP})
+ rd = dg.basis
+
+ rq1D, wq1D = StartUpDG.gauss_quad(0, 0, polydeg(dg))
+ interp_matrix_gauss_to_face_1d = polynomial_interpolation_matrix(rq1D, [-1; 1])
+
+ nnodes_1d = length(rq1D)
+ face_indices_tensor_product = nothing # not needed in 1D; we fall back to mul!
+
+ num_faces = 2
+
+ T_op = typeof(operator)
+ Tm = typeof(interp_matrix_gauss_to_face_1d)
+ Tw = typeof(inv.(wq1D))
+ Tf = typeof(rd.wf)
+ Ti = typeof(face_indices_tensor_product)
+ return TensorProductGaussFaceOperator{1, T_op, Tm, Tw, Tf, Ti}(interp_matrix_gauss_to_face_1d,
+ inv.(wq1D), rd.wf,
+ face_indices_tensor_product,
+ nnodes_1d, num_faces)
+end
+
# constructor for a 2D operator
function TensorProductGaussFaceOperator(operator::AbstractGaussOperator,
dg::DGMulti{2, Quad, GaussSBP})
@@ -126,6 +149,21 @@ end
end
end
+@inline function tensor_product_gauss_face_operator!(out::AbstractVector,
+ A::TensorProductGaussFaceOperator{1,
+ Interpolation},
+ x::AbstractVector)
+ mul!(out, A.interp_matrix_gauss_to_face_1d, x)
+end
+
+@inline function tensor_product_gauss_face_operator!(out::AbstractVector,
+ A::TensorProductGaussFaceOperator{1,
+ <:Projection},
+ x::AbstractVector)
+ mul!(out, A.interp_matrix_gauss_to_face_1d', x)
+ @. out *= A.inv_volume_weights_1d
+end
+
# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
# Since these FMAs can increase the performance of many numerical algorithms,
# we need to opt-in explicitly.
@@ -352,7 +390,7 @@ end
# For now, this is mostly the same as `create_cache` for DGMultiFluxDiff{<:Polynomial}.
# In the future, we may modify it so that we can specialize additional parts of GaussSBP() solvers.
function create_cache(mesh::DGMultiMesh, equations,
- dg::DGMultiFluxDiff{<:GaussSBP, <:Union{Quad, Hex}}, RealT,
+ dg::DGMultiFluxDiff{<:GaussSBP, <:Union{Line, Quad, Hex}}, RealT,
uEltype)
# call general Polynomial flux differencing constructor
diff --git a/src/solvers/dgmulti/types.jl b/src/solvers/dgmulti/types.jl
index 813bc67061e..ef9d7d2bf09 100644
--- a/src/solvers/dgmulti/types.jl
+++ b/src/solvers/dgmulti/types.jl
@@ -347,6 +347,9 @@ function SimpleKronecker(NDIMS, A, eltype_A = eltype(A))
return SimpleKronecker{NDIMS, typeof(A), typeof(tmp_storage)}(A, tmp_storage)
end
+# fall back to mul! for a 1D Kronecker product
+LinearAlgebra.mul!(b, A_kronecker::SimpleKronecker{1}, x) = mul!(b, A_kronecker.A, x)
+
# Computes `b = kron(A, A) * x` in an optimized fashion
function LinearAlgebra.mul!(b_in, A_kronecker::SimpleKronecker{2}, x_in)
@unpack A = A_kronecker
diff --git a/test/test_dgmulti_1d.jl b/test/test_dgmulti_1d.jl
index e470de71efb..0d083cf9a72 100644
--- a/test/test_dgmulti_1d.jl
+++ b/test/test_dgmulti_1d.jl
@@ -29,6 +29,22 @@ isdir(outdir) && rm(outdir, recursive = true)
end
end
+@trixi_testset "elixir_burgers_gauss_shock_capturing.jl " begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR,
+ "elixir_burgers_gauss_shock_capturing.jl"),
+ cells_per_dimension=(8,), tspan=(0.0, 0.1),
+ l2=[0.445804588167854],
+ linf=[0.74780611426038])
+ # Ensure that we do not have excessive memory allocations
+ # (e.g., from type instabilities)
+ let
+ t = sol.t[end]
+ u_ode = sol.u[end]
+ du_ode = similar(u_ode)
+ @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000
+ end
+end
+
@trixi_testset "elixir_euler_flux_diff.jl " begin
@test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_flux_diff.jl"),
cells_per_dimension=(16,),
From 1061f31a1f84aa913e0c5c4ded29730e847a2ba9 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Tue, 7 May 2024 19:22:17 +0200
Subject: [PATCH 03/44] set version to v0.7.11
---
Project.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Project.toml b/Project.toml
index f4f060a0932..cdc487e7117 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "Trixi"
uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb"
authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "]
-version = "0.7.11-pre"
+version = "0.7.11"
[deps]
CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
From 7fd735f097d065d50cd23207c4daf282fb119785 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Tue, 7 May 2024 19:22:28 +0200
Subject: [PATCH 04/44] set development version to v0.7.12-pre
---
Project.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Project.toml b/Project.toml
index cdc487e7117..68f9060b9d3 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "Trixi"
uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb"
authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "]
-version = "0.7.11"
+version = "0.7.12-pre"
[deps]
CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
From 80f3c364e5a4af42faaf530602899bf6eee33ebf Mon Sep 17 00:00:00 2001
From: Benjamin Bolm <74359358+bennibolm@users.noreply.github.com>
Date: Wed, 8 May 2024 12:28:51 +0200
Subject: [PATCH 05/44] Add one-sided local subcell IDP limiting for non-linear
variables (#1792)
* Add positivity limiting of non-linear variables
* Revise derivative function call; Add default derivative version
* Adapt test to actually test pos limiter for nonlinear variables
* Add unit test to test default implementation of variable_derivative
* Clean up comments and code
* Rename Newton-bisection variables
* Implement suggestions
* Relocate functions
* Add entropy limiters
* Update test errors after adding entropy limiting
* Fix bug in entropy limiting
* Adapt estimated errors after bug fix
* Remove doubled code
* Rename function
* Generalize one-sided limiting (#125)
* Start to align both entropy limiters
* Adapt calc_bounds_onesided!
* Add wrapper function for entropy limiting
* Rename keys in Dict
* use variable and bound_function as parameter
* Use same function for both entropies
* First working version of general onesided limiting
* Rename minmax limiting to twosided limiting
* Update comment
* Clean up default vector
* Last stuff
* Fix unit test
* fmt
* Fix tests
* Correct order
* Rework docstring
* Rename operator to min_or_max
* Call initial check with min_or_max
* fmt
* Implement suggestions
* Remove type stuff
* Fix allocations due to non-specialized routine
* Add comment to NEWS.md
* Remove whitespaces
* Implement suggestions
* Replace `foreach` with `for`
* Fix tests
* Use new bounds check reduction for one sided limiter
* Adapt tests after last merge of main
* Rename `entropy_spec` to `entropy_guermond_etal`
* Remove not-needed `tuple`
* Adapt naming changes to tutorial
---------
Co-authored-by: Michael Schlottke-Lakemper
---
NEWS.md | 1 +
.../src/files/subcell_shock_capturing.jl | 4 +-
...euler_blast_wave_sc_subcell_nonperiodic.jl | 4 +-
...lixir_euler_sedov_blast_wave_sc_subcell.jl | 4 +-
...ck_bubble_shockcapturing_subcell_minmax.jl | 4 +-
src/callbacks_stage/subcell_bounds_check.jl | 31 ++-
.../subcell_bounds_check_2d.jl | 40 +++-
src/equations/compressible_euler_2d.jl | 44 ++++
src/equations/ideal_glm_mhd_2d.jl | 4 +-
src/solvers/dgsem_tree/subcell_limiters.jl | 102 +++++---
src/solvers/dgsem_tree/subcell_limiters_2d.jl | 218 +++++++++++++++---
test/test_tree_2d_euler.jl | 32 +--
test/test_unit.jl | 5 +-
13 files changed, 394 insertions(+), 99 deletions(-)
diff --git a/NEWS.md b/NEWS.md
index 88afec79987..0b4279e9e7a 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -12,6 +12,7 @@ for human readability.
- Implementation of 1D Linearized Euler Equations.
- New analysis callback for 2D `P4estMesh` to compute integrated quantities along a boundary surface, e.g., pressure lift and drag coefficients.
- Optional tuple parameter for `GlmSpeedCallback` called `semi_indices` to specify for which semidiscretization of a `SemidiscretizationCoupled` we need to update the GLM speed.
+- Subcell local one-sided limiting support for nonlinear variables in 2D for `TreeMesh`.
## Changes when updating to v0.7 from v0.6.x
diff --git a/docs/literate/src/files/subcell_shock_capturing.jl b/docs/literate/src/files/subcell_shock_capturing.jl
index 8b8e49a28a8..8a98fdae283 100644
--- a/docs/literate/src/files/subcell_shock_capturing.jl
+++ b/docs/literate/src/files/subcell_shock_capturing.jl
@@ -106,7 +106,7 @@ positivity_variables_nonlinear = [pressure]
# As for the limiting with global bounds you are passing the quantity names of the conservative
# variables you want to limit. So, to limit the density with lower and upper local bounds pass
# the following.
-local_minmax_variables_cons = ["rho"]
+local_twosided_variables_cons = ["rho"]
# ## Exemplary simulation
# How to set up a simulation using the IDP limiting becomes clearer when looking at an exemplary
@@ -154,7 +154,7 @@ volume_flux = flux_ranocha
# Here, the simulation should contain local limiting for the density using lower and upper bounds.
basis = LobattoLegendreBasis(3)
limiter_idp = SubcellLimiterIDP(equations, basis;
- local_minmax_variables_cons = ["rho"])
+ local_twosided_variables_cons = ["rho"])
# The initialized limiter is passed to `VolumeIntegralSubcellLimiting` in addition to the volume
# fluxes of the low-order and high-order scheme.
diff --git a/examples/tree_2d_dgsem/elixir_euler_blast_wave_sc_subcell_nonperiodic.jl b/examples/tree_2d_dgsem/elixir_euler_blast_wave_sc_subcell_nonperiodic.jl
index 209aa2ae352..00d3c69f2e6 100644
--- a/examples/tree_2d_dgsem/elixir_euler_blast_wave_sc_subcell_nonperiodic.jl
+++ b/examples/tree_2d_dgsem/elixir_euler_blast_wave_sc_subcell_nonperiodic.jl
@@ -41,7 +41,9 @@ surface_flux = flux_lax_friedrichs
volume_flux = flux_ranocha
basis = LobattoLegendreBasis(3)
limiter_idp = SubcellLimiterIDP(equations, basis;
- local_minmax_variables_cons = ["rho"])
+ local_twosided_variables_cons = ["rho"],
+ local_onesided_variables_nonlinear = [(Trixi.entropy_math,
+ max)])
volume_integral = VolumeIntegralSubcellLimiting(limiter_idp;
volume_flux_dg = volume_flux,
volume_flux_fv = surface_flux)
diff --git a/examples/tree_2d_dgsem/elixir_euler_sedov_blast_wave_sc_subcell.jl b/examples/tree_2d_dgsem/elixir_euler_sedov_blast_wave_sc_subcell.jl
index c1ba3d96962..6cbbe4eb4e6 100644
--- a/examples/tree_2d_dgsem/elixir_euler_sedov_blast_wave_sc_subcell.jl
+++ b/examples/tree_2d_dgsem/elixir_euler_sedov_blast_wave_sc_subcell.jl
@@ -42,7 +42,9 @@ surface_flux = flux_lax_friedrichs
volume_flux = flux_chandrashekar
basis = LobattoLegendreBasis(3)
limiter_idp = SubcellLimiterIDP(equations, basis;
- local_minmax_variables_cons = ["rho"])
+ local_twosided_variables_cons = ["rho"],
+ local_onesided_variables_nonlinear = [(Trixi.entropy_guermond_etal,
+ min)])
volume_integral = VolumeIntegralSubcellLimiting(limiter_idp;
volume_flux_dg = volume_flux,
volume_flux_fv = surface_flux)
diff --git a/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_minmax.jl b/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_minmax.jl
index 4b606502ebe..b2d49ecbd48 100644
--- a/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_minmax.jl
+++ b/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_minmax.jl
@@ -86,8 +86,8 @@ volume_flux = flux_ranocha
basis = LobattoLegendreBasis(3)
limiter_idp = SubcellLimiterIDP(equations, basis;
- local_minmax_variables_cons = ["rho" * string(i)
- for i in eachcomponent(equations)])
+ local_twosided_variables_cons = ["rho" * string(i)
+ for i in eachcomponent(equations)])
volume_integral = VolumeIntegralSubcellLimiting(limiter_idp;
volume_flux_dg = volume_flux,
volume_flux_fv = surface_flux)
diff --git a/src/callbacks_stage/subcell_bounds_check.jl b/src/callbacks_stage/subcell_bounds_check.jl
index 4dbf44d29c4..ba193ab2997 100644
--- a/src/callbacks_stage/subcell_bounds_check.jl
+++ b/src/callbacks_stage/subcell_bounds_check.jl
@@ -77,22 +77,27 @@ function init_callback(callback::BoundsCheckCallback, semi, limiter::SubcellLimi
return nothing
end
- (; local_minmax, positivity) = limiter
+ (; local_twosided, positivity, local_onesided) = limiter
(; output_directory) = callback
variables = varnames(cons2cons, semi.equations)
mkpath(output_directory)
open("$output_directory/deviations.txt", "a") do f
print(f, "# iter, simu_time")
- if local_minmax
- for v in limiter.local_minmax_variables_cons
+ if local_twosided
+ for v in limiter.local_twosided_variables_cons
variable_string = string(variables[v])
print(f, ", " * variable_string * "_min, " * variable_string * "_max")
end
end
+ if local_onesided
+ for (variable, min_or_max) in limiter.local_onesided_variables_nonlinear
+ print(f, ", " * string(variable) * "_" * string(min_or_max))
+ end
+ end
if positivity
for v in limiter.positivity_variables_cons
- if v in limiter.local_minmax_variables_cons
+ if v in limiter.local_twosided_variables_cons
continue
end
print(f, ", " * string(variables[v]) * "_min")
@@ -120,15 +125,15 @@ end
@inline function finalize_callback(callback::BoundsCheckCallback, semi,
limiter::SubcellLimiterIDP)
- (; local_minmax, positivity) = limiter
+ (; local_twosided, positivity, local_onesided) = limiter
(; idp_bounds_delta_global) = limiter.cache
variables = varnames(cons2cons, semi.equations)
println("─"^100)
println("Maximum deviation from bounds:")
println("─"^100)
- if local_minmax
- for v in limiter.local_minmax_variables_cons
+ if local_twosided
+ for v in limiter.local_twosided_variables_cons
v_string = string(v)
println("$(variables[v]):")
println("- lower bound: ",
@@ -137,9 +142,19 @@ end
idp_bounds_delta_global[Symbol(v_string, "_max")])
end
end
+ if local_onesided
+ for (variable, min_or_max) in limiter.local_onesided_variables_nonlinear
+ variable_string = string(variable)
+ minmax_string = string(min_or_max)
+ println("$variable_string:")
+ println("- $minmax_string bound: ",
+ idp_bounds_delta_global[Symbol(variable_string, "_",
+ minmax_string)])
+ end
+ end
if positivity
for v in limiter.positivity_variables_cons
- if v in limiter.local_minmax_variables_cons
+ if v in limiter.local_twosided_variables_cons
continue
end
println(string(variables[v]) * ":\n- positivity: ",
diff --git a/src/callbacks_stage/subcell_bounds_check_2d.jl b/src/callbacks_stage/subcell_bounds_check_2d.jl
index 3a56ea71f62..0f713a296e2 100644
--- a/src/callbacks_stage/subcell_bounds_check_2d.jl
+++ b/src/callbacks_stage/subcell_bounds_check_2d.jl
@@ -8,7 +8,7 @@
@inline function check_bounds(u, mesh::AbstractMesh{2}, equations, solver, cache,
limiter::SubcellLimiterIDP,
time, iter, output_directory, save_errors)
- (; local_minmax, positivity) = solver.volume_integral.limiter
+ (; local_twosided, positivity, local_onesided) = solver.volume_integral.limiter
(; variable_bounds) = limiter.cache.subcell_limiter_coefficients
(; idp_bounds_delta_local, idp_bounds_delta_global) = limiter.cache
@@ -20,8 +20,8 @@
# `@batch` here to allow a possible redefinition of `@threaded` without creating errors here.
# See also https://github.com/trixi-framework/Trixi.jl/pull/1888#discussion_r1537785293.
- if local_minmax
- for v in limiter.local_minmax_variables_cons
+ if local_twosided
+ for v in limiter.local_twosided_variables_cons
v_string = string(v)
key_min = Symbol(v_string, "_min")
key_max = Symbol(v_string, "_max")
@@ -45,9 +45,28 @@
idp_bounds_delta_local[key_max] = deviation_max
end
end
+ if local_onesided
+ for (variable, min_or_max) in limiter.local_onesided_variables_nonlinear
+ key = Symbol(string(variable), "_", string(min_or_max))
+ deviation = idp_bounds_delta_local[key]
+ sign_ = min_or_max(1.0, -1.0)
+ @batch reduction=(max, deviation) for element in eachelement(solver, cache)
+ for j in eachnode(solver), i in eachnode(solver)
+ v = variable(get_node_vars(u, equations, solver, i, j, element),
+ equations)
+ # Note: We always save the absolute deviations >= 0 and therefore use the
+ # `max` operator for lower and upper bounds. The different directions of
+ # upper and lower bounds are considered with `sign_`.
+ deviation = max(deviation,
+ sign_ * (v - variable_bounds[key][i, j, element]))
+ end
+ end
+ idp_bounds_delta_local[key] = deviation
+ end
+ end
if positivity
for v in limiter.positivity_variables_cons
- if v in limiter.local_minmax_variables_cons
+ if v in limiter.local_twosided_variables_cons
continue
end
key = Symbol(string(v), "_min")
@@ -86,16 +105,23 @@
# Print to output file
open("$output_directory/deviations.txt", "a") do f
print(f, iter, ", ", time)
- if local_minmax
- for v in limiter.local_minmax_variables_cons
+ if local_twosided
+ for v in limiter.local_twosided_variables_cons
v_string = string(v)
print(f, ", ", idp_bounds_delta_local[Symbol(v_string, "_min")],
", ", idp_bounds_delta_local[Symbol(v_string, "_max")])
end
end
+ if local_onesided
+ for (variable, min_or_max) in limiter.local_onesided_variables_nonlinear
+ print(f, ", ",
+ idp_bounds_delta_local[Symbol(string(variable), "_",
+ string(min_or_max))][stride_size])
+ end
+ end
if positivity
for v in limiter.positivity_variables_cons
- if v in limiter.local_minmax_variables_cons
+ if v in limiter.local_twosided_variables_cons
continue
end
print(f, ", ", idp_bounds_delta_local[Symbol(string(v), "_min")])
diff --git a/src/equations/compressible_euler_2d.jl b/src/equations/compressible_euler_2d.jl
index fa7af285aa4..0614066806c 100644
--- a/src/equations/compressible_euler_2d.jl
+++ b/src/equations/compressible_euler_2d.jl
@@ -1886,6 +1886,27 @@ end
return SVector(w1, w2, w3, w4)
end
+# Transformation from conservative variables u to entropy vector ds_0/du,
+# using the modified specific entropy of Guermond et al. (2019): s_0 = p * rho^(-gamma) / (gamma-1).
+# Note: This is *not* the "conventional" specific entropy s = ln(p / rho^(gamma)).
+@inline function cons2entropy_guermond_etal(u, equations::CompressibleEulerEquations2D)
+ rho, rho_v1, rho_v2, rho_e = u
+
+ v1 = rho_v1 / rho
+ v2 = rho_v2 / rho
+ v_square = v1^2 + v2^2
+ inv_rho_gammap1 = (1 / rho)^(equations.gamma + 1.0)
+
+ # The derivative vector for the modified specific entropy of Guermond et al.
+ w1 = inv_rho_gammap1 *
+ (0.5 * rho * (equations.gamma + 1.0) * v_square - equations.gamma * rho_e)
+ w2 = -rho_v1 * inv_rho_gammap1
+ w3 = -rho_v2 * inv_rho_gammap1
+ w4 = (1 / rho)^equations.gamma
+
+ return SVector(w1, w2, w3, w4)
+end
+
@inline function entropy2cons(w, equations::CompressibleEulerEquations2D)
# See Hughes, Franca, Mallet (1986) A new finite element formulation for CFD
# [DOI: 10.1016/0045-7825(86)90127-1](https://doi.org/10.1016/0045-7825(86)90127-1)
@@ -1991,6 +2012,29 @@ end
return S
end
+# Transformation from conservative variables u to d(s)/d(u)
+@inline function gradient_conservative(::typeof(entropy_math),
+ u, equations::CompressibleEulerEquations2D)
+ return cons2entropy(u, equations)
+end
+
+# Calculate the modified specific entropy of Guermond et al. (2019): s_0 = p * rho^(-gamma) / (gamma-1).
+# Note: This is *not* the "conventional" specific entropy s = ln(p / rho^(gamma)).
+@inline function entropy_guermond_etal(u, equations::CompressibleEulerEquations2D)
+ rho, rho_v1, rho_v2, rho_e = u
+
+ # Modified specific entropy from Guermond et al. (2019)
+ s = (rho_e - 0.5 * (rho_v1^2 + rho_v2^2) / rho) * (1 / rho)^equations.gamma
+
+ return s
+end
+
+# Transformation from conservative variables u to d(s)/d(u)
+@inline function gradient_conservative(::typeof(entropy_guermond_etal),
+ u, equations::CompressibleEulerEquations2D)
+ return cons2entropy_guermond_etal(u, equations)
+end
+
# Default entropy is the mathematical entropy
@inline function entropy(cons, equations::CompressibleEulerEquations2D)
entropy_math(cons, equations)
diff --git a/src/equations/ideal_glm_mhd_2d.jl b/src/equations/ideal_glm_mhd_2d.jl
index 4366cd32f08..622410de855 100644
--- a/src/equations/ideal_glm_mhd_2d.jl
+++ b/src/equations/ideal_glm_mhd_2d.jl
@@ -295,8 +295,8 @@ of local and symmetric parts. It is equivalent to the non-conservative flux of B
et al. (`flux_nonconservative_powell`) for conforming meshes but it yields different
results on non-conforming meshes(!).
-The two other flux functions with the same name return either the local
-or symmetric portion of the non-conservative flux based on the type of the
+The two other flux functions with the same name return either the local
+or symmetric portion of the non-conservative flux based on the type of the
nonconservative_type argument, employing multiple dispatch. They are used to
compute the subcell fluxes in dg_2d_subcell_limiters.jl.
diff --git a/src/solvers/dgsem_tree/subcell_limiters.jl b/src/solvers/dgsem_tree/subcell_limiters.jl
index e433c953779..17c9488316d 100644
--- a/src/solvers/dgsem_tree/subcell_limiters.jl
+++ b/src/solvers/dgsem_tree/subcell_limiters.jl
@@ -14,27 +14,36 @@ end
"""
SubcellLimiterIDP(equations::AbstractEquations, basis;
- local_minmax_variables_cons = String[],
+ local_twosided_variables_cons = String[],
positivity_variables_cons = String[],
positivity_variables_nonlinear = [],
positivity_correction_factor = 0.1,
+ local_onesided_variables_nonlinear = [],
max_iterations_newton = 10,
newton_tolerances = (1.0e-12, 1.0e-14),
gamma_constant_newton = 2 * ndims(equations))
Subcell invariant domain preserving (IDP) limiting used with [`VolumeIntegralSubcellLimiting`](@ref)
including:
-- Local maximum/minimum Zalesak-type limiting for conservative variables (`local_minmax_variables_cons`)
+- Local two-sided Zalesak-type limiting for conservative variables (`local_twosided_variables_cons`)
- Positivity limiting for conservative variables (`positivity_variables_cons`) and nonlinear variables
(`positivity_variables_nonlinear`)
+- Local one-sided limiting for nonlinear variables, e.g. `entropy_guermond_etal` and `entropy_math`
+with `local_onesided_variables_nonlinear`
-Conservative variables to be limited are passed as a vector of strings, e.g. `local_minmax_variables_cons = ["rho"]`
-and `positivity_variables_cons = ["rho"]`. For nonlinear variables the specific functions are
-passed in a vector, e.g. `positivity_variables_nonlinear = [pressure]`.
+To use these three limiting options, use the following structure:
+
+***Conservative variables*** to be limited are passed as a vector of strings, e.g.
+`local_twosided_variables_cons = ["rho"]` and `positivity_variables_cons = ["rho"]`.
+For ***nonlinear variables***, the respective functions are passed within a vector: To ensure
+positivity, use a plain vector including the desired variables, e.g. `positivity_variables_nonlinear = [pressure]`.
+For local one-sided limiting pass the variable function combined with the requested bound
+(`min` or `max`) as a tuple. For instance, to impose a lower local bound on the modified specific
+entropy by Guermond et al. use `local_onesided_variables_nonlinear = [(Trixi.entropy_guermond_etal, min)]`.
The bounds are calculated using the low-order FV solution. The positivity limiter uses
`positivity_correction_factor` such that `u^new >= positivity_correction_factor * u^FV`.
-The limiting of nonlinear variables uses a Newton-bisection method with a maximum of
+Local and global limiting of nonlinear variables uses a Newton-bisection method with a maximum of
`max_iterations_newton` iterations, relative and absolute tolerances of `newton_tolerances`
and a provisional update constant `gamma_constant_newton` (`gamma_constant_newton>=2*d`,
where `d = #dimensions`). See equation (20) of Pazner (2020) and equation (30) of Rueda-Ramírez et al. (2022).
@@ -55,14 +64,17 @@ where `d = #dimensions`). See equation (20) of Pazner (2020) and equation (30) o
!!! warning "Experimental implementation"
This is an experimental feature and may change in future releases.
"""
-struct SubcellLimiterIDP{RealT <: Real, LimitingVariablesNonlinear, Cache} <:
+struct SubcellLimiterIDP{RealT <: Real, LimitingVariablesNonlinear,
+ LimitingOnesidedVariablesNonlinear, Cache} <:
AbstractSubcellLimiter
- local_minmax::Bool
- local_minmax_variables_cons::Vector{Int} # Local mininum/maximum principles for conservative variables
+ local_twosided::Bool
+ local_twosided_variables_cons::Vector{Int} # Local two-sided limiting for conservative variables
positivity::Bool
positivity_variables_cons::Vector{Int} # Positivity for conservative variables
positivity_variables_nonlinear::LimitingVariablesNonlinear # Positivity for nonlinear variables
positivity_correction_factor::RealT
+ local_onesided::Bool
+ local_onesided_variables_nonlinear::LimitingOnesidedVariablesNonlinear # Local one-sided limiting for nonlinear variables
cache::Cache
max_iterations_newton::Int
newton_tolerances::Tuple{RealT, RealT} # Relative and absolute tolerances for Newton's method
@@ -71,32 +83,56 @@ end
# this method is used when the limiter is constructed as for shock-capturing volume integrals
function SubcellLimiterIDP(equations::AbstractEquations, basis;
- local_minmax_variables_cons = String[],
+ local_twosided_variables_cons = String[],
positivity_variables_cons = String[],
positivity_variables_nonlinear = [],
positivity_correction_factor = 0.1,
+ local_onesided_variables_nonlinear = [],
max_iterations_newton = 10,
newton_tolerances = (1.0e-12, 1.0e-14),
gamma_constant_newton = 2 * ndims(equations))
- local_minmax = (length(local_minmax_variables_cons) > 0)
+ local_twosided = (length(local_twosided_variables_cons) > 0)
+ local_onesided = (length(local_onesided_variables_nonlinear) > 0)
positivity = (length(positivity_variables_cons) +
length(positivity_variables_nonlinear) > 0)
- local_minmax_variables_cons_ = get_variable_index.(local_minmax_variables_cons,
- equations)
+ # When passing `min` or `max` in the elixir, the specific function of Base is used.
+ # To speed up the simulation, we replace it with `Trixi.min` and `Trixi.max` respectively.
+ local_onesided_variables_nonlinear_ = Tuple{Function, Function}[]
+ for (variable, min_or_max) in local_onesided_variables_nonlinear
+ if min_or_max === Base.max
+ push!(local_onesided_variables_nonlinear_, (variable, max))
+ elseif min_or_max === Base.min
+ push!(local_onesided_variables_nonlinear_, (variable, min))
+ elseif min_or_max === Trixi.max || min_or_max === Trixi.min
+ push!(local_onesided_variables_nonlinear_, (variable, min_or_max))
+ else
+ error("Parameter $min_or_max is not a valid input. Use `max` or `min` instead.")
+ end
+ end
+ local_onesided_variables_nonlinear_ = Tuple(local_onesided_variables_nonlinear_)
+
+ local_twosided_variables_cons_ = get_variable_index.(local_twosided_variables_cons,
+ equations)
positivity_variables_cons_ = get_variable_index.(positivity_variables_cons,
equations)
bound_keys = ()
- if local_minmax
- for v in local_minmax_variables_cons_
+ if local_twosided
+ for v in local_twosided_variables_cons_
v_string = string(v)
bound_keys = (bound_keys..., Symbol(v_string, "_min"),
Symbol(v_string, "_max"))
end
end
+ if local_onesided
+ for (variable, min_or_max) in local_onesided_variables_nonlinear_
+ bound_keys = (bound_keys...,
+ Symbol(string(variable), "_", string(min_or_max)))
+ end
+ end
for v in positivity_variables_cons_
- if !(v in local_minmax_variables_cons_)
+ if !(v in local_twosided_variables_cons_)
bound_keys = (bound_keys..., Symbol(string(v), "_min"))
end
end
@@ -108,29 +144,36 @@ function SubcellLimiterIDP(equations::AbstractEquations, basis;
SubcellLimiterIDP{typeof(positivity_correction_factor),
typeof(positivity_variables_nonlinear),
- typeof(cache)}(local_minmax, local_minmax_variables_cons_,
+ typeof(local_onesided_variables_nonlinear_),
+ typeof(cache)}(local_twosided, local_twosided_variables_cons_,
positivity, positivity_variables_cons_,
positivity_variables_nonlinear,
- positivity_correction_factor, cache,
+ positivity_correction_factor,
+ local_onesided,
+ local_onesided_variables_nonlinear_,
+ cache,
max_iterations_newton, newton_tolerances,
gamma_constant_newton)
end
function Base.show(io::IO, limiter::SubcellLimiterIDP)
@nospecialize limiter # reduce precompilation time
- (; local_minmax, positivity) = limiter
+ (; local_twosided, positivity, local_onesided) = limiter
print(io, "SubcellLimiterIDP(")
- if !(local_minmax || positivity)
+ if !(local_twosided || positivity || local_onesided)
print(io, "No limiter selected => pure DG method")
else
features = String[]
- if local_minmax
+ if local_twosided
push!(features, "local min/max")
end
if positivity
push!(features, "positivity")
end
+ if local_onesided
+ push!(features, "local onesided")
+ end
join(io, features, ", ")
print(io, "Limiter=($features), ")
end
@@ -140,19 +183,19 @@ end
function Base.show(io::IO, ::MIME"text/plain", limiter::SubcellLimiterIDP)
@nospecialize limiter # reduce precompilation time
- (; local_minmax, positivity) = limiter
+ (; local_twosided, positivity, local_onesided) = limiter
if get(io, :compact, false)
show(io, limiter)
else
- if !(local_minmax || positivity)
- setup = ["limiter" => "No limiter selected => pure DG method"]
+ if !(local_twosided || positivity || local_onesided)
+ setup = ["Limiter" => "No limiter selected => pure DG method"]
else
- setup = ["limiter" => ""]
- if local_minmax
+ setup = ["Limiter" => ""]
+ if local_twosided
setup = [
setup...,
- "" => "Local maximum/minimum limiting for conservative variables $(limiter.local_minmax_variables_cons)",
+ "" => "Local two-sided limiting for conservative variables $(limiter.local_twosided_variables_cons)",
]
end
if positivity
@@ -163,6 +206,11 @@ function Base.show(io::IO, ::MIME"text/plain", limiter::SubcellLimiterIDP)
"" => "- with positivity correction factor = $(limiter.positivity_correction_factor)",
]
end
+ if local_onesided
+ for (variable, min_or_max) in limiter.local_onesided_variables_nonlinear
+ setup = [setup..., "" => "Local $min_or_max limiting for $variable"]
+ end
+ end
setup = [
setup...,
"Local bounds" => "FV solution",
diff --git a/src/solvers/dgsem_tree/subcell_limiters_2d.jl b/src/solvers/dgsem_tree/subcell_limiters_2d.jl
index 9343cee4397..33ae0599748 100644
--- a/src/solvers/dgsem_tree/subcell_limiters_2d.jl
+++ b/src/solvers/dgsem_tree/subcell_limiters_2d.jl
@@ -37,13 +37,17 @@ function (limiter::SubcellLimiterIDP)(u::AbstractArray{<:Any, 4}, semi, dg::DGSE
# TODO: Do not abuse `reset_du!` but maybe implement a generic `set_zero!`
@trixi_timeit timer() "reset alpha" reset_du!(alpha, dg, semi.cache)
- if limiter.local_minmax
- @trixi_timeit timer() "local min/max limiting" idp_local_minmax!(alpha, limiter,
- u, t, dt, semi)
+ if limiter.local_twosided
+ @trixi_timeit timer() "local twosided" idp_local_twosided!(alpha, limiter,
+ u, t, dt, semi)
end
if limiter.positivity
@trixi_timeit timer() "positivity" idp_positivity!(alpha, limiter, u, dt, semi)
end
+ if limiter.local_onesided
+ @trixi_timeit timer() "local onesided" idp_local_onesided!(alpha, limiter,
+ u, t, dt, semi)
+ end
# Calculate alpha1 and alpha2
@unpack alpha1, alpha2 = limiter.cache.subcell_limiter_coefficients
@@ -164,18 +168,121 @@ end
return nothing
end
+@inline function calc_bounds_onesided!(var_minmax, min_or_max, variable, u, t, semi)
+ mesh, equations, dg, cache = mesh_equations_solver_cache(semi)
+ # Calc bounds inside elements
+ @threaded for element in eachelement(dg, cache)
+ # Reset bounds
+ for j in eachnode(dg), i in eachnode(dg)
+ if min_or_max === max
+ var_minmax[i, j, element] = typemin(eltype(var_minmax))
+ else
+ var_minmax[i, j, element] = typemax(eltype(var_minmax))
+ end
+ end
+
+ # Calculate bounds at Gauss-Lobatto nodes using u
+ for j in eachnode(dg), i in eachnode(dg)
+ var = variable(get_node_vars(u, equations, dg, i, j, element), equations)
+ var_minmax[i, j, element] = min_or_max(var_minmax[i, j, element], var)
+
+ if i > 1
+ var_minmax[i - 1, j, element] = min_or_max(var_minmax[i - 1, j,
+ element], var)
+ end
+ if i < nnodes(dg)
+ var_minmax[i + 1, j, element] = min_or_max(var_minmax[i + 1, j,
+ element], var)
+ end
+ if j > 1
+ var_minmax[i, j - 1, element] = min_or_max(var_minmax[i, j - 1,
+ element], var)
+ end
+ if j < nnodes(dg)
+ var_minmax[i, j + 1, element] = min_or_max(var_minmax[i, j + 1,
+ element], var)
+ end
+ end
+ end
+
+ # Values at element boundary
+ calc_bounds_onesided_interface!(var_minmax, min_or_max, variable, u, t, semi, mesh)
+end
+
+@inline function calc_bounds_onesided_interface!(var_minmax, min_or_max, variable, u, t,
+ semi, mesh::TreeMesh2D)
+ _, equations, dg, cache = mesh_equations_solver_cache(semi)
+ (; boundary_conditions) = semi
+ # Calc bounds at interfaces and periodic boundaries
+ for interface in eachinterface(dg, cache)
+ # Get neighboring element ids
+ left = cache.interfaces.neighbor_ids[1, interface]
+ right = cache.interfaces.neighbor_ids[2, interface]
+
+ orientation = cache.interfaces.orientations[interface]
+
+ for i in eachnode(dg)
+ index_left = (nnodes(dg), i)
+ index_right = (1, i)
+ if orientation == 2
+ index_left = reverse(index_left)
+ index_right = reverse(index_right)
+ end
+ var_left = variable(get_node_vars(u, equations, dg, index_left..., left),
+ equations)
+ var_right = variable(get_node_vars(u, equations, dg, index_right..., right),
+ equations)
+
+ var_minmax[index_right..., right] = min_or_max(var_minmax[index_right...,
+ right], var_left)
+ var_minmax[index_left..., left] = min_or_max(var_minmax[index_left...,
+ left], var_right)
+ end
+ end
+
+ # Calc bounds at physical boundaries
+ for boundary in eachboundary(dg, cache)
+ element = cache.boundaries.neighbor_ids[boundary]
+ orientation = cache.boundaries.orientations[boundary]
+ neighbor_side = cache.boundaries.neighbor_sides[boundary]
+
+ for i in eachnode(dg)
+ if neighbor_side == 2 # Element is on the right, boundary on the left
+ index = (1, i)
+ boundary_index = 1
+ else # Element is on the left, boundary on the right
+ index = (nnodes(dg), i)
+ boundary_index = 2
+ end
+ if orientation == 2
+ index = reverse(index)
+ boundary_index += 2
+ end
+ u_outer = get_boundary_outer_state(boundary_conditions[boundary_index],
+ cache, t, equations, dg,
+ index..., element)
+ var_outer = variable(u_outer, equations)
+
+ var_minmax[index..., element] = min_or_max(var_minmax[index..., element],
+ var_outer)
+ end
+ end
+
+ return nothing
+end
+
###############################################################################
-# Local minimum/maximum limiting
+# Local two-sided limiting of conservative variables
-@inline function idp_local_minmax!(alpha, limiter, u, t, dt, semi)
- for variable in limiter.local_minmax_variables_cons
- idp_local_minmax!(alpha, limiter, u, t, dt, semi, variable)
+@inline function idp_local_twosided!(alpha, limiter, u, t, dt, semi)
+ for variable in limiter.local_twosided_variables_cons
+ idp_local_twosided!(alpha, limiter, u, t, dt, semi, variable)
end
return nothing
end
-@inline function idp_local_minmax!(alpha, limiter, u, t, dt, semi, variable)
+@inline function idp_local_twosided!(alpha, limiter, u, t, dt, semi, variable)
_, _, dg, cache = mesh_equations_solver_cache(semi)
(; antidiffusive_flux1_L, antidiffusive_flux2_L, antidiffusive_flux1_R, antidiffusive_flux2_R) = cache.antidiffusive_fluxes
(; inverse_weights) = dg.basis
@@ -236,6 +343,40 @@ end
return nothing
end
+##############################################################################
+# Local one-sided limiting of nonlinear variables
+
+@inline function idp_local_onesided!(alpha, limiter, u, t, dt, semi)
+ for (variable, min_or_max) in limiter.local_onesided_variables_nonlinear
+ idp_local_onesided!(alpha, limiter, u, t, dt, semi, variable, min_or_max)
+ end
+
+ return nothing
+end
+
+@inline function idp_local_onesided!(alpha, limiter, u, t, dt, semi, variable,
+ min_or_max)
+ _, equations, dg, cache = mesh_equations_solver_cache(semi)
+ (; variable_bounds) = limiter.cache.subcell_limiter_coefficients
+ var_minmax = variable_bounds[Symbol(string(variable), "_", string(min_or_max))]
+ calc_bounds_onesided!(var_minmax, min_or_max, variable, u, t, semi)
+
+ # Perform Newton's bisection method to find new alpha
+ @threaded for element in eachelement(dg, cache)
+ inverse_jacobian = cache.elements.inverse_jacobian[element]
+ for j in eachnode(dg), i in eachnode(dg)
+ u_local = get_node_vars(u, equations, dg, i, j, element)
+ newton_loops_alpha!(alpha, var_minmax[i, j, element], u_local,
+ i, j, element, variable, min_or_max,
+ initial_check_local_onesided_newton_idp,
+ final_check_local_onesided_newton_idp, inverse_jacobian,
+ dt, equations, dg, cache, limiter)
+ end
+ end
+
+ return nothing
+end
+
###############################################################################
# Global positivity limiting
@@ -283,8 +424,8 @@ end
end
# Compute bound
- if limiter.local_minmax &&
- variable in limiter.local_minmax_variables_cons &&
+ if limiter.local_twosided &&
+ variable in limiter.local_twosided_variables_cons &&
var_min[i, j, element] >= positivity_correction_factor * var
# Local limiting is more restrictive than positivity limiting
# => Skip positivity limiting for this node
@@ -346,7 +487,7 @@ end
# Perform Newton's bisection method to find new alpha
newton_loops_alpha!(alpha, var_min[i, j, element], u_local, i, j, element,
- variable, initial_check_nonnegative_newton_idp,
+ variable, min, initial_check_nonnegative_newton_idp,
final_check_nonnegative_newton_idp, inverse_jacobian,
dt, equations, dg, cache, limiter)
end
@@ -356,8 +497,9 @@ end
end
@inline function newton_loops_alpha!(alpha, bound, u, i, j, element, variable,
- initial_check, final_check, inverse_jacobian, dt,
- equations, dg, cache, limiter)
+ min_or_max, initial_check, final_check,
+ inverse_jacobian, dt, equations, dg, cache,
+ limiter)
(; inverse_weights) = dg.basis
(; antidiffusive_flux1_L, antidiffusive_flux2_L, antidiffusive_flux1_R, antidiffusive_flux2_R) = cache.antidiffusive_fluxes
@@ -367,37 +509,38 @@ end
antidiffusive_flux = gamma_constant_newton * inverse_jacobian * inverse_weights[i] *
get_node_vars(antidiffusive_flux1_R, equations, dg, i, j,
element)
- newton_loop!(alpha, bound, u, i, j, element, variable, initial_check, final_check,
- equations, dt, limiter, antidiffusive_flux)
+ newton_loop!(alpha, bound, u, i, j, element, variable, min_or_max, initial_check,
+ final_check, equations, dt, limiter, antidiffusive_flux)
# positive xi direction
antidiffusive_flux = -gamma_constant_newton * inverse_jacobian *
inverse_weights[i] *
get_node_vars(antidiffusive_flux1_L, equations, dg, i + 1, j,
element)
- newton_loop!(alpha, bound, u, i, j, element, variable, initial_check, final_check,
- equations, dt, limiter, antidiffusive_flux)
+ newton_loop!(alpha, bound, u, i, j, element, variable, min_or_max, initial_check,
+ final_check, equations, dt, limiter, antidiffusive_flux)
# negative eta direction
antidiffusive_flux = gamma_constant_newton * inverse_jacobian * inverse_weights[j] *
get_node_vars(antidiffusive_flux2_R, equations, dg, i, j,
element)
- newton_loop!(alpha, bound, u, i, j, element, variable, initial_check, final_check,
- equations, dt, limiter, antidiffusive_flux)
+ newton_loop!(alpha, bound, u, i, j, element, variable, min_or_max, initial_check,
+ final_check, equations, dt, limiter, antidiffusive_flux)
# positive eta direction
antidiffusive_flux = -gamma_constant_newton * inverse_jacobian *
inverse_weights[j] *
get_node_vars(antidiffusive_flux2_L, equations, dg, i, j + 1,
element)
- newton_loop!(alpha, bound, u, i, j, element, variable, initial_check, final_check,
- equations, dt, limiter, antidiffusive_flux)
+ newton_loop!(alpha, bound, u, i, j, element, variable, min_or_max, initial_check,
+ final_check, equations, dt, limiter, antidiffusive_flux)
return nothing
end
-@inline function newton_loop!(alpha, bound, u, i, j, element, variable, initial_check,
- final_check, equations, dt, limiter, antidiffusive_flux)
+@inline function newton_loop!(alpha, bound, u, i, j, element, variable, min_or_max,
+ initial_check, final_check, equations, dt, limiter,
+ antidiffusive_flux)
newton_reltol, newton_abstol = limiter.newton_tolerances
beta = 1 - alpha[i, j, element]
@@ -411,7 +554,7 @@ end
if isvalid(u_curr, equations)
goal = goal_function_newton_idp(variable, bound, u_curr, equations)
- initial_check(bound, goal, newton_abstol) && return nothing
+ initial_check(min_or_max, bound, goal, newton_abstol) && return nothing
end
# Newton iterations
@@ -446,7 +589,7 @@ end
# Check new beta for condition and update bounds
goal = goal_function_newton_idp(variable, bound, u_curr, equations)
- if initial_check(bound, goal, newton_abstol)
+ if initial_check(min_or_max, bound, goal, newton_abstol)
# New beta fulfills condition
beta_L = beta
else
@@ -479,18 +622,25 @@ end
end
new_alpha = 1 - beta
- if alpha[i, j, element] > new_alpha + newton_abstol
- error("Alpha is getting smaller. old: $(alpha[i, j, element]), new: $new_alpha")
- else
- alpha[i, j, element] = new_alpha
- end
+ alpha[i, j, element] = new_alpha
return nothing
end
### Auxiliary routines for Newton's bisection method ###
# Initial checks
-@inline initial_check_nonnegative_newton_idp(bound, goal, newton_abstol) = goal <= 0
+@inline function initial_check_local_onesided_newton_idp(::typeof(min), bound,
+ goal, newton_abstol)
+ goal <= max(newton_abstol, abs(bound) * newton_abstol)
+end
+
+@inline function initial_check_local_onesided_newton_idp(::typeof(max), bound,
+ goal, newton_abstol)
+ goal >= -max(newton_abstol, abs(bound) * newton_abstol)
+end
+
+@inline initial_check_nonnegative_newton_idp(min_or_max, bound, goal, newton_abstol) = goal <=
+ 0
# Goal and d(Goal)d(u) function
@inline goal_function_newton_idp(variable, bound, u, equations) = bound -
@@ -501,6 +651,12 @@ end
end
# Final checks
+# final check for one-sided local limiting
+@inline function final_check_local_onesided_newton_idp(bound, goal, newton_abstol)
+ abs(goal) < max(newton_abstol, abs(bound) * newton_abstol)
+end
+
+# final check for nonnegativity limiting
@inline function final_check_nonnegative_newton_idp(bound, goal, newton_abstol)
(goal <= eps()) && (goal > -max(newton_abstol, abs(bound) * newton_abstol))
end
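For orientation, the following standalone sketch illustrates how the one-sided initial check introduced above interacts with the goal function `goal = bound - variable(u)`: a node value that already satisfies a lower bound makes the Newton loop exit immediately, while a violating value triggers the bisection. The numbers are made up purely for illustration and are not taken from the Trixi.jl sources or tests.

    # Hypothetical values; `bound_min` plays the role of a one-sided lower bound
    # (min_or_max === min) on a nonlinear quantity such as a specific entropy.
    newton_abstol = 1.0e-13
    bound_min = 0.5
    var_ok  = 0.7   # node value above the bound -> no limiting needed
    var_bad = 0.3   # node value below the bound -> Newton bisection kicks in

    goal_ok  = bound_min - var_ok    # -0.2, mirrors goal_function_newton_idp
    goal_bad = bound_min - var_bad   #  0.2

    # Same inequality as initial_check_local_onesided_newton_idp(::typeof(min), ...)
    @assert goal_ok <= max(newton_abstol, abs(bound_min) * newton_abstol)
    @assert !(goal_bad <= max(newton_abstol, abs(bound_min) * newton_abstol))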
diff --git a/test/test_tree_2d_euler.jl b/test/test_tree_2d_euler.jl
index d593dc540f7..efd4058031f 100644
--- a/test/test_tree_2d_euler.jl
+++ b/test/test_tree_2d_euler.jl
@@ -349,16 +349,16 @@ end
@test_trixi_include(joinpath(EXAMPLES_DIR,
"elixir_euler_blast_wave_sc_subcell_nonperiodic.jl"),
l2=[
- 0.3517507570120483,
- 0.19252291020146015,
- 0.19249751956580294,
- 0.618717827188004,
+ 0.3221177942225801,
+ 0.1798478357478982,
+ 0.1798364616438908,
+ 0.6136884131056267,
],
linf=[
- 1.6699566795772216,
- 1.3608007992899402,
- 1.361864507190922,
- 2.44022884092527,
+ 1.343766644801395,
+ 1.1749593109683463,
+ 1.1747613085307178,
+ 2.4216006041018785,
],
tspan=(0.0, 0.5),
initial_refinement_level=4,
@@ -403,16 +403,16 @@ end
@test_trixi_include(joinpath(EXAMPLES_DIR,
"elixir_euler_sedov_blast_wave_sc_subcell.jl"),
l2=[
- 0.432804941135901,
- 0.15009019787510924,
- 0.15009019787510922,
- 0.6160764058367757,
+ 0.41444427153173785,
+ 0.1460669409661223,
+ 0.14606693069201596,
+ 0.6168046457461059,
],
linf=[
- 1.6122663996643651,
- 0.8612394422674909,
- 0.8612394422674919,
- 6.449588561676761,
+ 1.5720584643579567,
+ 0.7946656826861964,
+ 0.7946656525739751,
+ 6.455520291414711,
],
tspan=(0.0, 1.0),
initial_refinement_level=4,
diff --git a/test/test_unit.jl b/test/test_unit.jl
index c72b51113f5..90ee21030d3 100644
--- a/test/test_unit.jl
+++ b/test/test_unit.jl
@@ -416,8 +416,9 @@ end
indicator_hg = IndicatorHennemannGassner(1.0, 0.0, true, "variable", "cache")
@test_nowarn show(stdout, indicator_hg)
- limiter_idp = SubcellLimiterIDP(true, [1], true, [1], ["variable"], 0.1, "cache", 1,
- (1.0, 1.0), 1.0)
+ limiter_idp = SubcellLimiterIDP(true, [1], true, [1], ["variable"], 0.1,
+ true, [(Trixi.entropy_guermond_etal, min)], "cache",
+ 1, (1.0, 1.0), 1.0)
@test_nowarn show(stdout, limiter_idp)
indicator_loehner = IndicatorLöhner(1.0, "variable", (; cache = nothing))
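The constructor exercised in this unit test is the internal positional one; in an elixir the limiter is set up via keyword arguments. The following minimal sketch assumes the keyword names introduced by this patch (`local_twosided_variables_cons`, `local_onesided_variables_nonlinear`) together with standard Trixi.jl building blocks; it is meant as orientation only, not as a verbatim excerpt from the repository.

    using Trixi

    equations = CompressibleEulerEquations2D(1.4)
    basis = LobattoLegendreBasis(3)
    surface_flux = flux_lax_friedrichs
    volume_flux = flux_ranocha

    # Two-sided limiting of the density plus one-sided limiting of a nonlinear entropy
    limiter_idp = SubcellLimiterIDP(equations, basis;
                                    local_twosided_variables_cons = ["rho"],
                                    local_onesided_variables_nonlinear = [(Trixi.entropy_guermond_etal, min)])
    volume_integral = VolumeIntegralSubcellLimiting(limiter_idp;
                                                    volume_flux_dg = volume_flux,
                                                    volume_flux_fv = surface_flux)
    solver = DGSEM(basis, surface_flux, volume_integral)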
From ff3106c5944cdb4fbc47ca485e6f716d2d9ffae4 Mon Sep 17 00:00:00 2001
From: Michael Schlottke-Lakemper
Date: Thu, 9 May 2024 17:12:01 +0200
Subject: [PATCH 06/44] Add changelog to docs (#1933)
* Add changelog to docs
* Update docs/make.jl
Co-authored-by: Hendrik Ranocha
* Add compat bounds :facepalm:
* Update review checklist to include PR number
---------
Co-authored-by: Hendrik Ranocha
---
.github/review-checklist.md | 4 ++--
NEWS.md | 23 +++++++++++------------
docs/.gitignore | 1 +
docs/Project.toml | 2 ++
docs/make.jl | 10 ++++++++++
5 files changed, 26 insertions(+), 14 deletions(-)
diff --git a/.github/review-checklist.md b/.github/review-checklist.md
index 2d8a24f1971..69410a07011 100644
--- a/.github/review-checklist.md
+++ b/.github/review-checklist.md
@@ -19,7 +19,7 @@ This checklist is meant to assist creators of PRs (to let them know what reviewe
- [ ] Relevant publications are referenced in docstrings (see [example](https://github.com/trixi-framework/Trixi.jl/blob/7f83a1a938eecd9b841efe215a6e482e67cfdcc1/src/equations/compressible_euler_2d.jl#L601-L615) for formatting).
- [ ] Inline comments are used to document longer or unusual code sections.
- [ ] Comments describe intent ("why?") and not just functionality ("what?").
-- [ ] If the PR introduces a significant change or new feature, it is documented in `NEWS.md`.
+- [ ] If the PR introduces a significant change or new feature, it is documented in `NEWS.md` with its PR number.
#### Testing
- [ ] The PR passes all tests.
@@ -35,4 +35,4 @@ This checklist is meant to assist creators of PRs (to let them know what reviewe
- [ ] If new equations/methods are added, a convergence test has been run and the results
are posted in the PR.
-*Created with :heart: by the Trixi.jl community.*
\ No newline at end of file
+*Created with :heart: by the Trixi.jl community.*
diff --git a/NEWS.md b/NEWS.md
index 0b4279e9e7a..e2902229f71 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -8,11 +8,11 @@ for human readability.
#### Added
- Implementation of `TimeSeriesCallback` for curvilinear meshes on `UnstructuredMesh2D` and extension
- to 1D and 3D on `TreeMesh`.
-- Implementation of 1D Linearized Euler Equations.
-- New analysis callback for 2D `P4estMesh` to compute integrated quantities along a boundary surface, e.g., pressure lift and drag coefficients.
-- Optional tuple parameter for `GlmSpeedCallback` called `semi_indices` to specify for which semidiscretization of a `SemidiscretizationCoupled` we need to update the GLM speed.
-- Subcell local one-sided limiting support for nonlinear variables in 2D for `TreeMesh`.
+ to 1D and 3D on `TreeMesh` ([#1855], [#1873]).
+- Implementation of 1D Linearized Euler Equations ([#1867]).
+- New analysis callback for 2D `P4estMesh` to compute integrated quantities along a boundary surface, e.g., pressure lift and drag coefficients ([#1812]).
+- Optional tuple parameter for `GlmSpeedCallback` called `semi_indices` to specify for which semidiscretization of a `SemidiscretizationCoupled` we need to update the GLM speed ([#1835]).
+- Subcell local one-sided limiting support for nonlinear variables in 2D for `TreeMesh` ([#1792]).
## Changes when updating to v0.7 from v0.6.x
@@ -190,9 +190,8 @@ for human readability.
#### Added
- Experimental support for artificial neural network-based indicators for shock capturing and
- adaptive mesh refinement ([#632](https://github.com/trixi-framework/Trixi.jl/pull/632))
-- Experimental support for direct-hybrid aeroacoustics simulations
- ([#712](https://github.com/trixi-framework/Trixi.jl/pull/712))
+ adaptive mesh refinement ([#632])
+- Experimental support for direct-hybrid aeroacoustics simulations ([#712])
- Implementation of shallow water equations in 2D
- Experimental support for interactive visualization with [Makie.jl](https://makie.juliaplots.org/)
@@ -228,7 +227,7 @@ for human readability.
- acoustic perturbation equations
- Lattice-Boltzmann equations
- Composable `FluxPlusDissipation` and `FluxLaxFriedrichs()`, `FluxHLL()` with adaptable
- wave speed estimates were added in [#493](https://github.com/trixi-framework/Trixi.jl/pull/493)
+ wave speed estimates were added in [#493]
- New structured, curvilinear, conforming mesh type `StructuredMesh`
- New unstructured, curvilinear, conforming mesh type `UnstructuredMesh2D` in 2D
- New unstructured, curvilinear, adaptive (non-conforming) mesh type `P4estMesh` in 2D and 3D
@@ -241,13 +240,13 @@ for human readability.
- `flux_lax_friedrichs(u_ll, u_rr, orientation, equations::LatticeBoltzmannEquations2D)` and
`flux_lax_friedrichs(u_ll, u_rr, orientation, equations::LatticeBoltzmannEquations3D)`
were actually using the logic of `flux_godunov`. Thus, they were renamed accordingly
- in [#493](https://github.com/trixi-framework/Trixi.jl/pull/493). This is considered a bugfix
+ in [#493]. This is considered a bugfix
(released in Trixi.jl v0.3.22).
- The required Julia version is updated to v1.6.
#### Deprecated
-- `calcflux` → `flux` ([#463](https://github.com/trixi-framework/Trixi.jl/pull/463))
+- `calcflux` → `flux` ([#463])
- `flux_upwind` → `flux_godunov`
- `flux_hindenlang` → `flux_hindenlang_gassner`
- Providing the keyword argument `solution_variables` of `SaveSolutionCallback`
@@ -259,6 +258,6 @@ for human readability.
only a single two-point numerical flux for nonconservative is deprecated. The new
interface is described in a tutorial. Now, a tuple of two numerical fluxes of the
form `(conservative_flux, nonconservative_flux)` needs to be passed for
- nonconservative equations, see [#657](https://github.com/trixi-framework/Trixi.jl/pull/657).
+ nonconservative equations, see [#657].
#### Removed
diff --git a/docs/.gitignore b/docs/.gitignore
index c8a9e842246..cc6f90fae09 100644
--- a/docs/.gitignore
+++ b/docs/.gitignore
@@ -1,3 +1,4 @@
+src/changelog.md
src/code_of_conduct.md
src/contributing.md
diff --git a/docs/Project.toml b/docs/Project.toml
index 3b8d169fdb8..9f9bb956274 100644
--- a/docs/Project.toml
+++ b/docs/Project.toml
@@ -1,5 +1,6 @@
[deps]
CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0"
+Changelog = "5217a498-cd5d-4ec6-b8c2-9b85a09b6e3e"
Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4"
ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
HOHQMesh = "e4f4c7b8-17cb-445a-93c5-f69190ed6c8c"
@@ -14,6 +15,7 @@ TrixiBase = "9a0f1c46-06d5-4909-a5a3-ce25d3fa3284"
[compat]
CairoMakie = "0.6, 0.7, 0.8, 0.9, 0.10, 0.11"
+Changelog = "1.1"
Documenter = "1"
ForwardDiff = "0.10"
HOHQMesh = "0.1, 0.2"
diff --git a/docs/make.jl b/docs/make.jl
index 50251024262..73ee86abd8d 100644
--- a/docs/make.jl
+++ b/docs/make.jl
@@ -1,5 +1,6 @@
using Documenter
import Pkg
+using Changelog: Changelog
# Fix for https://github.com/trixi-framework/Trixi.jl/issues/668
if (get(ENV, "CI", nothing) != "true") && (get(ENV, "TRIXI_DOC_DEFAULT_ENVIRONMENT", nothing) != "true")
@@ -99,6 +100,14 @@ files = [
]
tutorials = create_tutorials(files)
+# Create changelog
+Changelog.generate(
+ Changelog.Documenter(), # output type
+ joinpath(@__DIR__, "..", "NEWS.md"), # input file
+ joinpath(@__DIR__, "src", "changelog.md"); # output file
+ repo = "trixi-framework/Trixi.jl", # default repository for links
+)
+
# Make documentation
makedocs(
# Specify modules for which docstrings should be shown
@@ -151,6 +160,7 @@ makedocs(
"TrixiBase.jl" => "reference-trixibase.md",
"Trixi2Vtk.jl" => "reference-trixi2vtk.md"
],
+ "Changelog" => "changelog.md",
"Authors" => "authors.md",
"Contributing" => "contributing.md",
"Code of Conduct" => "code_of_conduct.md",
From 8a9fc7baeca9807de185592d5cb8d60040a24f09 Mon Sep 17 00:00:00 2001
From: Michael Schlottke-Lakemper
Date: Fri, 10 May 2024 06:42:17 +0200
Subject: [PATCH 07/44] Add StructuredMeshView as proxy between solver and
actual StructuredMesh (#1624)
* Add StructuredMeshView as proxy between solver and actual StructuredMesh
* Enable StructuredMeshView on submesh
* Attempt at using the coupled semidiscretization with mesh views.
* Update structured_mesh_view.jl
Format.
* Applied autoformatter on smview elixir.
* Applied autoformatter on meshview.
* Corrected minor typo with StructuredMeshView.
* Corrected x-boundaries for smview example elixir.
* Removed parent's periodicity for smview.
* Removed redundant and problematic redefinition of StructuredMeshView function.
* Applied auto formatter on files affected by the structured mesh view.
* Applied autoformatter.
* Added entries for meshview IO.
* Added structured mesh view to parametric types.
* Added functionality of writing out mesh files for StructuredMeshView
for different time steps.
* Added temptative code for dynamically changing mesh view sizes.
* Applied autoformatter.
* Corrected the calculation of the coordinate mapping for mesh views.
* Added cells_per_dimension to the StructuredMeshView.
* Added StructuredMeshView to max_dt calculations.
* Applied autoformatter.
* Applied autoformatter.
* Cleaned up meshview coupled example.
* Added 2d structured mesh views to periodicity checks.
* Cleaned up coupled structured mesh view example so that it
can be used as a test.
* Added 2d structured mesh view to the tests.
* Added analysis_interval variable.
* Applied updated autoformatter.
* Removed unused lines of code.
Added comment about muladd.
* Added explanatory comments.
* Removed unused code.
* Update examples/structured_2d_dgsem/elixir_advection_smview.jl
Co-authored-by: Daniel Doehring
* Update examples/structured_2d_dgsem/elixir_advection_smview.jl
Co-authored-by: Daniel Doehring
* Update examples/structured_2d_dgsem/elixir_advection_smview.jl
Co-authored-by: Daniel Doehring
* Update src/meshes/mesh_io.jl
Co-authored-by: Daniel Doehring
* Update src/meshes/structured_mesh_view.jl
Co-authored-by: Daniel Doehring
* Corrected comment on solver.
* Changed order of imported meshes.
* Update src/meshes/structured_mesh_view.jl
Co-authored-by: Daniel Doehring
* Added comment about coupling interface indexing.
* Applied autoformatter.
* Update src/meshes/structured_mesh_view.jl
Co-authored-by: Daniel Doehring
* Renamed index_min and index_max to indices_min and indices_max.
* Removed relative tolerance from meshview test.
* Removed further relative tolerance as it is not needed.
* Update examples/structured_2d_dgsem/elixir_advection_smview.jl
Co-authored-by: Daniel Doehring
* Fixed typo on parent mesh generation.
* Applied autoformatter.
* Updated test results for elixir_advection_smview.jl.
* Update src/meshes/mesh_io.jl
Co-authored-by: Michael Schlottke-Lakemper
* Update src/meshes/mesh_io.jl
Co-authored-by: Michael Schlottke-Lakemper
* Renamed elixir_advection_smview.jl to elixir_advection_meshview.jl
since 'structured' is redundant for a file in this directory.
* Renamed elixir_advection_smview.jl to elixir_advection_meshview.jl.
* Moved save_mesh_file function for StructuredMeshView.
* Update src/meshes/structured_mesh_view.jl
Co-authored-by: Michael Schlottke-Lakemper
* Removed redundant check for structured mesh views.
---------
Co-authored-by: SimonCan
Co-authored-by: Simon Candelaresi <10759273+SimonCan@users.noreply.github.com>
Co-authored-by: iomsn
Co-authored-by: Daniel Doehring
---
.../elixir_advection_meshview.jl | 122 ++++++++++++++++
src/Trixi.jl | 3 +-
src/callbacks_step/analysis_dg2d.jl | 22 +--
src/callbacks_step/save_solution_dg.jl | 1 +
src/callbacks_step/stepsize_dg2d.jl | 4 +-
src/meshes/mesh_io.jl | 7 +-
src/meshes/meshes.jl | 1 +
src/meshes/structured_mesh_view.jl | 132 ++++++++++++++++++
.../semidiscretization_coupled.jl | 10 +-
.../semidiscretization_hyperbolic.jl | 3 +-
src/solvers/dg.jl | 5 +-
src/solvers/dgsem_structured/containers.jl | 3 +-
src/solvers/dgsem_structured/containers_2d.jl | 7 +-
src/solvers/dgsem_structured/dg.jl | 11 +-
src/solvers/dgsem_structured/dg_2d.jl | 23 +--
src/solvers/dgsem_tree/dg_2d.jl | 8 +-
test/test_structured_2d.jl | 24 ++++
17 files changed, 347 insertions(+), 39 deletions(-)
create mode 100644 examples/structured_2d_dgsem/elixir_advection_meshview.jl
create mode 100644 src/meshes/structured_mesh_view.jl
diff --git a/examples/structured_2d_dgsem/elixir_advection_meshview.jl b/examples/structured_2d_dgsem/elixir_advection_meshview.jl
new file mode 100644
index 00000000000..d8d27031090
--- /dev/null
+++ b/examples/structured_2d_dgsem/elixir_advection_meshview.jl
@@ -0,0 +1,122 @@
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# Coupled semidiscretization of two linear advection systems using converter functions
+# and mesh views for the semidiscretizations. First we define a parent mesh
+# for the entire physical domain, then we define the two mesh views on this parent.
+#
+# In this elixir, we have a square domain that is divided into left and right subdomains.
+# On each half of the domain, a completely independent `SemidiscretizationHyperbolic`
+# is created for the linear advection equations. The two systems are coupled in the
+# x-direction.
+# For a high-level overview, see also the figure below:
+#
+# (-1, 1) ( 1, 1)
+# ┌────────────────────┬────────────────────┐
+# │ ↑ periodic ↑ │ ↑ periodic ↑ │
+# │ │ │
+# │ ========= │ ========= │
+# │ system #1 │ system #2 │
+# │ ========= │ ========= │
+# │ │ │
+# │<-- coupled │<-- coupled │
+# │ coupled -->│ coupled -->│
+# │ │ │
+# │ ↓ periodic ↓ │ ↓ periodic ↓ │
+# └────────────────────┴────────────────────┘
+# (-1, -1) ( 1, -1)
+
+advection_velocity = (0.2, -0.7)
+equations = LinearScalarAdvectionEquation2D(advection_velocity)
+
+# Create DG solver with polynomial degree = 3
+solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs)
+
+# Domain size of the parent mesh.
+coordinates_min = (-1.0, -1.0)
+coordinates_max = (1.0, 1.0)
+
+# Cell dimensions of the parent mesh.
+cells_per_dimension = (16, 16)
+
+# Create parent mesh with 16 x 16 elements
+parent_mesh = StructuredMesh(cells_per_dimension, coordinates_min, coordinates_max)
+
+# Create the two mesh views, each of which takes half of the parent mesh.
+mesh1 = StructuredMeshView(parent_mesh; indices_min = (1, 1), indices_max = (8, 16))
+mesh2 = StructuredMeshView(parent_mesh; indices_min = (9, 1), indices_max = (16, 16))
+
+# The coupling function is simply the identity, as we are dealing with two identical systems.
+coupling_function = (x, u, equations_other, equations_own) -> u
+
+# Define the coupled boundary conditions
+# The indices (:end, :i_forward) and (:begin, :i_forward) denote the interface indexing.
+# For a system with coupling in x and y see examples/structured_2d_dgsem/elixir_advection_coupled.jl.
+boundary_conditions1 = (
+    # Connect left boundary of mesh 1 with right boundary of mesh 2 (and vice versa)
+ x_neg = BoundaryConditionCoupled(2, (:end, :i_forward), Float64,
+ coupling_function),
+ x_pos = BoundaryConditionCoupled(2, (:begin, :i_forward), Float64,
+ coupling_function),
+ y_neg = boundary_condition_periodic,
+ y_pos = boundary_condition_periodic)
+boundary_conditions2 = (
+ # Connect left boundary with right boundary of left mesh
+ x_neg = BoundaryConditionCoupled(1, (:end, :i_forward), Float64,
+ coupling_function),
+ x_pos = BoundaryConditionCoupled(1, (:begin, :i_forward), Float64,
+ coupling_function),
+ y_neg = boundary_condition_periodic,
+ y_pos = boundary_condition_periodic)
+
+# A semidiscretization collects data structures and functions for the spatial discretization
+semi1 = SemidiscretizationHyperbolic(mesh1, equations, initial_condition_convergence_test,
+ solver,
+ boundary_conditions = boundary_conditions1)
+semi2 = SemidiscretizationHyperbolic(mesh2, equations, initial_condition_convergence_test,
+ solver,
+ boundary_conditions = boundary_conditions2)
+semi = SemidiscretizationCoupled(semi1, semi2)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+# Create ODE problem with time span from 0.0 to 1.0
+ode = semidiscretize(semi, (0.0, 1.0));
+
+# At the beginning of the main loop, the SummaryCallback prints a summary of the simulation setup
+# and resets the timers
+summary_callback = SummaryCallback()
+
+# The AnalysisCallback allows analysing the solution at regular intervals and prints the results
+analysis_callback1 = AnalysisCallback(semi1, interval = 100)
+analysis_callback2 = AnalysisCallback(semi2, interval = 100)
+analysis_callback = AnalysisCallbackCoupled(semi, analysis_callback1, analysis_callback2)
+
+analysis_interval = 100
+alive_callback = AliveCallback(analysis_interval = analysis_interval)
+
+# The SaveSolutionCallback allows saving the solution to a file at regular intervals
+save_solution = SaveSolutionCallback(interval = 100,
+ save_initial_solution = true,
+ save_final_solution = true,
+ solution_variables = cons2prim)
+
+# The StepsizeCallback handles the re-calculation of the maximum Δt after each time step
+stepsize_callback = StepsizeCallback(cfl = 1.6)
+
+# Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver
+callbacks = CallbackSet(summary_callback, analysis_callback, save_solution,
+ stepsize_callback)
+
+###############################################################################
+# run the simulation
+
+# OrdinaryDiffEq's `solve` method evolves the solution in time and executes the passed callbacks
+sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false),
+ dt = 5.0e-2, # solve needs some value here but it will be overwritten by the stepsize_callback
+ save_everystep = false, callback = callbacks);
+
+# Print the timer summary
+summary_callback()
diff --git a/src/Trixi.jl b/src/Trixi.jl
index 5511f7e02e2..f3977f1f058 100644
--- a/src/Trixi.jl
+++ b/src/Trixi.jl
@@ -225,7 +225,8 @@ export entropy, energy_total, energy_kinetic, energy_internal, energy_magnetic,
export lake_at_rest_error
export ncomponents, eachcomponent
-export TreeMesh, StructuredMesh, UnstructuredMesh2D, P4estMesh, T8codeMesh
+export TreeMesh, StructuredMesh, StructuredMeshView, UnstructuredMesh2D, P4estMesh,
+ T8codeMesh
export DG,
DGSEM, LobattoLegendreBasis,
diff --git a/src/callbacks_step/analysis_dg2d.jl b/src/callbacks_step/analysis_dg2d.jl
index a9e0cf87b0a..de6b9a2a4a6 100644
--- a/src/callbacks_step/analysis_dg2d.jl
+++ b/src/callbacks_step/analysis_dg2d.jl
@@ -30,7 +30,8 @@ function create_cache_analysis(analyzer, mesh::TreeMesh{2},
end
function create_cache_analysis(analyzer,
- mesh::Union{StructuredMesh{2}, UnstructuredMesh2D,
+ mesh::Union{StructuredMesh{2}, StructuredMeshView{2},
+ UnstructuredMesh2D,
P4estMesh{2}, T8codeMesh{2}},
equations, dg::DG, cache,
RealT, uEltype)
@@ -107,8 +108,9 @@ function calc_error_norms(func, u, t, analyzer,
end
function calc_error_norms(func, u, t, analyzer,
- mesh::Union{StructuredMesh{2}, UnstructuredMesh2D,
- P4estMesh{2}, T8codeMesh{2}}, equations,
+ mesh::Union{StructuredMesh{2}, StructuredMeshView{2},
+ UnstructuredMesh2D, P4estMesh{2}, T8codeMesh{2}},
+ equations,
initial_condition, dg::DGSEM, cache, cache_analysis)
@unpack vandermonde, weights = analyzer
@unpack node_coordinates, inverse_jacobian = cache.elements
@@ -175,8 +177,10 @@ function integrate_via_indices(func::Func, u,
end
function integrate_via_indices(func::Func, u,
- mesh::Union{StructuredMesh{2}, UnstructuredMesh2D,
- P4estMesh{2}, T8codeMesh{2}}, equations,
+ mesh::Union{StructuredMesh{2}, StructuredMeshView{2},
+ UnstructuredMesh2D, P4estMesh{2},
+ T8codeMesh{2}},
+ equations,
dg::DGSEM, cache, args...; normalize = true) where {Func}
@unpack weights = dg.basis
@@ -203,8 +207,8 @@ function integrate_via_indices(func::Func, u,
end
function integrate(func::Func, u,
- mesh::Union{TreeMesh{2}, StructuredMesh{2}, UnstructuredMesh2D,
- P4estMesh{2}, T8codeMesh{2}},
+ mesh::Union{TreeMesh{2}, StructuredMesh{2}, StructuredMeshView{2},
+ UnstructuredMesh2D, P4estMesh{2}, T8codeMesh{2}},
equations, dg::DG, cache; normalize = true) where {Func}
integrate_via_indices(u, mesh, equations, dg, cache;
normalize = normalize) do u, i, j, element, equations, dg
@@ -232,8 +236,8 @@ function integrate(func::Func, u,
end
function analyze(::typeof(entropy_timederivative), du, u, t,
- mesh::Union{TreeMesh{2}, StructuredMesh{2}, UnstructuredMesh2D,
- P4estMesh{2}, T8codeMesh{2}},
+ mesh::Union{TreeMesh{2}, StructuredMesh{2}, StructuredMeshView{2},
+ UnstructuredMesh2D, P4estMesh{2}, T8codeMesh{2}},
equations, dg::DG, cache)
# Calculate ∫(∂S/∂u ⋅ ∂u/∂t)dΩ
integrate_via_indices(u, mesh, equations, dg, cache,
diff --git a/src/callbacks_step/save_solution_dg.jl b/src/callbacks_step/save_solution_dg.jl
index 350aee7336a..7367886ca94 100644
--- a/src/callbacks_step/save_solution_dg.jl
+++ b/src/callbacks_step/save_solution_dg.jl
@@ -7,6 +7,7 @@
function save_solution_file(u, time, dt, timestep,
mesh::Union{SerialTreeMesh, StructuredMesh,
+ StructuredMeshView,
UnstructuredMesh2D, SerialP4estMesh,
SerialT8codeMesh},
equations, dg::DG, cache,
diff --git a/src/callbacks_step/stepsize_dg2d.jl b/src/callbacks_step/stepsize_dg2d.jl
index c6d32c0f6dc..41251506a0d 100644
--- a/src/callbacks_step/stepsize_dg2d.jl
+++ b/src/callbacks_step/stepsize_dg2d.jl
@@ -77,7 +77,7 @@ end
function max_dt(u, t,
mesh::Union{StructuredMesh{2}, UnstructuredMesh2D, P4estMesh{2},
- T8codeMesh{2}},
+ T8codeMesh{2}, StructuredMeshView{2}},
constant_speed::False, equations, dg::DG, cache)
# to avoid a division by zero if the speed vanishes everywhere,
# e.g. for steady-state linear advection
@@ -113,7 +113,7 @@ end
function max_dt(u, t,
mesh::Union{StructuredMesh{2}, UnstructuredMesh2D, P4estMesh{2},
- T8codeMesh{2}},
+ T8codeMesh{2}, StructuredMeshView{2}},
constant_speed::True, equations, dg::DG, cache)
@unpack contravariant_vectors, inverse_jacobian = cache.elements
diff --git a/src/meshes/mesh_io.jl b/src/meshes/mesh_io.jl
index c1d78cbbf1e..d74a0c0cea1 100644
--- a/src/meshes/mesh_io.jl
+++ b/src/meshes/mesh_io.jl
@@ -97,7 +97,10 @@ end
# of the mesh, like its size and the type of boundary mapping function.
# Then, within Trixi2Vtk, the StructuredMesh and its node coordinates are reconstructed from
# these attributes for plotting purposes
-function save_mesh_file(mesh::StructuredMesh, output_directory; system = "")
+# Note: the `timestep` argument is needed for compatibility with the method for
+# `StructuredMeshView`
+function save_mesh_file(mesh::StructuredMesh, output_directory; system = "",
+ timestep = 0)
# Create output directory (if it does not exist)
mkpath(output_directory)
@@ -256,7 +259,7 @@ function load_mesh_serial(mesh_file::AbstractString; n_cells_max, RealT)
end
mesh = TreeMesh(SerialTree{ndims}, max(n_cells_max, capacity))
load_mesh!(mesh, mesh_file)
- elseif mesh_type == "StructuredMesh"
+ elseif mesh_type in ("StructuredMesh", "StructuredMeshView")
size_, mapping_as_string = h5open(mesh_file, "r") do file
return read(attributes(file)["size"]),
read(attributes(file)["mapping"])
diff --git a/src/meshes/meshes.jl b/src/meshes/meshes.jl
index ed2158b169a..4d6016e5564 100644
--- a/src/meshes/meshes.jl
+++ b/src/meshes/meshes.jl
@@ -7,6 +7,7 @@
include("tree_mesh.jl")
include("structured_mesh.jl")
+include("structured_mesh_view.jl")
include("surface_interpolant.jl")
include("unstructured_mesh.jl")
include("face_interpolant.jl")
diff --git a/src/meshes/structured_mesh_view.jl b/src/meshes/structured_mesh_view.jl
new file mode 100644
index 00000000000..bd55115cc90
--- /dev/null
+++ b/src/meshes/structured_mesh_view.jl
@@ -0,0 +1,132 @@
+# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
+# Since these FMAs can increase the performance of many numerical algorithms,
+# we need to opt-in explicitly.
+# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
+@muladd begin
+#! format: noindent
+
+"""
+ StructuredMeshView{NDIMS, RealT <: Real} <: AbstractMesh{NDIMS}
+
+A view on a structured curved mesh.
+"""
+mutable struct StructuredMeshView{NDIMS, RealT <: Real} <: AbstractMesh{NDIMS}
+ parent::StructuredMesh{NDIMS, RealT}
+ cells_per_dimension::NTuple{NDIMS, Int}
+ mapping::Any # Not relevant for performance
+ mapping_as_string::String
+ current_filename::String
+ indices_min::NTuple{NDIMS, Int}
+ indices_max::NTuple{NDIMS, Int}
+ unsaved_changes::Bool
+end
+
+"""
+ StructuredMeshView(parent; indices_min, indices_max)
+
+Create a StructuredMeshView on a StructuredMesh parent.
+
+# Arguments
+- `parent`: the parent StructuredMesh.
+- `indices_min`: starting indices of the parent mesh.
+- `indices_max`: ending indices of the parent mesh.
+"""
+function StructuredMeshView(parent::StructuredMesh{NDIMS, RealT};
+ indices_min = ntuple(_ -> 1, Val(NDIMS)),
+ indices_max = size(parent)) where {NDIMS, RealT}
+ @assert indices_min <= indices_max
+ @assert all(indices_min .> 0)
+ @assert indices_max <= size(parent)
+
+ cells_per_dimension = indices_max .- indices_min .+ 1
+
+ # Compute cell sizes `deltas`
+ deltas = (parent.mapping.coordinates_max .- parent.mapping.coordinates_min) ./
+ parent.cells_per_dimension
+ # Calculate the domain boundaries.
+ coordinates_min = parent.mapping.coordinates_min .+ deltas .* (indices_min .- 1)
+ coordinates_max = parent.mapping.coordinates_min .+ deltas .* indices_max
+ mapping = coordinates2mapping(coordinates_min, coordinates_max)
+ mapping_as_string = """
+ coordinates_min = $coordinates_min
+ coordinates_max = $coordinates_max
+ mapping = coordinates2mapping(coordinates_min, coordinates_max)
+ """
+
+ return StructuredMeshView{NDIMS, RealT}(parent, cells_per_dimension, mapping,
+ mapping_as_string,
+ parent.current_filename,
+ indices_min, indices_max,
+ parent.unsaved_changes)
+end
+
+# Check if mesh is periodic
+function isperiodic(mesh::StructuredMeshView)
+ @unpack parent = mesh
+ return isperiodic(parent) && size(parent) == size(mesh)
+end
+
+function isperiodic(mesh::StructuredMeshView, dimension)
+ @unpack parent, indices_min, indices_max = mesh
+ return (isperiodic(parent, dimension) &&
+ indices_min[dimension] == 1 &&
+ indices_max[dimension] == size(parent, dimension))
+end
+
+@inline Base.ndims(::StructuredMeshView{NDIMS}) where {NDIMS} = NDIMS
+@inline Base.real(::StructuredMeshView{NDIMS, RealT}) where {NDIMS, RealT} = RealT
+function Base.size(mesh::StructuredMeshView)
+ @unpack indices_min, indices_max = mesh
+ return indices_max .- indices_min .+ 1
+end
+function Base.size(mesh::StructuredMeshView, i)
+ @unpack indices_min, indices_max = mesh
+ return indices_max[i] - indices_min[i] + 1
+end
+Base.axes(mesh::StructuredMeshView) = map(Base.OneTo, size(mesh))
+Base.axes(mesh::StructuredMeshView, i) = Base.OneTo(size(mesh, i))
+
+function calc_node_coordinates!(node_coordinates, element,
+ cell_x, cell_y, mapping,
+ mesh::StructuredMeshView{2},
+ basis)
+ @unpack nodes = basis
+
+ # Get cell length in reference mesh
+ dx = 2 / size(mesh, 1)
+ dy = 2 / size(mesh, 2)
+
+ # Calculate node coordinates of reference mesh
+ cell_x_offset = -1 + (cell_x - 1) * dx + dx / 2
+ cell_y_offset = -1 + (cell_y - 1) * dy + dy / 2
+
+ for j in eachnode(basis), i in eachnode(basis)
+ # node_coordinates are the mapped reference node_coordinates
+ node_coordinates[:, i, j, element] .= mapping(cell_x_offset + dx / 2 * nodes[i],
+ cell_y_offset + dy / 2 * nodes[j])
+ end
+end
+
+# Does not save the mesh itself to an HDF5 file. Instead saves important attributes
+# of the mesh, like its size and the type of boundary mapping function.
+# Then, within Trixi2Vtk, the StructuredMesh and its node coordinates are reconstructed from
+# these attributes for plotting purposes.
+function save_mesh_file(mesh::StructuredMeshView, output_directory; system = "",
+ timestep = 0)
+ # Create output directory (if it does not exist)
+ mkpath(output_directory)
+
+ filename = joinpath(output_directory, @sprintf("mesh_%s_%06d.h5", system, timestep))
+
+ # Open file (clobber existing content)
+ h5open(filename, "w") do file
+ # Add context information as attributes
+ attributes(file)["mesh_type"] = get_name(mesh)
+ attributes(file)["ndims"] = ndims(mesh)
+ attributes(file)["size"] = collect(size(mesh))
+ attributes(file)["mapping"] = mesh.mapping_as_string
+ end
+
+ return filename
+end
+end # @muladd
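To make the index arithmetic in the constructor above concrete, the following standalone snippet reproduces the `deltas`/`coordinates_min`/`coordinates_max` computation for the right-half view used in the elixir above (parent domain [-1, 1]^2 with 16 x 16 cells, view indices (9, 1) to (16, 16)). It works on plain tuples instead of a real `StructuredMesh` and is only meant to illustrate the formula.

    coordinates_min_parent = (-1.0, -1.0)
    coordinates_max_parent = (1.0, 1.0)
    cells_per_dimension_parent = (16, 16)
    indices_min = (9, 1)
    indices_max = (16, 16)

    # Cell sizes of the parent mesh
    deltas = (coordinates_max_parent .- coordinates_min_parent) ./ cells_per_dimension_parent
    # => (0.125, 0.125)

    # Domain boundaries of the view, as computed in the constructor
    coordinates_min_view = coordinates_min_parent .+ deltas .* (indices_min .- 1)  # (0.0, -1.0)
    coordinates_max_view = coordinates_min_parent .+ deltas .* indices_max         # (1.0, 1.0)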
diff --git a/src/semidiscretization/semidiscretization_coupled.jl b/src/semidiscretization/semidiscretization_coupled.jl
index 33803752c2e..cc629d1a674 100644
--- a/src/semidiscretization/semidiscretization_coupled.jl
+++ b/src/semidiscretization/semidiscretization_coupled.jl
@@ -316,7 +316,8 @@ function save_mesh(semi::SemidiscretizationCoupled, output_directory, timestep =
mesh, _, _, _ = mesh_equations_solver_cache(semi.semis[i])
if mesh.unsaved_changes
- mesh.current_filename = save_mesh_file(mesh, output_directory, system = i)
+ mesh.current_filename = save_mesh_file(mesh, output_directory; system = i,
+ timestep = timestep)
mesh.unsaved_changes = false
end
end
@@ -630,7 +631,9 @@ end
@inline function calc_boundary_flux_by_direction!(surface_flux_values, u, t,
orientation,
boundary_condition::BoundaryConditionCoupled,
- mesh::StructuredMesh, equations,
+ mesh::Union{StructuredMesh,
+ StructuredMeshView},
+ equations,
surface_integral, dg::DG, cache,
direction, node_indices,
surface_node_indices, element)
@@ -662,7 +665,8 @@ end
end
end
-function get_boundary_indices(element, orientation, mesh::StructuredMesh{2})
+function get_boundary_indices(element, orientation,
+ mesh::Union{StructuredMesh{2}, StructuredMeshView{2}})
cartesian_indices = CartesianIndices(size(mesh))
if orientation == 1
# Get index of element in y-direction
diff --git a/src/semidiscretization/semidiscretization_hyperbolic.jl b/src/semidiscretization/semidiscretization_hyperbolic.jl
index f61378a7dca..dcd211671c8 100644
--- a/src/semidiscretization/semidiscretization_hyperbolic.jl
+++ b/src/semidiscretization/semidiscretization_hyperbolic.jl
@@ -259,7 +259,8 @@ function check_periodicity_mesh_boundary_conditions(mesh::Union{TreeMesh{1},
end
function check_periodicity_mesh_boundary_conditions(mesh::Union{TreeMesh{2},
- StructuredMesh{2}},
+ StructuredMesh{2},
+ StructuredMeshView{2}},
boundary_conditions::Union{NamedTuple,
Tuple})
check_periodicity_mesh_boundary_conditions_x(mesh, boundary_conditions[1],
diff --git a/src/solvers/dg.jl b/src/solvers/dg.jl
index 9b61df62cc3..0ab947e697a 100644
--- a/src/solvers/dg.jl
+++ b/src/solvers/dg.jl
@@ -436,8 +436,9 @@ function get_node_variables!(node_variables, mesh, equations, dg::DG, cache)
get_node_variables!(node_variables, mesh, equations, dg.volume_integral, dg, cache)
end
-const MeshesDGSEM = Union{TreeMesh, StructuredMesh, UnstructuredMesh2D, P4estMesh,
- T8codeMesh}
+const MeshesDGSEM = Union{TreeMesh, StructuredMesh, StructuredMeshView,
+ UnstructuredMesh2D,
+ P4estMesh, T8codeMesh}
@inline function ndofs(mesh::MeshesDGSEM, dg::DG, cache)
nelements(cache.elements) * nnodes(dg)^ndims(mesh)
diff --git a/src/solvers/dgsem_structured/containers.jl b/src/solvers/dgsem_structured/containers.jl
index 8adf005b782..7b0d275c5b5 100644
--- a/src/solvers/dgsem_structured/containers.jl
+++ b/src/solvers/dgsem_structured/containers.jl
@@ -23,7 +23,8 @@ struct ElementContainer{NDIMS, RealT <: Real, uEltype <: Real, NDIMSP1, NDIMSP2,
end
# Create element container and initialize element data
-function init_elements(mesh::StructuredMesh{NDIMS, RealT},
+function init_elements(mesh::Union{StructuredMesh{NDIMS, RealT},
+ StructuredMeshView{NDIMS, RealT}},
equations::AbstractEquations,
basis,
::Type{uEltype}) where {NDIMS, RealT <: Real, uEltype <: Real}
diff --git a/src/solvers/dgsem_structured/containers_2d.jl b/src/solvers/dgsem_structured/containers_2d.jl
index fb6db48e0a5..8a0722fc5d5 100644
--- a/src/solvers/dgsem_structured/containers_2d.jl
+++ b/src/solvers/dgsem_structured/containers_2d.jl
@@ -6,7 +6,8 @@
#! format: noindent
# Initialize data structures in element container
-function init_elements!(elements, mesh::StructuredMesh{2}, basis::LobattoLegendreBasis)
+function init_elements!(elements, mesh::Union{StructuredMesh{2}, StructuredMeshView{2}},
+ basis::LobattoLegendreBasis)
@unpack node_coordinates, left_neighbors,
jacobian_matrix, contravariant_vectors, inverse_jacobian = elements
@@ -148,7 +149,9 @@ function calc_inverse_jacobian!(inverse_jacobian::AbstractArray{<:Any, 3}, eleme
end
# Save id of left neighbor of every element
-function initialize_left_neighbor_connectivity!(left_neighbors, mesh::StructuredMesh{2},
+function initialize_left_neighbor_connectivity!(left_neighbors,
+ mesh::Union{StructuredMesh{2},
+ StructuredMeshView{2}},
linear_indices)
# Neighbors in x-direction
for cell_y in 1:size(mesh, 2)
diff --git a/src/solvers/dgsem_structured/dg.jl b/src/solvers/dgsem_structured/dg.jl
index 5cf4c4ef78c..00e321fba65 100644
--- a/src/solvers/dgsem_structured/dg.jl
+++ b/src/solvers/dgsem_structured/dg.jl
@@ -8,7 +8,8 @@
# This method is called when a SemidiscretizationHyperbolic is constructed.
# It constructs the basic `cache` used throughout the simulation to compute
# the RHS etc.
-function create_cache(mesh::StructuredMesh, equations::AbstractEquations, dg::DG, ::Any,
+function create_cache(mesh::Union{StructuredMesh, StructuredMeshView},
+ equations::AbstractEquations, dg::DG, ::Any,
::Type{uEltype}) where {uEltype <: Real}
elements = init_elements(mesh, equations, dg.basis, uEltype)
@@ -30,7 +31,9 @@ end
@inline function calc_boundary_flux_by_direction!(surface_flux_values, u, t,
orientation,
boundary_condition::BoundaryConditionPeriodic,
- mesh::StructuredMesh, equations,
+ mesh::Union{StructuredMesh,
+ StructuredMeshView},
+ equations,
surface_integral, dg::DG, cache,
direction, node_indices,
surface_node_indices, element)
@@ -40,7 +43,9 @@ end
@inline function calc_boundary_flux_by_direction!(surface_flux_values, u, t,
orientation,
boundary_condition,
- mesh::StructuredMesh, equations,
+ mesh::Union{StructuredMesh,
+ StructuredMeshView},
+ equations,
surface_integral, dg::DG, cache,
direction, node_indices,
surface_node_indices, element)
diff --git a/src/solvers/dgsem_structured/dg_2d.jl b/src/solvers/dgsem_structured/dg_2d.jl
index 25a0eea096f..1eb244e06aa 100644
--- a/src/solvers/dgsem_structured/dg_2d.jl
+++ b/src/solvers/dgsem_structured/dg_2d.jl
@@ -6,7 +6,7 @@
#! format: noindent
function rhs!(du, u, t,
- mesh::StructuredMesh{2}, equations,
+ mesh::Union{StructuredMesh{2}, StructuredMeshView{2}}, equations,
initial_condition, boundary_conditions, source_terms::Source,
dg::DG, cache) where {Source}
# Reset du
@@ -58,8 +58,9 @@ See also https://github.com/trixi-framework/Trixi.jl/issues/1671#issuecomment-17
=#
@inline function weak_form_kernel!(du, u,
element,
- mesh::Union{StructuredMesh{2}, UnstructuredMesh2D,
- P4estMesh{2}, T8codeMesh{2}},
+ mesh::Union{StructuredMesh{2}, StructuredMeshView{2},
+ UnstructuredMesh2D, P4estMesh{2},
+ T8codeMesh{2}},
nonconservative_terms::False, equations,
dg::DGSEM, cache, alpha = true)
# true * [some floating point value] == [exactly the same floating point value]
@@ -387,7 +388,7 @@ end
end
function calc_interface_flux!(cache, u,
- mesh::StructuredMesh{2},
+ mesh::Union{StructuredMesh{2}, StructuredMeshView{2}},
nonconservative_terms, # can be True/False
equations, surface_integral, dg::DG)
@unpack elements = cache
@@ -416,7 +417,8 @@ end
@inline function calc_interface_flux!(surface_flux_values, left_element, right_element,
orientation, u,
- mesh::StructuredMesh{2},
+ mesh::Union{StructuredMesh{2},
+ StructuredMeshView{2}},
nonconservative_terms::False, equations,
surface_integral, dg::DG, cache)
# This is slow for LSA, but for some reason faster for Euler (see #519)
@@ -551,13 +553,14 @@ end
# TODO: Taal dimension agnostic
function calc_boundary_flux!(cache, u, t, boundary_condition::BoundaryConditionPeriodic,
- mesh::StructuredMesh{2}, equations, surface_integral,
- dg::DG)
+ mesh::Union{StructuredMesh{2}, StructuredMeshView{2}},
+ equations, surface_integral, dg::DG)
@assert isperiodic(mesh)
end
function calc_boundary_flux!(cache, u, t, boundary_conditions::NamedTuple,
- mesh::StructuredMesh{2}, equations, surface_integral,
+ mesh::Union{StructuredMesh{2}, StructuredMeshView{2}},
+ equations, surface_integral,
dg::DG)
@unpack surface_flux_values = cache.elements
linear_indices = LinearIndices(size(mesh))
@@ -616,8 +619,8 @@ function calc_boundary_flux!(cache, u, t, boundary_conditions::NamedTuple,
end
function apply_jacobian!(du,
- mesh::Union{StructuredMesh{2}, UnstructuredMesh2D,
- P4estMesh{2}, T8codeMesh{2}},
+ mesh::Union{StructuredMesh{2}, StructuredMeshView{2},
+ UnstructuredMesh2D, P4estMesh{2}, T8codeMesh{2}},
equations, dg::DG, cache)
@unpack inverse_jacobian = cache.elements
diff --git a/src/solvers/dgsem_tree/dg_2d.jl b/src/solvers/dgsem_tree/dg_2d.jl
index 547ed352ef3..6b66c2d4bfa 100644
--- a/src/solvers/dgsem_tree/dg_2d.jl
+++ b/src/solvers/dgsem_tree/dg_2d.jl
@@ -180,8 +180,8 @@ end
function calc_volume_integral!(du, u,
mesh::Union{TreeMesh{2}, StructuredMesh{2},
- UnstructuredMesh2D, P4estMesh{2},
- T8codeMesh{2}},
+ StructuredMeshView{2}, UnstructuredMesh2D,
+ P4estMesh{2}, T8codeMesh{2}},
nonconservative_terms, equations,
volume_integral::VolumeIntegralWeakForm,
dg::DGSEM, cache)
@@ -1085,7 +1085,9 @@ end
return nothing
end
-function calc_surface_integral!(du, u, mesh::Union{TreeMesh{2}, StructuredMesh{2}},
+function calc_surface_integral!(du, u,
+ mesh::Union{TreeMesh{2}, StructuredMesh{2},
+ StructuredMeshView{2}},
equations, surface_integral::SurfaceIntegralWeakForm,
dg::DG, cache)
@unpack boundary_interpolation = dg.basis
diff --git a/test/test_structured_2d.jl b/test/test_structured_2d.jl
index c3a792a0ffd..f095c97b19e 100644
--- a/test/test_structured_2d.jl
+++ b/test/test_structured_2d.jl
@@ -70,6 +70,30 @@ end
end
end
+@trixi_testset "elixir_advection_meshview.jl" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_meshview.jl"),
+ l2=[
+ 8.311947673083206e-6,
+ 8.311947673068427e-6,
+ ],
+ linf=[
+ 6.627000273318195e-5,
+ 6.62700027264096e-5,
+ ],
+ coverage_override=(maxiters = 10^5,))
+
+ @testset "analysis_callback(sol) for AnalysisCallbackCoupled" begin
+ # Ensure that we do not have excessive memory allocations
+ # (e.g., from type instabilities)
+ let
+ t = sol.t[end]
+ u_ode = sol.u[end]
+ du_ode = similar(u_ode)
+ @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000
+ end
+ end
+end
+
@trixi_testset "elixir_advection_extended.jl" begin
@test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_extended.jl"),
l2=[4.220397559713772e-6],
From d1c20c6fbc0d959f5657dea7041316ad29088e2f Mon Sep 17 00:00:00 2001
From: Johannes Markert <10619309+jmark@users.noreply.github.com>
Date: Fri, 10 May 2024 12:16:21 +0200
Subject: [PATCH 08/44] Feature: Read geometry information from t8code (#1900)
* Wrap from_abaqus routines.
* Implement geometry data transfer from t8code to Trixi.
* Updated examples.
* Fixed typos.
* Applied formatter.
* cubed sphere test case, copied from p4est
* add baroclinic instability (copy of p4est)
* add cubed sphere constructor
* Fix indentation.
* Fix indentation.
* Switching off formatter in two files.
* Upgrading T8code.jl.
* Fixed examples.
* Stress the different meaning of the first argument:
  it refers to the level of refinement in the lat/lon direction, not the number of
  trees as in the p4est version
* use lat lon levels
* add t8code cubed sphere tests
* Remove TODO comments
* Relaxing T8code.jl version requirement.
* Restricted t8code version requirement.
* Removed cubed spherical shell related code.
* Removed cubed spherical shell tests.
* Increasing code coverage.
* Increasing code coverage.
* Further increasing code coverage.
* Applied formatter.
* Update src/meshes/t8code_mesh.jl
Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com>
* Update src/meshes/t8code_mesh.jl
Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com>
* Update src/meshes/t8code_mesh.jl
Co-authored-by: Benedict <135045760+benegee@users.noreply.github.com>
* Update src/meshes/t8code_mesh.jl
Co-authored-by: Benedict <135045760+benegee@users.noreply.github.com>
* Update src/meshes/t8code_mesh.jl
Co-authored-by: Benedict <135045760+benegee@users.noreply.github.com>
* Update src/meshes/t8code_mesh.jl
Co-authored-by: Benedict <135045760+benegee@users.noreply.github.com>
* Update src/meshes/t8code_mesh.jl
Co-authored-by: Benedict <135045760+benegee@users.noreply.github.com>
* Update src/meshes/t8code_mesh.jl
Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com>
* Update test/test_t8code_2d.jl
Co-authored-by: Michael Schlottke-Lakemper
* Update src/meshes/t8code_mesh.jl
Co-authored-by: Hendrik Ranocha
* Update src/meshes/t8code_mesh.jl
Co-authored-by: Hendrik Ranocha
* Update src/meshes/t8code_mesh.jl
Co-authored-by: Michael Schlottke-Lakemper
* Fixing minor stuff.
* Update src/meshes/t8code_mesh.jl
Co-authored-by: Michael Schlottke-Lakemper
* Applying comments.
* Applied formatter.
* Reverting changes to original state.
---------
Co-authored-by: Johannes Markert
Co-authored-by: Benedict Geihe
Co-authored-by: Benedict <135045760+benegee@users.noreply.github.com>
Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com>
Co-authored-by: Michael Schlottke-Lakemper
Co-authored-by: Hendrik Ranocha
---
Project.toml | 2 +-
.../elixir_advection_amr_unstructured_flag.jl | 9 +-
.../elixir_advection_nonconforming_flag.jl | 6 +-
.../elixir_advection_unstructured_flag.jl | 9 +-
.../elixir_euler_free_stream.jl | 7 +-
...e_terms_nonconforming_unstructured_flag.jl | 7 +-
examples/t8code_2d_dgsem/elixir_mhd_rotor.jl | 7 +-
...lixir_advection_amr_unstructured_curved.jl | 7 +-
.../elixir_advection_unstructured_curved.jl | 7 +-
examples/t8code_3d_dgsem/elixir_euler_ec.jl | 7 +-
.../elixir_euler_free_stream.jl | 7 +-
.../elixir_euler_free_stream_extruded.jl | 7 +-
...terms_nonconforming_unstructured_curved.jl | 7 +-
src/meshes/p4est_mesh.jl | 51 +-
src/meshes/t8code_mesh.jl | 473 ++++++++++++------
test/test_t8code_2d.jl | 31 +-
test/test_t8code_3d.jl | 13 +-
17 files changed, 411 insertions(+), 246 deletions(-)
diff --git a/Project.toml b/Project.toml
index 68f9060b9d3..812d01733a4 100644
--- a/Project.toml
+++ b/Project.toml
@@ -92,7 +92,7 @@ StaticArrays = "1.5"
StrideArrays = "0.1.26"
StructArrays = "0.6.11"
SummationByPartsOperators = "0.5.41"
-T8code = "0.4.3, 0.5"
+T8code = "0.5"
TimerOutputs = "0.5.7"
Triangulate = "2.2"
TriplotBase = "0.1"
diff --git a/examples/t8code_2d_dgsem/elixir_advection_amr_unstructured_flag.jl b/examples/t8code_2d_dgsem/elixir_advection_amr_unstructured_flag.jl
index 0923e328487..9138586cccf 100644
--- a/examples/t8code_2d_dgsem/elixir_advection_amr_unstructured_flag.jl
+++ b/examples/t8code_2d_dgsem/elixir_advection_amr_unstructured_flag.jl
@@ -33,13 +33,8 @@ mapping_flag = Trixi.transfinite_mapping(faces)
mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp",
joinpath(@__DIR__, "square_unstructured_2.inp"))
-# INP mesh files are only support by p4est. Hence, we
-# create a p4est connecvity object first from which
-# we can create a t8code mesh.
-conn = Trixi.read_inp_p4est(mesh_file, Val(2))
-
-mesh = T8codeMesh(conn, polydeg = 3,
- mapping = mapping_flag,
+mesh = T8codeMesh(mesh_file, 2;
+ mapping = mapping_flag, polydeg = 3,
initial_refinement_level = 1)
semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver,
diff --git a/examples/t8code_2d_dgsem/elixir_advection_nonconforming_flag.jl b/examples/t8code_2d_dgsem/elixir_advection_nonconforming_flag.jl
index a39f3a7e195..48f78dd6da3 100644
--- a/examples/t8code_2d_dgsem/elixir_advection_nonconforming_flag.jl
+++ b/examples/t8code_2d_dgsem/elixir_advection_nonconforming_flag.jl
@@ -18,14 +18,14 @@ f3(s) = SVector(s, -1.0 + sin(0.5 * pi * s))
f4(s) = SVector(s, 1.0 + sin(0.5 * pi * s))
faces = (f1, f2, f3, f4)
-mapping = Trixi.transfinite_mapping(faces)
# Create T8codeMesh with 3 x 2 trees and 6 x 4 elements,
# approximate the geometry with a smaller polydeg for testing.
trees_per_dimension = (3, 2)
mesh = T8codeMesh(trees_per_dimension, polydeg = 3,
- mapping = mapping,
- initial_refinement_level = 1)
+ faces = faces,
+ initial_refinement_level = 1,
+ periodicity = (true, true))
# Note: This is actually a `p4est_quadrant_t` which is much bigger than the
# following struct. But we only need the first three fields for our purpose.
diff --git a/examples/t8code_2d_dgsem/elixir_advection_unstructured_flag.jl b/examples/t8code_2d_dgsem/elixir_advection_unstructured_flag.jl
index ba8f1b59b80..e512f328234 100644
--- a/examples/t8code_2d_dgsem/elixir_advection_unstructured_flag.jl
+++ b/examples/t8code_2d_dgsem/elixir_advection_unstructured_flag.jl
@@ -30,13 +30,8 @@ mapping_flag = Trixi.transfinite_mapping(faces)
mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp",
joinpath(@__DIR__, "square_unstructured_2.inp"))
-# INP mesh files are only support by p4est. Hence, we
-# create a p4est connecvity object first from which
-# we can create a t8code mesh.
-conn = Trixi.read_inp_p4est(mesh_file, Val(2))
-
-mesh = T8codeMesh(conn, polydeg = 3,
- mapping = mapping_flag,
+mesh = T8codeMesh(mesh_file, 2;
+ mapping = mapping_flag, polydeg = 3,
initial_refinement_level = 2)
# A semidiscretization collects data structures and functions for the spatial discretization.
diff --git a/examples/t8code_2d_dgsem/elixir_euler_free_stream.jl b/examples/t8code_2d_dgsem/elixir_euler_free_stream.jl
index 5e6c4193c50..d9d2c65d988 100644
--- a/examples/t8code_2d_dgsem/elixir_euler_free_stream.jl
+++ b/examples/t8code_2d_dgsem/elixir_euler_free_stream.jl
@@ -32,12 +32,7 @@ end
mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/a075f8ec39a67fa9fad8f6f84342cbca/raw/a7206a02ed3a5d3cadacd8d9694ac154f9151db7/square_unstructured_1.inp",
joinpath(@__DIR__, "square_unstructured_1.inp"))
-# INP mesh files are only support by p4est. Hence, we
-# create a p4est connecvity object first from which
-# we can create a t8code mesh.
-conn = Trixi.read_inp_p4est(mesh_file, Val(2))
-
-mesh = T8codeMesh(conn, polydeg = 3,
+mesh = T8codeMesh(mesh_file, 2; polydeg = 3,
mapping = mapping,
initial_refinement_level = 1)
diff --git a/examples/t8code_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl b/examples/t8code_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl
index e496eb76729..48684071d4b 100644
--- a/examples/t8code_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl
+++ b/examples/t8code_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl
@@ -32,12 +32,7 @@ mapping_flag = Trixi.transfinite_mapping(faces)
mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp",
joinpath(@__DIR__, "square_unstructured_2.inp"))
-# INP mesh files are only support by p4est. Hence, we
-# create a p4est connecvity object first from which
-# we can create a t8code mesh.
-conn = Trixi.read_inp_p4est(mesh_file, Val(2))
-
-mesh = T8codeMesh(conn, polydeg = 3,
+mesh = T8codeMesh(mesh_file, 2; polydeg = 3,
mapping = mapping_flag,
initial_refinement_level = 1)
diff --git a/examples/t8code_2d_dgsem/elixir_mhd_rotor.jl b/examples/t8code_2d_dgsem/elixir_mhd_rotor.jl
index ff2e40ae607..592d5b15a85 100644
--- a/examples/t8code_2d_dgsem/elixir_mhd_rotor.jl
+++ b/examples/t8code_2d_dgsem/elixir_mhd_rotor.jl
@@ -70,12 +70,7 @@ end
mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp",
joinpath(@__DIR__, "square_unstructured_2.inp"))
-# INP mesh files are only support by p4est. Hence, we
-# create a p4est connecvity object first from which
-# we can create a t8code mesh.
-conn = Trixi.read_inp_p4est(mesh_file, Val(2))
-
-mesh = T8codeMesh(conn, polydeg = 4,
+mesh = T8codeMesh(mesh_file, 2; polydeg = 4,
mapping = mapping_twist,
initial_refinement_level = 1)
diff --git a/examples/t8code_3d_dgsem/elixir_advection_amr_unstructured_curved.jl b/examples/t8code_3d_dgsem/elixir_advection_amr_unstructured_curved.jl
index e7c0f4b7318..1f9aa3449b0 100644
--- a/examples/t8code_3d_dgsem/elixir_advection_amr_unstructured_curved.jl
+++ b/examples/t8code_3d_dgsem/elixir_advection_amr_unstructured_curved.jl
@@ -50,12 +50,7 @@ end
mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp",
joinpath(@__DIR__, "cube_unstructured_2.inp"))
-# INP mesh files are only support by p4est. Hence, we
-# create a p4est connectivity object first from which
-# we can create a t8code mesh.
-conn = Trixi.read_inp_p4est(mesh_file, Val(3))
-
-mesh = T8codeMesh(conn, polydeg = 2,
+mesh = T8codeMesh(mesh_file, 3; polydeg = 2,
mapping = mapping,
initial_refinement_level = 1)
diff --git a/examples/t8code_3d_dgsem/elixir_advection_unstructured_curved.jl b/examples/t8code_3d_dgsem/elixir_advection_unstructured_curved.jl
index ee27ee117fe..fe6aa48e7d9 100644
--- a/examples/t8code_3d_dgsem/elixir_advection_unstructured_curved.jl
+++ b/examples/t8code_3d_dgsem/elixir_advection_unstructured_curved.jl
@@ -47,12 +47,7 @@ end
mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp",
joinpath(@__DIR__, "cube_unstructured_1.inp"))
-# INP mesh files are only support by p4est. Hence, we
-# create a p4est connectivity object first from which
-# we can create a t8code mesh.
-conn = Trixi.read_inp_p4est(mesh_file, Val(3))
-
-mesh = T8codeMesh(conn, polydeg = 3,
+mesh = T8codeMesh(mesh_file, 3; polydeg = 3,
mapping = mapping,
initial_refinement_level = 2)
diff --git a/examples/t8code_3d_dgsem/elixir_euler_ec.jl b/examples/t8code_3d_dgsem/elixir_euler_ec.jl
index b720bfcd375..e1e4d850a86 100644
--- a/examples/t8code_3d_dgsem/elixir_euler_ec.jl
+++ b/examples/t8code_3d_dgsem/elixir_euler_ec.jl
@@ -47,12 +47,7 @@ end
mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp",
joinpath(@__DIR__, "cube_unstructured_2.inp"))
-# INP mesh files are only support by p4est. Hence, we
-# create a p4est connectivity object first from which
-# we can create a t8code mesh.
-conn = Trixi.read_inp_p4est(mesh_file, Val(3))
-
-mesh = T8codeMesh(conn, polydeg = 5,
+mesh = T8codeMesh(mesh_file, 3; polydeg = 5,
mapping = mapping,
initial_refinement_level = 0)
diff --git a/examples/t8code_3d_dgsem/elixir_euler_free_stream.jl b/examples/t8code_3d_dgsem/elixir_euler_free_stream.jl
index b70a6091adf..882e3aebebe 100644
--- a/examples/t8code_3d_dgsem/elixir_euler_free_stream.jl
+++ b/examples/t8code_3d_dgsem/elixir_euler_free_stream.jl
@@ -48,12 +48,7 @@ end
mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp",
joinpath(@__DIR__, "cube_unstructured_1.inp"))
-# INP mesh files are only support by p4est. Hence, we
-# create a p4est connectivity object first from which
-# we can create a t8code mesh.
-conn = Trixi.read_inp_p4est(mesh_file, Val(3))
-
-mesh = T8codeMesh(conn, polydeg = 2,
+mesh = T8codeMesh(mesh_file, 3; polydeg = 2,
mapping = mapping,
initial_refinement_level = 0)
diff --git a/examples/t8code_3d_dgsem/elixir_euler_free_stream_extruded.jl b/examples/t8code_3d_dgsem/elixir_euler_free_stream_extruded.jl
index 6ae38d20b5a..777cccf7ad7 100644
--- a/examples/t8code_3d_dgsem/elixir_euler_free_stream_extruded.jl
+++ b/examples/t8code_3d_dgsem/elixir_euler_free_stream_extruded.jl
@@ -37,12 +37,7 @@ end
mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp",
joinpath(@__DIR__, "cube_unstructured_2.inp"))
-# INP mesh files are only support by p4est. Hence, we
-# create a p4est connecvity object first from which
-# we can create a t8code mesh.
-conn = Trixi.read_inp_p4est(mesh_file, Val(3))
-
-mesh = T8codeMesh(conn, polydeg = 3,
+mesh = T8codeMesh(mesh_file, 3; polydeg = 3,
mapping = mapping,
initial_refinement_level = 0)
diff --git a/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl b/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl
index 6856be36ea1..a06e7927dd0 100644
--- a/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl
+++ b/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl
@@ -50,13 +50,8 @@ end
mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp",
joinpath(@__DIR__, "cube_unstructured_1.inp"))
-# INP mesh files are only support by p4est. Hence, we
-# create a p4est connecvity object first from which
-# we can create a t8code mesh.
-conn = Trixi.read_inp_p4est(mesh_file, Val(3))
-
# Mesh polydeg of 2 (half the solver polydeg) to ensure FSP (see above).
-mesh = T8codeMesh(conn, polydeg = 2,
+mesh = T8codeMesh(mesh_file, 3; polydeg = 2,
mapping = mapping,
initial_refinement_level = 0)
diff --git a/src/meshes/p4est_mesh.jl b/src/meshes/p4est_mesh.jl
index abe9d9345b5..6bb98196231 100644
--- a/src/meshes/p4est_mesh.jl
+++ b/src/meshes/p4est_mesh.jl
@@ -387,12 +387,44 @@ function P4estMesh{NDIMS}(meshfile::String;
p4est_partition_allow_for_coarsening)
end
+# Wrapper for `p4est_connectivity_from_hohqmesh_abaqus`. The latter is used
+# by `T8codeMesh`, too.
+function p4est_mesh_from_hohqmesh_abaqus(meshfile, initial_refinement_level,
+ n_dimensions, RealT)
+ connectivity, tree_node_coordinates, nodes, boundary_names = p4est_connectivity_from_hohqmesh_abaqus(meshfile,
+ initial_refinement_level,
+ n_dimensions,
+ RealT)
+
+ p4est = new_p4est(connectivity, initial_refinement_level)
+
+ return p4est, tree_node_coordinates, nodes, boundary_names
+end
+
+# Wrapper for `p4est_connectivity_from_standard_abaqus`. The latter is used
+# by `T8codeMesh`, too.
+function p4est_mesh_from_standard_abaqus(meshfile, mapping, polydeg,
+ initial_refinement_level, n_dimensions, RealT,
+ boundary_symbols)
+ connectivity, tree_node_coordinates, nodes, boundary_names = p4est_connectivity_from_standard_abaqus(meshfile,
+ mapping,
+ polydeg,
+ initial_refinement_level,
+ n_dimensions,
+ RealT,
+ boundary_symbols)
+
+ p4est = new_p4est(connectivity, initial_refinement_level)
+
+ return p4est, tree_node_coordinates, nodes, boundary_names
+end
+
# Create the mesh connectivity, mapped node coordinates within each tree, reference nodes in [-1,1]
# and a list of boundary names for the `P4estMesh`. High-order boundary curve information as well as
# the boundary names on each tree are provided by the `meshfile` created by
# [`HOHQMesh.jl`](https://github.com/trixi-framework/HOHQMesh.jl).
-function p4est_mesh_from_hohqmesh_abaqus(meshfile, initial_refinement_level,
- n_dimensions, RealT)
+function p4est_connectivity_from_hohqmesh_abaqus(meshfile, initial_refinement_level,
+ n_dimensions, RealT)
# Create the mesh connectivity using `p4est`
connectivity = read_inp_p4est(meshfile, Val(n_dimensions))
connectivity_pw = PointerWrapper(connectivity)
@@ -440,18 +472,17 @@ function p4est_mesh_from_hohqmesh_abaqus(meshfile, initial_refinement_level,
file_idx += 1
end
- p4est = new_p4est(connectivity, initial_refinement_level)
-
- return p4est, tree_node_coordinates, nodes, boundary_names
+ return connectivity, tree_node_coordinates, nodes, boundary_names
end
# Create the mesh connectivity, mapped node coordinates within each tree, reference nodes in [-1,1]
# and a list of boundary names for the `P4estMesh`. The tree node coordinates are computed according to
# the `mapping` passed to this function using polynomial interpolants of degree `polydeg`. All boundary
# names are given the name `:all`.
-function p4est_mesh_from_standard_abaqus(meshfile, mapping, polydeg,
- initial_refinement_level, n_dimensions, RealT,
- boundary_symbols)
+function p4est_connectivity_from_standard_abaqus(meshfile, mapping, polydeg,
+ initial_refinement_level, n_dimensions,
+ RealT,
+ boundary_symbols)
# Create the mesh connectivity using `p4est`
connectivity = read_inp_p4est(meshfile, Val(n_dimensions))
connectivity_pw = PointerWrapper(connectivity)
@@ -474,8 +505,6 @@ function p4est_mesh_from_standard_abaqus(meshfile, mapping, polydeg,
calc_tree_node_coordinates!(tree_node_coordinates, nodes, mapping, vertices,
tree_to_vertex)
- p4est = new_p4est(connectivity, initial_refinement_level)
-
if boundary_symbols === nothing
# There's no simple and generic way to distinguish boundaries without any information given.
# Name all of them :all.
@@ -495,7 +524,7 @@ function p4est_mesh_from_standard_abaqus(meshfile, mapping, polydeg,
Val(n_dimensions))
end
- return p4est, tree_node_coordinates, nodes, boundary_names
+ return connectivity, tree_node_coordinates, nodes, boundary_names
end
function parse_elements(meshfile, n_trees, n_dims)
diff --git a/src/meshes/t8code_mesh.jl b/src/meshes/t8code_mesh.jl
index cb2ac787e14..0af4c6ae023 100644
--- a/src/meshes/t8code_mesh.jl
+++ b/src/meshes/t8code_mesh.jl
@@ -26,7 +26,7 @@ mutable struct T8codeMesh{NDIMS, RealT <: Real, IsParallel, NDIMSP2, NNODES} <:
nmpiinterfaces :: Int
nmpimortars :: Int
- function T8codeMesh{NDIMS}(forest, tree_node_coordinates, nodes,
+ function T8codeMesh{NDIMS}(forest::Ptr{t8_forest}, tree_node_coordinates, nodes,
boundary_names,
current_filename) where {NDIMS}
is_parallel = mpi_isparallel() ? True() : False()
@@ -100,6 +100,129 @@ function Base.show(io::IO, ::MIME"text/plain", mesh::T8codeMesh)
end
end
+"""
+ T8codeMesh{NDIMS, RealT}(forest, boundary_names; polydeg = 1, mapping = nothing)
+
+Main mesh constructor for the `T8codeMesh` wrapping around a given t8code
+`forest` object. This constructor is typically called by other `T8codeMesh`
+constructors.
+
+# Arguments
+- `forest`: Pointer to a t8code forest.
+- `boundary_names`: List of boundary names.
+- `polydeg::Integer`: Polynomial degree used to store the geometry of the mesh.
+ The mapping will be approximated by an interpolation polynomial
+ of the specified degree for each tree.
+- `mapping`: A function of `NDIMS` variables to describe the mapping that transforms
+ the imported mesh to the physical domain. Use `nothing` for the identity map.
+"""
+function T8codeMesh{NDIMS, RealT}(forest::Ptr{t8_forest}, boundary_names; polydeg = 1,
+ mapping = nothing) where {NDIMS, RealT}
+    # In t8code, the reference space is [0,1].
+ basis = LobattoLegendreBasis(RealT, polydeg)
+ nodes = 0.5 .* (basis.nodes .+ 1.0)
+
+ cmesh = t8_forest_get_cmesh(forest)
+ number_of_trees = t8_forest_get_num_global_trees(forest)
+
+ tree_node_coordinates = Array{RealT, NDIMS + 2}(undef, NDIMS,
+ ntuple(_ -> length(nodes), NDIMS)...,
+ number_of_trees)
+
+ reference_coordinates = Vector{Float64}(undef, 3)
+
+ # Calculate node coordinates of reference mesh.
+ if NDIMS == 2
+ number_of_corners = 4 # quadrilateral
+
+ # Testing for negative element volumes.
+ vertices = zeros(3, number_of_corners)
+ for itree in 1:number_of_trees
+ vertices_pointer = t8_cmesh_get_tree_vertices(cmesh, itree - 1)
+
+ # Note, `vertices = unsafe_wrap(Array, vertices_pointer, (3, 1 << NDIMS))`
+ # sometimes does not work since `vertices_pointer` is not necessarily properly
+ # aligned to 8 bytes.
+ for icorner in 1:number_of_corners
+ vertices[1, icorner] = unsafe_load(vertices_pointer, (icorner - 1) * 3 + 1)
+ vertices[2, icorner] = unsafe_load(vertices_pointer, (icorner - 1) * 3 + 2)
+ end
+
+            # Check if the tree's node ordering is right-handed; otherwise throw an error.
+ let z = zero(eltype(vertices)), o = one(eltype(vertices))
+ u = vertices[:, 2] - vertices[:, 1]
+ v = vertices[:, 3] - vertices[:, 1]
+ w = [z, z, o]
+
+ # Triple product gives signed volume of spanned parallelepiped.
+ vol = dot(cross(u, v), w)
+
+ if vol < z
+ error("Discovered negative volumes in `cmesh`: vol = $vol")
+ end
+ end
+
+ # Query geometry data from t8code.
+ for j in eachindex(nodes), i in eachindex(nodes)
+ reference_coordinates[1] = nodes[i]
+ reference_coordinates[2] = nodes[j]
+ reference_coordinates[3] = 0.0
+ t8_geometry_evaluate(cmesh, itree - 1, reference_coordinates, 1,
+ @view(tree_node_coordinates[:, i, j, itree]))
+ end
+ end
+
+ elseif NDIMS == 3
+ number_of_corners = 8 # hexahedron
+
+ # Testing for negative element volumes.
+ vertices = zeros(3, number_of_corners)
+ for itree in 1:number_of_trees
+ vertices_pointer = t8_cmesh_get_tree_vertices(cmesh, itree - 1)
+
+ # Note, `vertices = unsafe_wrap(Array, vertices_pointer, (3, 1 << NDIMS))`
+ # sometimes does not work since `vertices_pointer` is not necessarily properly
+ # aligned to 8 bytes.
+ for icorner in 1:number_of_corners
+ vertices[1, icorner] = unsafe_load(vertices_pointer, (icorner - 1) * 3 + 1)
+ vertices[2, icorner] = unsafe_load(vertices_pointer, (icorner - 1) * 3 + 2)
+ vertices[3, icorner] = unsafe_load(vertices_pointer, (icorner - 1) * 3 + 3)
+ end
+
+            # Check if the tree's node ordering is right-handed; otherwise throw an error.
+ let z = zero(eltype(vertices))
+ u = vertices[:, 2] - vertices[:, 1]
+ v = vertices[:, 3] - vertices[:, 1]
+ w = vertices[:, 5] - vertices[:, 1]
+
+ # Triple product gives signed volume of spanned parallelepiped.
+ vol = dot(cross(u, v), w)
+
+ if vol < z
+ error("Discovered negative volumes in `cmesh`: vol = $vol")
+ end
+ end
+
+ # Query geometry data from t8code.
+ for k in eachindex(nodes), j in eachindex(nodes), i in eachindex(nodes)
+ reference_coordinates[1] = nodes[i]
+ reference_coordinates[2] = nodes[j]
+ reference_coordinates[3] = nodes[k]
+ t8_geometry_evaluate(cmesh, itree - 1, reference_coordinates, 1,
+ @view(tree_node_coordinates[:, i, j, k, itree]))
+ end
+ end
+ else
+ throw(ArgumentError("$NDIMS dimensions are not supported."))
+ end
+
+ # Apply user defined mapping.
+ map_node_coordinates!(tree_node_coordinates, mapping)
+
+ return T8codeMesh{NDIMS}(forest, tree_node_coordinates, basis.nodes,
+ boundary_names, "")
+end
+
"""
T8codeMesh(trees_per_dimension; polydeg, mapping=identity,
RealT=Float64, initial_refinement_level=0, periodicity=true)
@@ -187,57 +310,10 @@ function T8codeMesh(trees_per_dimension; polydeg = 1,
forest = t8_forest_new_uniform(cmesh, scheme, initial_refinement_level, do_face_ghost,
mpi_comm())
- basis = LobattoLegendreBasis(RealT, polydeg)
- nodes = basis.nodes
-
- num_trees = t8_cmesh_get_num_trees(cmesh)
-
- tree_node_coordinates = Array{RealT, NDIMS + 2}(undef, NDIMS,
- ntuple(_ -> length(nodes), NDIMS)...,
- num_trees)
-
- # Get cell length in reference mesh: Omega_ref = [-1,1]^NDIMS.
- dx = [2 / n for n in trees_per_dimension]
-
# Non-periodic boundaries.
boundary_names = fill(Symbol("---"), 2 * NDIMS, prod(trees_per_dimension))
- if mapping === nothing
- mapping_ = coordinates2mapping(ntuple(_ -> -1.0, NDIMS), ntuple(_ -> 1.0, NDIMS))
- else
- mapping_ = mapping
- end
-
- for itree in 1:num_trees
- veptr = t8_cmesh_get_tree_vertices(cmesh, itree - 1)
- verts = unsafe_wrap(Array, veptr, (3, 1 << NDIMS))
-
- # Calculate node coordinates of reference mesh.
- if NDIMS == 2
- cell_x_offset = (verts[1, 1] - 0.5 * (trees_per_dimension[1] - 1)) * dx[1]
- cell_y_offset = (verts[2, 1] - 0.5 * (trees_per_dimension[2] - 1)) * dx[2]
-
- for j in eachindex(nodes), i in eachindex(nodes)
- tree_node_coordinates[:, i, j, itree] .= mapping_(cell_x_offset +
- dx[1] * nodes[i] / 2,
- cell_y_offset +
- dx[2] * nodes[j] / 2)
- end
- elseif NDIMS == 3
- cell_x_offset = (verts[1, 1] - 0.5 * (trees_per_dimension[1] - 1)) * dx[1]
- cell_y_offset = (verts[2, 1] - 0.5 * (trees_per_dimension[2] - 1)) * dx[2]
- cell_z_offset = (verts[3, 1] - 0.5 * (trees_per_dimension[3] - 1)) * dx[3]
-
- for k in eachindex(nodes), j in eachindex(nodes), i in eachindex(nodes)
- tree_node_coordinates[:, i, j, k, itree] .= mapping_(cell_x_offset +
- dx[1] * nodes[i] / 2,
- cell_y_offset +
- dx[2] * nodes[j] / 2,
- cell_z_offset +
- dx[3] * nodes[k] / 2)
- end
- end
-
+ for itree in 1:t8_forest_get_num_global_trees(forest)
if !periodicity[1]
boundary_names[1, itree] = :x_neg
boundary_names[2, itree] = :x_pos
@@ -256,8 +332,13 @@ function T8codeMesh(trees_per_dimension; polydeg = 1,
end
end
- return T8codeMesh{NDIMS}(forest, tree_node_coordinates, nodes,
- boundary_names, "")
+    # Note, `p*est_connectivity_new_brick` creates a connectivity covering the domain `[0,nx] x [0,ny] x ...`.
+ # Hence, transform mesh coordinates to reference space [-1,1]^NDIMS before applying user defined mapping.
+ mapping_(xyz...) = mapping((x * 2.0 / tpd - 1.0 for (x, tpd) in zip(xyz,
+ trees_per_dimension))...)
+
+ return T8codeMesh{NDIMS, RealT}(forest, boundary_names; polydeg = polydeg,
+ mapping = mapping_)
end
"""
@@ -295,106 +376,11 @@ function T8codeMesh(cmesh::Ptr{t8_cmesh};
forest = t8_forest_new_uniform(cmesh, scheme, initial_refinement_level, do_face_ghost,
mpi_comm())
- basis = LobattoLegendreBasis(RealT, polydeg)
- nodes = basis.nodes
-
- num_trees = t8_cmesh_get_num_trees(cmesh)
-
- tree_node_coordinates = Array{RealT, NDIMS + 2}(undef, NDIMS,
- ntuple(_ -> length(nodes), NDIMS)...,
- num_trees)
-
- nodes_in = [-1.0, 1.0]
- matrix = polynomial_interpolation_matrix(nodes_in, nodes)
-
- num_local_trees = t8_cmesh_get_num_local_trees(cmesh)
-
- if NDIMS == 2
- data_in = Array{RealT, 3}(undef, 2, 2, 2)
- tmp1 = zeros(RealT, 2, length(nodes), length(nodes_in))
- verts = zeros(3, 4)
-
- for itree in 0:(num_local_trees - 1)
- veptr = t8_cmesh_get_tree_vertices(cmesh, itree)
-
- # Note, `verts = unsafe_wrap(Array, veptr, (3, 1 << NDIMS))`
- # sometimes does not work since `veptr` is not necessarily properly
- # aligned to 8 bytes.
- for icorner in 1:4
- verts[1, icorner] = unsafe_load(veptr, (icorner - 1) * 3 + 1)
- verts[2, icorner] = unsafe_load(veptr, (icorner - 1) * 3 + 2)
- end
-
- # Check if tree's node ordering is right-handed or print a warning.
- let z = zero(eltype(verts)), o = one(eltype(verts))
- u = verts[:, 2] - verts[:, 1]
- v = verts[:, 3] - verts[:, 1]
- w = [z, z, o]
-
- # Triple product gives signed volume of spanned parallelepiped.
- vol = dot(cross(u, v), w)
-
- if vol < z
- @warn "Discovered negative volumes in `cmesh`: vol = $vol"
- end
- end
-
- # Tree vertices are stored in z-order.
- @views data_in[:, 1, 1] .= verts[1:2, 1]
- @views data_in[:, 2, 1] .= verts[1:2, 2]
- @views data_in[:, 1, 2] .= verts[1:2, 3]
- @views data_in[:, 2, 2] .= verts[1:2, 4]
-
- # Interpolate corner coordinates to specified nodes.
- multiply_dimensionwise!(view(tree_node_coordinates, :, :, :, itree + 1),
- matrix, matrix,
- data_in,
- tmp1)
- end
-
- elseif NDIMS == 3
- data_in = Array{RealT, 4}(undef, 3, 2, 2, 2)
- tmp1 = zeros(RealT, 3, length(nodes), length(nodes_in), length(nodes_in))
- verts = zeros(3, 8)
-
- for itree in 0:(num_trees - 1)
- veptr = t8_cmesh_get_tree_vertices(cmesh, itree)
-
- # Note, `verts = unsafe_wrap(Array, veptr, (3, 1 << NDIMS))`
- # sometimes does not work since `veptr` is not necessarily properly
- # aligned to 8 bytes.
- for icorner in 1:8
- verts[1, icorner] = unsafe_load(veptr, (icorner - 1) * 3 + 1)
- verts[2, icorner] = unsafe_load(veptr, (icorner - 1) * 3 + 2)
- verts[3, icorner] = unsafe_load(veptr, (icorner - 1) * 3 + 3)
- end
-
- # Tree vertices are stored in z-order.
- @views data_in[:, 1, 1, 1] .= verts[1:3, 1]
- @views data_in[:, 2, 1, 1] .= verts[1:3, 2]
- @views data_in[:, 1, 2, 1] .= verts[1:3, 3]
- @views data_in[:, 2, 2, 1] .= verts[1:3, 4]
-
- @views data_in[:, 1, 1, 2] .= verts[1:3, 5]
- @views data_in[:, 2, 1, 2] .= verts[1:3, 6]
- @views data_in[:, 1, 2, 2] .= verts[1:3, 7]
- @views data_in[:, 2, 2, 2] .= verts[1:3, 8]
-
- # Interpolate corner coordinates to specified nodes.
- multiply_dimensionwise!(view(tree_node_coordinates, :, :, :, :, itree + 1),
- matrix, matrix, matrix,
- data_in,
- tmp1)
- end
- end
-
- map_node_coordinates!(tree_node_coordinates, mapping)
-
- # There's no simple and generic way to distinguish boundaries. Name all of them :all.
- boundary_names = fill(:all, 2 * NDIMS, num_trees)
+ # There's no simple and generic way to distinguish boundaries, yet. Name all of them :all.
+ boundary_names = fill(:all, 2 * NDIMS, t8_cmesh_get_num_trees(cmesh))
- return T8codeMesh{NDIMS}(forest, tree_node_coordinates, nodes,
- boundary_names, "")
+ return T8codeMesh{NDIMS, RealT}(forest, boundary_names; polydeg = polydeg,
+ mapping = mapping)
end
"""
@@ -445,36 +431,201 @@ function T8codeMesh(conn::Ptr{p8est_connectivity}; kwargs...)
return T8codeMesh(cmesh; kwargs...)
end
+# Convenience types for multiple dispatch. Only used in this file.
+struct GmshFile{NDIMS}
+ path::String
+end
+
+struct AbaqusFile{NDIMS}
+ path::String
+end
+
"""
- T8codeMesh(meshfile::String, ndims; kwargs...)
+    T8codeMesh(filepath::String, ndims; kwargs...)
Main mesh constructor for the `T8codeMesh` that imports an unstructured, conforming
-mesh from a Gmsh mesh file (`.msh`).
+mesh from either a Gmsh mesh file (`.msh`) or Abaqus mesh file (`.inp`) which is determined
+by the file extension.
# Arguments
-- `meshfile::String`: path to a Gmsh mesh file.
+- `filepath::String`: path to a Gmsh or Abaqus mesh file.
- `ndims`: Mesh file dimension: `2` or `3`.
-- `mapping`: a function of `NDIMS` variables to describe the mapping that transforms
+
+# Optional Keyword Arguments
+- `mapping`: A function of `NDIMS` variables to describe the mapping that transforms
the imported mesh to the physical domain. Use `nothing` for the identity map.
-- `polydeg::Integer`: polynomial degree used to store the geometry of the mesh.
+- `polydeg::Integer`: Polynomial degree used to store the geometry of the mesh.
The mapping will be approximated by an interpolation polynomial
of the specified degree for each tree.
The default of `1` creates an uncurved geometry. Use a higher value if the mapping
will curve the imported uncurved mesh.
-- `RealT::Type`: the type that should be used for coordinates.
-- `initial_refinement_level::Integer`: refine the mesh uniformly to this level before the simulation starts.
+- `RealT::Type`: The type that should be used for coordinates.
+- `initial_refinement_level::Integer`: Refine the mesh uniformly to this level before the simulation starts.
"""
-function T8codeMesh(meshfile::String, ndims; kwargs...)
+function T8codeMesh(filepath::String, ndims; kwargs...)
# Prevent `t8code` from crashing Julia if the file doesn't exist.
- @assert isfile(meshfile)
+ @assert isfile(filepath)
+
+ meshfile_prefix, meshfile_suffix = splitext(filepath)
+
+ file_extension = lowercase(meshfile_suffix)
+
+ if file_extension == ".msh"
+ return T8codeMesh(GmshFile{ndims}(filepath); kwargs...)
+ end
+
+ if file_extension == ".inp"
+ return T8codeMesh(AbaqusFile{ndims}(filepath); kwargs...)
+ end
+
+ throw(ArgumentError("Unknown file extension: " * file_extension))
+end
- meshfile_prefix, meshfile_suffix = splitext(meshfile)
+"""
+ T8codeMesh(meshfile::GmshFile{NDIMS}; kwargs...)
- cmesh = t8_cmesh_from_msh_file(meshfile_prefix, 0, mpi_comm(), ndims, 0, 0)
+Main mesh constructor for the `T8codeMesh` that imports an unstructured, conforming
+mesh from a Gmsh mesh file (`.msh`).
+
+# Arguments
+- `meshfile::GmshFile{NDIMS}`: Gmsh mesh file object of dimension NDIMS and given `path` to the file.
+
+# Optional Keyword Arguments
+- `mapping`: A function of `NDIMS` variables to describe the mapping that transforms
+ the imported mesh to the physical domain. Use `nothing` for the identity map.
+- `polydeg::Integer`: Polynomial degree used to store the geometry of the mesh.
+ The mapping will be approximated by an interpolation polynomial
+ of the specified degree for each tree.
+ The default of `1` creates an uncurved geometry. Use a higher value if the mapping
+ will curve the imported uncurved mesh.
+- `RealT::Type`: The type that should be used for coordinates.
+- `initial_refinement_level::Integer`: Refine the mesh uniformly to this level before the simulation starts.
+"""
+function T8codeMesh(meshfile::GmshFile{NDIMS}; kwargs...) where {NDIMS}
+ # Prevent `t8code` from crashing Julia if the file doesn't exist.
+ @assert isfile(meshfile.path)
+
+ meshfile_prefix, meshfile_suffix = splitext(meshfile.path)
+
+ cmesh = t8_cmesh_from_msh_file(meshfile_prefix, 0, mpi_comm(), NDIMS, 0, 0)
return T8codeMesh(cmesh; kwargs...)
end
+"""
+ T8codeMesh(meshfile::AbaqusFile{NDIMS};
+ mapping=nothing, polydeg=1, RealT=Float64,
+ initial_refinement_level=0, unsaved_changes=true,
+ boundary_symbols = nothing)
+
+Main mesh constructor for the `T8codeMesh` that imports an unstructured, conforming
+mesh from an Abaqus mesh file (`.inp`).
+
+To create a curved unstructured `T8codeMesh` two strategies are available:
+
+- `HOHQMesh Abaqus`: High-order, curved boundary information created by
+ [`HOHQMesh.jl`](https://github.com/trixi-framework/HOHQMesh.jl) is
+ available in the `meshfile`. The mesh polynomial degree `polydeg`
+ of the boundaries is provided from the `meshfile`. The computation of
+ the mapped tree coordinates is done with transfinite interpolation
+ with linear blending similar to [`UnstructuredMesh2D`](@ref). Boundary name
+ information is also parsed from the `meshfile` such that different boundary
+ conditions can be set at each named boundary on a given tree.
+
+- `Standard Abaqus`: By default, with `mapping=nothing` and `polydeg=1`, this creates a
+ straight-sided mesh from the information parsed from the `meshfile`. If a mapping
+ function is specified then it computes the mapped tree coordinates via polynomial
+ interpolants with degree `polydeg`. The mesh created by this function will only
+ have one boundary `:all` if `boundary_symbols` is not specified.
+ If `boundary_symbols` is specified the mesh file will be parsed for nodesets defining
+ the boundary nodes from which boundary edges (2D) and faces (3D) will be assigned.
+
+Note that the `polydeg` keyword argument is only used by the `Standard Abaqus` option;
+the `HOHQMesh Abaqus` routine obtains the mesh `polydeg` directly from the `meshfile`
+and constructs the transfinite mapping internally. The `mapping` keyword argument is applied in both cases.
+
+The particular strategy is selected according to the header present in the `meshfile` where
+the constructor checks whether or not the `meshfile` was created with
+[HOHQMesh.jl](https://github.com/trixi-framework/HOHQMesh.jl).
+If the HOHQMesh header is not present in the `meshfile`, the `T8codeMesh` is created
+with the `Standard Abaqus` option.
+
+The default keyword argument `initial_refinement_level=0` corresponds to a forest
+where the number of trees is the same as the number of elements in the original `meshfile`.
+Increasing the `initial_refinement_level` allows one to uniformly refine the base mesh given
+in the `meshfile` to create a forest with more elements before the simulation begins.
+For example, if a two-dimensional base mesh contains 25 elements then setting
+`initial_refinement_level=1` creates an initial forest of `2^2 * 25 = 100` elements.
+
+# Arguments
+- `meshfile::AbaqusFile{NDIMS}`: Abaqus mesh file object of dimension NDIMS and given `path` to the file.
+
+# Optional Keyword Arguments
+- `mapping`: A function of `NDIMS` variables to describe the mapping that transforms
+ the imported mesh to the physical domain. Use `nothing` for the identity map.
+- `polydeg::Integer`: Polynomial degree used to store the geometry of the mesh.
+ The mapping will be approximated by an interpolation polynomial
+ of the specified degree for each tree.
+ The default of `1` creates an uncurved geometry. Use a higher value if the mapping
+ will curve the imported uncurved mesh.
+- `RealT::Type`: The type that should be used for coordinates.
+- `initial_refinement_level::Integer`: Refine the mesh uniformly to this level before the simulation starts.
+- `boundary_symbols::Vector{Symbol}`: A vector of symbols that correspond to the boundary names in the `meshfile`.
+ If `nothing` is passed then all boundaries are named `:all`.
+"""
+function T8codeMesh(meshfile::AbaqusFile{NDIMS};
+ mapping = nothing, polydeg = 1, RealT = Float64,
+ initial_refinement_level = 0,
+ boundary_symbols = nothing) where {NDIMS}
+ # Prevent `t8code` from crashing Julia if the file doesn't exist.
+ @assert isfile(meshfile.path)
+
+ # Read in the Header of the meshfile to determine which constructor is appropriate.
+ header = open(meshfile.path, "r") do io
+ readline(io) # Header of the Abaqus file; discarded
+ readline(io) # Read in the actual header information
+ end
+
+ # Check if the meshfile was generated using HOHQMesh.
+ if header == " File created by HOHQMesh"
+ # Mesh curvature and boundary naming is handled with additional information available in meshfile
+ connectivity, tree_node_coordinates, nodes, boundary_names = p4est_connectivity_from_hohqmesh_abaqus(meshfile.path,
+ initial_refinement_level,
+ NDIMS,
+ RealT)
+ # Apply user defined mapping.
+ map_node_coordinates!(tree_node_coordinates, mapping)
+ else
+ # Mesh curvature is handled directly by applying the mapping keyword argument.
+ connectivity, tree_node_coordinates, nodes, boundary_names = p4est_connectivity_from_standard_abaqus(meshfile.path,
+ mapping,
+ polydeg,
+ initial_refinement_level,
+ NDIMS,
+ RealT,
+ boundary_symbols)
+ end
+
+ cmesh = t8_cmesh_new_from_connectivity(connectivity, mpi_comm())
+ p4est_connectivity_destroy(connectivity)
+
+ do_face_ghost = mpi_isparallel()
+ scheme = t8_scheme_new_default_cxx()
+ forest = t8_forest_new_uniform(cmesh, scheme, initial_refinement_level, do_face_ghost,
+ mpi_comm())
+
+ return T8codeMesh{NDIMS}(forest, tree_node_coordinates, nodes,
+ boundary_names, "")
+end
+
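+# Dispatch on the connectivity pointer type to select the matching t8code conversion
+# routine: `p4est_connectivity` for 2D meshes and `p8est_connectivity` for 3D meshes.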
+function t8_cmesh_new_from_connectivity(connectivity::Ptr{p4est_connectivity}, comm)
+ return t8_cmesh_new_from_p4est(connectivity, comm, 0)
+end
+
+function t8_cmesh_new_from_connectivity(connectivity::Ptr{p8est_connectivity}, comm)
+ return t8_cmesh_new_from_p8est(connectivity, comm, 0)
+end
+
struct adapt_callback_passthrough
adapt_callback::Function
user_data::Any
diff --git a/test/test_t8code_2d.jl b/test/test_t8code_2d.jl
index d536a6dd73a..b63d2a105ac 100644
--- a/test/test_t8code_2d.jl
+++ b/test/test_t8code_2d.jl
@@ -30,8 +30,16 @@ mkdir(outdir)
end
end
+@trixi_testset "test load mesh from path" begin
+ mktempdir() do path
+ @test_throws "Unknown file extension: .unknown_ext" begin
+ mesh = T8codeMesh(touch(joinpath(path, "dummy.unknown_ext")), 2)
+ end
+ end
+end
+
@trixi_testset "test check_for_negative_volumes" begin
- @test_warn "Discovered negative volumes" begin
+ @test_throws "Discovered negative volumes" begin
# Unstructured mesh with six cells which have left-handed node ordering.
mesh_file = Trixi.download("https://gist.githubusercontent.com/jmark/bfe0d45f8e369298d6cc637733819013/raw/cecf86edecc736e8b3e06e354c494b2052d41f7a/rectangle_with_negative_volumes.msh",
joinpath(EXAMPLES_DIR,
@@ -42,6 +50,27 @@ end
end
end
+@trixi_testset "test t8code mesh from p4est connectivity" begin
+ @test begin
+ # Here we use the connectivity constructor from `P4est.jl` since the
+ # method dispatch works only on `Ptr{p4est_connectivity}` which
+ # actually is `Ptr{P4est.LibP4est.p4est_connectivity}`.
+ conn = Trixi.P4est.LibP4est.p4est_connectivity_new_brick(2, 3, 1, 1)
+ mesh = T8codeMesh(conn)
+ all(size(mesh.tree_node_coordinates) .== (2, 2, 2, 6))
+ end
+end
+
+@trixi_testset "test t8code mesh from ABAQUS HOHQMesh file" begin
+ @test begin
+        # Unstructured ABAQUS mesh file created with HOHQMesh.
+ file_path = Trixi.download("https://gist.githubusercontent.com/jmark/9e0da4306e266617eeb19bc56b0e7feb/raw/e6856e1deb648a807f6bb6d6dcacff9e55d94e2a/round_2d_tank.inp",
+ joinpath(EXAMPLES_DIR, "round_2d_tank.inp"))
+ mesh = T8codeMesh(file_path, 2)
+ all(size(mesh.tree_node_coordinates) .== (2, 4, 4, 340))
+ end
+end
+
@trixi_testset "elixir_advection_basic.jl" begin
# This test is identical to the one in `test_p4est_2d.jl`.
@test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_basic.jl"),
diff --git a/test/test_t8code_3d.jl b/test/test_t8code_3d.jl
index 4232cf04094..81d2a7cdd85 100644
--- a/test/test_t8code_3d.jl
+++ b/test/test_t8code_3d.jl
@@ -13,6 +13,17 @@ isdir(outdir) && rm(outdir, recursive = true)
mkdir(outdir)
@testset "T8codeMesh3D" begin
+ @trixi_testset "test t8code mesh from p8est connectivity" begin
+ @test begin
+ # Here we use the connectivity constructor from `P4est.jl` since the
+ # method dispatch works only on `Ptr{p8est_connectivity}` which
+ # actually is `Ptr{P4est.LibP4est.p8est_connectivity}`.
+ conn = Trixi.P4est.LibP4est.p8est_connectivity_new_brick(2, 3, 4, 1, 1, 1)
+ mesh = T8codeMesh(conn)
+ all(size(mesh.tree_node_coordinates) .== (3, 2, 2, 2, 24))
+ end
+ end
+
# This test is identical to the one in `test_p4est_3d.jl`.
@trixi_testset "elixir_advection_basic.jl" begin
@test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_basic.jl"),
@@ -202,7 +213,7 @@ mkdir(outdir)
3.3228975127030935e-13,
9.592326932761353e-13,
],
- tspan=(0.0, 0.1))
+ tspan=(0.0, 0.1), atol=5.0e-13,)
# Ensure that we do not have excessive memory allocations
# (e.g., from type instabilities)
let
From 9eaf8fc52b1f7ce581a59926f11826c858f88cfd Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Fri, 10 May 2024 13:04:50 +0200
Subject: [PATCH 09/44] set version to v0.7.12
---
Project.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Project.toml b/Project.toml
index 812d01733a4..2323d2c1995 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "Trixi"
uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb"
authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "]
-version = "0.7.12-pre"
+version = "0.7.12"
[deps]
CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
From c221bca89b38d416fb49137b1b266cecd1646b52 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Fri, 10 May 2024 13:05:03 +0200
Subject: [PATCH 10/44] set development version to v0.7.13-pre
---
Project.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Project.toml b/Project.toml
index 2323d2c1995..769c1fe7d08 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "Trixi"
uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb"
authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "]
-version = "0.7.12"
+version = "0.7.13-pre"
[deps]
CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
From 9b64eab3e3b0089ff6e38346d3ed46e13915cf72 Mon Sep 17 00:00:00 2001
From: Benjamin Bolm <74359358+bennibolm@users.noreply.github.com>
Date: Tue, 14 May 2024 10:03:37 +0200
Subject: [PATCH 11/44] Outsource saving of bounds check errors (#1824)
* Outsource saving of bounds check errors
* Increase coverage by activating saving of bounds check errors for local limiting
* Fix test for saving errors with coverage
* Remove empty line
* Implement suggestions
* Use `joinpath`
* Add test with coverage
* Fix I/O test for modified elixir
* Add more accurate test
* Redo last change; Default maxiters with coverage is 1
* Change order of lines
* Save deviations if maxiters is reached
* Add comment
* Save deviations at correct time steps
* Add test for saving deviations for positivity limiter (non-linear)
* Add comment
---------
Co-authored-by: Michael Schlottke-Lakemper
---
.../src/files/subcell_shock_capturing.jl | 11 ++-
...kelvin_helmholtz_instability_sc_subcell.jl | 4 +-
...lixir_euler_sedov_blast_wave_sc_subcell.jl | 4 +-
...ubble_shockcapturing_subcell_positivity.jl | 5 +-
src/callbacks_stage/subcell_bounds_check.jl | 38 +++++-----
.../subcell_bounds_check_2d.jl | 70 ++++++++++---------
test/test_tree_2d_euler.jl | 25 ++++++-
test/test_tree_2d_eulermulti.jl | 7 +-
8 files changed, 96 insertions(+), 68 deletions(-)
diff --git a/docs/literate/src/files/subcell_shock_capturing.jl b/docs/literate/src/files/subcell_shock_capturing.jl
index 8a98fdae283..b4d08965361 100644
--- a/docs/literate/src/files/subcell_shock_capturing.jl
+++ b/docs/literate/src/files/subcell_shock_capturing.jl
@@ -277,11 +277,10 @@ plot(sol)
# timestep and simulation time.
# ````
# iter, simu_time, rho_min, rho_max
-# 1, 0.0, 0.0, 0.0
-# 101, 0.29394033217556337, 0.0, 0.0
-# 201, 0.6012597465597065, 0.0, 0.0
-# 301, 0.9559096690030839, 0.0, 0.0
-# 401, 1.3674274981949077, 0.0, 0.0
-# 501, 1.8395301696603052, 0.0, 0.0
+# 100, 0.29103427131404924, 0.0, 0.0
+# 200, 0.5980281923063808, 0.0, 0.0
+# 300, 0.9520853560765293, 0.0, 0.0
+# 400, 1.3630295622683186, 0.0, 0.0
+# 500, 1.8344999624013498, 0.0, 0.0
# 532, 1.9974179806990118, 0.0, 0.0
# ````
diff --git a/examples/tree_2d_dgsem/elixir_euler_kelvin_helmholtz_instability_sc_subcell.jl b/examples/tree_2d_dgsem/elixir_euler_kelvin_helmholtz_instability_sc_subcell.jl
index 1817672778a..9e9fb45e7d1 100644
--- a/examples/tree_2d_dgsem/elixir_euler_kelvin_helmholtz_instability_sc_subcell.jl
+++ b/examples/tree_2d_dgsem/elixir_euler_kelvin_helmholtz_instability_sc_subcell.jl
@@ -83,7 +83,9 @@ callbacks = CallbackSet(summary_callback,
###############################################################################
# run the simulation
-stage_callbacks = (SubcellLimiterIDPCorrection(), BoundsCheckCallback(save_errors = false))
+stage_callbacks = (SubcellLimiterIDPCorrection(),
+ BoundsCheckCallback(save_errors = false, interval = 100))
+# `interval` is used when calling this elixir in the tests with `save_errors=true`.
sol = Trixi.solve(ode, Trixi.SimpleSSPRK33(stage_callbacks = stage_callbacks);
dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
diff --git a/examples/tree_2d_dgsem/elixir_euler_sedov_blast_wave_sc_subcell.jl b/examples/tree_2d_dgsem/elixir_euler_sedov_blast_wave_sc_subcell.jl
index 6cbbe4eb4e6..2089d35397d 100644
--- a/examples/tree_2d_dgsem/elixir_euler_sedov_blast_wave_sc_subcell.jl
+++ b/examples/tree_2d_dgsem/elixir_euler_sedov_blast_wave_sc_subcell.jl
@@ -85,7 +85,9 @@ callbacks = CallbackSet(summary_callback,
###############################################################################
# run the simulation
-stage_callbacks = (SubcellLimiterIDPCorrection(), BoundsCheckCallback(save_errors = false))
+stage_callbacks = (SubcellLimiterIDPCorrection(),
+ BoundsCheckCallback(save_errors = false, interval = 100))
+# `interval` is used when calling this elixir in the tests with `save_errors=true`.
sol = Trixi.solve(ode, Trixi.SimpleSSPRK33(stage_callbacks = stage_callbacks);
dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
diff --git a/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_positivity.jl b/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_positivity.jl
index 78ff47e255f..be14c448e4d 100644
--- a/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_positivity.jl
+++ b/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_positivity.jl
@@ -133,10 +133,9 @@ callbacks = CallbackSet(summary_callback,
###############################################################################
# run the simulation
-output_directory = "out"
stage_callbacks = (SubcellLimiterIDPCorrection(),
- BoundsCheckCallback(save_errors = true, interval = 100,
- output_directory = output_directory))
+ BoundsCheckCallback(save_errors = false, interval = 100))
+# `interval` is used when calling this elixir in the tests with `save_errors=true`.
sol = Trixi.solve(ode, Trixi.SimpleSSPRK33(stage_callbacks = stage_callbacks);
dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
diff --git a/src/callbacks_stage/subcell_bounds_check.jl b/src/callbacks_stage/subcell_bounds_check.jl
index ba193ab2997..3f3e151436f 100644
--- a/src/callbacks_stage/subcell_bounds_check.jl
+++ b/src/callbacks_stage/subcell_bounds_check.jl
@@ -38,35 +38,37 @@ function (callback::BoundsCheckCallback)(u_ode, integrator, stage)
(; t, iter, alg) = integrator
u = wrap_array(u_ode, mesh, equations, solver, cache)
+ @trixi_timeit timer() "check_bounds" check_bounds(u, mesh, equations, solver, cache,
+ solver.volume_integral)
+
save_errors = callback.save_errors && (callback.interval > 0) &&
(stage == length(alg.c)) &&
- (iter % callback.interval == 0 || integrator.finalstep)
- @trixi_timeit timer() "check_bounds" check_bounds(u, mesh, equations, solver, cache,
- solver.volume_integral, t,
- iter + 1,
- callback.output_directory,
- save_errors)
+ ((iter + 1) % callback.interval == 0 || # Every `interval` time steps
+ integrator.finalstep || # Planned last time step
+ (iter + 1) >= integrator.opts.maxiters) # Maximum iterations reached
+ if save_errors
+ @trixi_timeit timer() "save_errors" save_bounds_check_errors(callback.output_directory,
+ u, t, iter + 1,
+ equations,
+ solver.volume_integral)
+ end
end
-function check_bounds(u, mesh, equations, solver, cache,
- volume_integral::AbstractVolumeIntegral, t, iter,
- output_directory, save_errors)
- return nothing
+@inline function check_bounds(u, mesh, equations, solver, cache,
+ volume_integral::VolumeIntegralSubcellLimiting)
+ check_bounds(u, mesh, equations, solver, cache, volume_integral.limiter)
end
-function check_bounds(u, mesh, equations, solver, cache,
- volume_integral::VolumeIntegralSubcellLimiting, t, iter,
- output_directory, save_errors)
- check_bounds(u, mesh, equations, solver, cache, volume_integral.limiter, t, iter,
- output_directory, save_errors)
+@inline function save_bounds_check_errors(output_directory, u, t, iter, equations,
+ volume_integral::VolumeIntegralSubcellLimiting)
+ save_bounds_check_errors(output_directory, u, t, iter, equations,
+ volume_integral.limiter)
end
function init_callback(callback::BoundsCheckCallback, semi)
init_callback(callback, semi, semi.solver.volume_integral)
end
-init_callback(callback::BoundsCheckCallback, semi, volume_integral::AbstractVolumeIntegral) = nothing
-
function init_callback(callback::BoundsCheckCallback, semi,
volume_integral::VolumeIntegralSubcellLimiting)
init_callback(callback, semi, volume_integral.limiter)
@@ -116,8 +118,6 @@ function finalize_callback(callback::BoundsCheckCallback, semi)
finalize_callback(callback, semi, semi.solver.volume_integral)
end
-finalize_callback(callback::BoundsCheckCallback, semi, volume_integral::AbstractVolumeIntegral) = nothing
-
function finalize_callback(callback::BoundsCheckCallback, semi,
volume_integral::VolumeIntegralSubcellLimiting)
finalize_callback(callback, semi, volume_integral.limiter)
diff --git a/src/callbacks_stage/subcell_bounds_check_2d.jl b/src/callbacks_stage/subcell_bounds_check_2d.jl
index 0f713a296e2..c81ebc970a0 100644
--- a/src/callbacks_stage/subcell_bounds_check_2d.jl
+++ b/src/callbacks_stage/subcell_bounds_check_2d.jl
@@ -6,8 +6,7 @@
#! format: noindent
@inline function check_bounds(u, mesh::AbstractMesh{2}, equations, solver, cache,
- limiter::SubcellLimiterIDP,
- time, iter, output_directory, save_errors)
+ limiter::SubcellLimiterIDP)
(; local_twosided, positivity, local_onesided) = solver.volume_integral.limiter
(; variable_bounds) = limiter.cache.subcell_limiter_coefficients
(; idp_bounds_delta_local, idp_bounds_delta_global) = limiter.cache
@@ -101,42 +100,47 @@
idp_bounds_delta_local[key])
end
- if save_errors
- # Print to output file
- open("$output_directory/deviations.txt", "a") do f
- print(f, iter, ", ", time)
- if local_twosided
- for v in limiter.local_twosided_variables_cons
- v_string = string(v)
- print(f, ", ", idp_bounds_delta_local[Symbol(v_string, "_min")],
- ", ", idp_bounds_delta_local[Symbol(v_string, "_max")])
- end
+ return nothing
+end
+
+@inline function save_bounds_check_errors(output_directory, u, time, iter, equations,
+ limiter::SubcellLimiterIDP)
+ (; local_twosided, positivity, local_onesided) = limiter
+ (; idp_bounds_delta_local) = limiter.cache
+
+ # Print to output file
+ open(joinpath(output_directory, "deviations.txt"), "a") do f
+ print(f, iter, ", ", time)
+ if local_twosided
+ for v in limiter.local_twosided_variables_cons
+ v_string = string(v)
+ print(f, ", ", idp_bounds_delta_local[Symbol(v_string, "_min")],
+ ", ", idp_bounds_delta_local[Symbol(v_string, "_max")])
end
- if local_onesided
- for (variable, min_or_max) in limiter.local_onesided_variables_nonlinear
- print(f, ", ",
- idp_bounds_delta_local[Symbol(string(variable), "_",
- string(min_or_max))][stride_size])
- end
+ end
+ if local_onesided
+ for (variable, min_or_max) in limiter.local_onesided_variables_nonlinear
+ key = Symbol(string(variable), "_", string(min_or_max))
+ print(f, ", ", idp_bounds_delta_local[key])
end
- if positivity
- for v in limiter.positivity_variables_cons
- if v in limiter.local_twosided_variables_cons
- continue
- end
- print(f, ", ", idp_bounds_delta_local[Symbol(string(v), "_min")])
- end
- for variable in limiter.positivity_variables_nonlinear
- print(f, ", ",
- idp_bounds_delta_local[Symbol(string(variable), "_min")])
+ end
+ if positivity
+ for v in limiter.positivity_variables_cons
+ if v in limiter.local_twosided_variables_cons
+ continue
end
+ print(f, ", ", idp_bounds_delta_local[Symbol(string(v), "_min")])
+ end
+ for variable in limiter.positivity_variables_nonlinear
+ print(f, ", ", idp_bounds_delta_local[Symbol(string(variable), "_min")])
end
- println(f)
- end
- # Reset local maximum deviations
- for (key, _) in idp_bounds_delta_local
- idp_bounds_delta_local[key] = zero(eltype(idp_bounds_delta_local[key]))
end
+ println(f)
+ end
+
+ # Reset local maximum deviations
+ for (key, _) in idp_bounds_delta_local
+ idp_bounds_delta_local[key] = zero(eltype(idp_bounds_delta_local[key]))
end
return nothing
diff --git a/test/test_tree_2d_euler.jl b/test/test_tree_2d_euler.jl
index efd4058031f..a004d1452b7 100644
--- a/test/test_tree_2d_euler.jl
+++ b/test/test_tree_2d_euler.jl
@@ -400,6 +400,7 @@ end
end
@trixi_testset "elixir_euler_sedov_blast_wave_sc_subcell.jl" begin
+ rm(joinpath("out", "deviations.txt"), force = true)
@test_trixi_include(joinpath(EXAMPLES_DIR,
"elixir_euler_sedov_blast_wave_sc_subcell.jl"),
l2=[
@@ -416,7 +417,20 @@ end
],
tspan=(0.0, 1.0),
initial_refinement_level=4,
- coverage_override=(maxiters = 6,))
+ coverage_override=(maxiters = 6,),
+ save_errors=true)
+ lines = readlines(joinpath("out", "deviations.txt"))
+ @test lines[1] == "# iter, simu_time, rho_min, rho_max, entropy_guermond_etal_min"
+ cmd = string(Base.julia_cmd())
+ coverage = occursin("--code-coverage", cmd) &&
+ !occursin("--code-coverage=none", cmd)
+ if coverage
+ # Run with coverage takes 6 time steps.
+ @test startswith(lines[end], "6")
+ else
+ # Run without coverage takes 89 time steps.
+ @test startswith(lines[end], "89")
+ end
# Ensure that we do not have excessive memory allocations
# (e.g., from type instabilities)
let
@@ -614,6 +628,7 @@ end
end
@trixi_testset "elixir_euler_kelvin_helmholtz_instability_sc_subcell.jl" begin
+ rm(joinpath("out", "deviations.txt"), force = true)
@test_trixi_include(joinpath(EXAMPLES_DIR,
"elixir_euler_kelvin_helmholtz_instability_sc_subcell.jl"),
l2=[
@@ -628,7 +643,13 @@ end
0.5822547982757897,
0.7300051017382696,
],
- tspan=(0.0, 2.0))
+ tspan=(0.0, 2.0),
+ coverage_override=(maxiters = 7,),
+ save_errors=true)
+ lines = readlines(joinpath("out", "deviations.txt"))
+ @test lines[1] == "# iter, simu_time, rho_min, pressure_min"
+ # Run without (with) coverage takes 745 (7) time steps
+ @test startswith(lines[end], "7")
# Ensure that we do not have excessive memory allocations
# (e.g., from type instabilities)
let
diff --git a/test/test_tree_2d_eulermulti.jl b/test/test_tree_2d_eulermulti.jl
index 0aaa9be1c5c..5b984611687 100644
--- a/test/test_tree_2d_eulermulti.jl
+++ b/test/test_tree_2d_eulermulti.jl
@@ -61,7 +61,7 @@ EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_2d_dgsem")
end
@trixi_testset "elixir_eulermulti_shock_bubble_shockcapturing_subcell_positivity.jl" begin
- rm("out/deviations.txt", force = true)
+ rm(joinpath("out", "deviations.txt"), force = true)
@test_trixi_include(joinpath(EXAMPLES_DIR,
"elixir_eulermulti_shock_bubble_shockcapturing_subcell_positivity.jl"),
l2=[
@@ -80,9 +80,10 @@ EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_2d_dgsem")
],
initial_refinement_level=3,
tspan=(0.0, 0.001),
- output_directory="out")
- lines = readlines("out/deviations.txt")
+ save_errors=true)
+ lines = readlines(joinpath("out", "deviations.txt"))
@test lines[1] == "# iter, simu_time, rho1_min, rho2_min"
+ # Runs with and without coverage take 1 and 15 time steps.
@test startswith(lines[end], "1")
# Ensure that we do not have excessive memory allocations
# (e.g., from type instabilities)
From fadfb3ad451515fd7446e4cbef2459c04e8a8abf Mon Sep 17 00:00:00 2001
From: Benjamin Bolm <74359358+bennibolm@users.noreply.github.com>
Date: Wed, 15 May 2024 11:06:16 +0200
Subject: [PATCH 12/44] Add one-sided limiting for nonlinear variables to
tutorial (#1934)
* Add one-sided limiting for nonlinear variables to tutorial
* Add docstring; link to documentation in tutorial
---
docs/literate/src/files/subcell_shock_capturing.jl | 8 +++++++-
src/equations/compressible_euler_2d.jl | 14 ++++++++++++--
2 files changed, 19 insertions(+), 3 deletions(-)
diff --git a/docs/literate/src/files/subcell_shock_capturing.jl b/docs/literate/src/files/subcell_shock_capturing.jl
index b4d08965361..8b5399c23a9 100644
--- a/docs/literate/src/files/subcell_shock_capturing.jl
+++ b/docs/literate/src/files/subcell_shock_capturing.jl
@@ -96,7 +96,8 @@ positivity_variables_nonlinear = [pressure]
# ### Local bounds
# Second, Trixi.jl supports the limiting with local bounds for conservative variables using a
-# two-sided Zalesak-type limiter ([Zalesak, 1979](https://doi.org/10.1016/0021-9991(79)90051-2)).
+# two-sided Zalesak-type limiter ([Zalesak, 1979](https://doi.org/10.1016/0021-9991(79)90051-2))
+# and for general non-linear variables using a one-sided Newton-bisection algorithm.
# They allow to avoid spurious oscillations within the global bounds and to improve the
# shock-capturing capabilities of the method. The corresponding numerical admissibility conditions
# are frequently formulated as local maximum or minimum principles. The local bounds are computed
@@ -108,6 +109,11 @@ positivity_variables_nonlinear = [pressure]
# the following.
local_twosided_variables_cons = ["rho"]
+# To limit non-linear variables locally, pass the variable function combined with the requested
+# bound (`min` or `max`) as a tuple. For instance, to impose a lower local bound on the modified
+# specific entropy [`Trixi.entropy_guermond_etal`](@ref), use
+local_onesided_variables_nonlinear = [(Trixi.entropy_guermond_etal, min)]
+
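+# As a rough sketch (keyword names assumed to match the variable names used above; a complete
+# setup follows in the exemplary simulation below), these lists are eventually passed to the
+# IDP limiter along the lines of
+# ````julia
+# limiter_idp = SubcellLimiterIDP(equations, basis;
+#                                 positivity_variables_nonlinear = positivity_variables_nonlinear,
+#                                 local_twosided_variables_cons = local_twosided_variables_cons,
+#                                 local_onesided_variables_nonlinear = local_onesided_variables_nonlinear)
+# ````
+# and combined with the high-order DG and low-order FV methods in `VolumeIntegralSubcellLimiting`.
+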
# ## Exemplary simulation
# How to set up a simulation using the IDP limiting becomes clearer when looking at an exemplary
# setup. This will be a simplified version of `tree_2d_dgsem/elixir_euler_blast_wave_sc_subcell.jl`.
diff --git a/src/equations/compressible_euler_2d.jl b/src/equations/compressible_euler_2d.jl
index 0614066806c..d15c5c65355 100644
--- a/src/equations/compressible_euler_2d.jl
+++ b/src/equations/compressible_euler_2d.jl
@@ -2018,8 +2018,18 @@ end
return cons2entropy(u, equations)
end
-# Calculate the modified specific entropy of Guermond et al. (2019): s_0 = p * rho^(-gamma) / (gamma-1).
-# Note: This is *not* the "conventional" specific entropy s = ln(p / rho^(gamma)).
+@doc raw"""
+ entropy_guermond_etal(u, equations::CompressibleEulerEquations2D)
+
+Calculate the modified specific entropy of Guermond et al. (2019):
+```math
+s_0 = p \rho^{-\gamma} / (\gamma - 1).
+```
+Note: This is *not* the "conventional" specific entropy ``s = \ln(p / \rho^\gamma)``.
+- Guermond et al. (2019)
+ Invariant domain preserving discretization-independent schemes and convex limiting for hyperbolic systems.
+ [DOI: 10.1016/j.cma.2018.11.036](https://doi.org/10.1016/j.cma.2018.11.036)
+"""
@inline function entropy_guermond_etal(u, equations::CompressibleEulerEquations2D)
rho, rho_v1, rho_v2, rho_e = u
From 3d0cc8d85e02abb913d30330c9e993b2b26151d6 Mon Sep 17 00:00:00 2001
From: Huiyu Xie
Date: Wed, 15 May 2024 07:25:56 -0700
Subject: [PATCH 13/44] Provide document of numeric types and type stability
(#1938)
* start with docs/src/conventions
* check doc style
* initial docs version
* check and add more
* add more check
* apply suggestions from review
* apply suggestions from code review
Co-authored-by: Michael Schlottke-Lakemper
* revise again
* apply suggestions from code review
Co-authored-by: Michael Schlottke-Lakemper
---------
Co-authored-by: Michael Schlottke-Lakemper
---
docs/src/conventions.md | 71 ++++++++++++++++++++++++++++++++++++++---
1 file changed, 67 insertions(+), 4 deletions(-)
diff --git a/docs/src/conventions.md b/docs/src/conventions.md
index 4f9e0ec4e67..dee860d675e 100644
--- a/docs/src/conventions.md
+++ b/docs/src/conventions.md
@@ -41,7 +41,7 @@ following naming conventions:
use these indices.
-# Keywords in elixirs
+## Keywords in elixirs
Trixi.jl is distributed with several examples in the form of elixirs, small
Julia scripts containing everything to set up and run a simulation. Working
@@ -61,9 +61,9 @@ can only perform simple replacements. Some standard variables names are
Moreover, [`convergence_test`](@ref) requires that the spatial resolution is
set via the keywords
- `initial_refinement_level`
- (an integer, e.g. for the [`TreeMesh`](@ref) and the [`P4estMesh`](@ref)) or
+ (an integer, e.g., for the [`TreeMesh`](@ref) and the [`P4estMesh`](@ref)) or
- `cells_per_dimension`
- (a tuple of integers, one per spatial dimension, e.g. for the [`StructuredMesh`](@ref)
+ (a tuple of integers, one per spatial dimension, e.g., for the [`StructuredMesh`](@ref)
and the [`DGMultiMesh`](@ref)).
@@ -101,8 +101,71 @@ based on the following rules.
(or `wrap_array_native(u_ode, semi)`) for further processing.
- When some solution is passed together with the `mesh, equations, solver, cache, ...`,
it is already wrapped via `wrap_array` (or `wrap_array_native`).
-- Exceptions of this rule are possible, e.g. for AMR, but must be documented in
+- Exceptions of this rule are possible, e.g., for AMR, but must be documented in
the code.
- `wrap_array` should be used as default option. `wrap_array_native` should only
be used when necessary, e.g., to avoid additional overhead when interfacing
with external C libraries such as HDF5, MPI, or visualization.
+
+## Numeric types and type stability
+
+In Trixi.jl, we use generic programming to support custom data types to store the numerical simulation data, including standard floating point types and automatic differentiation types.
+Specifically, `Float32` and `Float64` types are fully supported, including the ability to run Trixi.jl on hardware that only supports `Float32` types.
+We ensure the type stability of these numeric types throughout the development process.
+Below are some guidelines to apply in various scenarios.
+
+### Exact floating-point numbers
+
+Some real numbers can be represented exactly as both `Float64` and `Float32` values (e.g., `0.25`, `0.5`, `1/2`). We prefer to write such numbers as `Float32` literals, since they promote to `Float64` without loss of precision where needed and keep the code type-stable in both precisions. For example,
+```julia
+# Assume we have `0.25`, `0.5`, `1/2` in function
+0.25f0, 0.5f0, 0.5f0 # corresponding numbers
+```
+Generally, this equivalence holds for integer multiples of powers of two, i.e., numbers that can be written as ``m \cdot 2^n`` with ``m, n \in \mathbb{Z}``, as long as ``m`` and ``n`` are such that the result is representable as a [single precision floating point](https://en.wikipedia.org/wiki/Single-precision_floating-point_format) value. If a decimal value `v` is exactly representable in `Float32`, the expression
+```julia
+Float32(v) == v
+```
+will evaluate to `true`.
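+For instance (a minimal check, with values chosen only for illustration):
+```julia
+Float32(0.25) == 0.25 # true, 0.25 = 1 * 2^(-2) is exactly representable
+Float32(0.1) == 0.1   # false, `0.1` has no exact binary representation
+```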
+
+### Non-exact floating-point numbers
+
+For real numbers that cannot be represented exactly in floating-point arithmetic (e.g., `0.1`, `1/3`, `pi`), use the `convert` function to make them consistent with the type of the function input. For example,
+```julia
+# Assume we are handling `pi` in function
+function foo(..., input, ...)
+ RealT = eltype(input) # see **notes** below
+ # ...
+ c1 = convert(RealT, pi) * c2 # sample operation
+ # ...
+end
+```
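+With this pattern, a `Float32` input keeps the computation in single precision, while a `Float64`
+input promotes it to double precision. A minimal sketch (variable names are only illustrative):
+```julia
+x32 = 0.5f0                    # Float32 input
+c32 = convert(eltype(x32), pi) # 3.1415927f0, stays Float32
+
+x64 = 0.5                      # Float64 input
+c64 = convert(eltype(x64), pi) # 3.141592653589793
+```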
+
+### Integer numbers
+
+Integers need special consideration. In most cases it is feasible to either keep them in integer format or to change them to a specific type with functions like `convert` (as mentioned above), `zero`, and `one`. We aim to balance code readability and consistency while maintaining type stability. The following examples illustrate typical situations.
+```julia
+# The first example - `SVector`: keep the code consistent within the `SVector`
+SVector(0, 0, 0)
+SVector(zero(RealT), zero(RealT), zero(RealT))
+SVector(one(RealT), one(RealT), one(RealT))
+
+# The second example - inner functions: keep them type-stable as well
+function foo(..., input, ...)
+    RealT = eltype(input) # see **notes** below
+    # ...
+    c1 = c2 > 0.5f0 ? one(RealT) : convert(RealT, 0.1) # make type-stable
+    # ...
+end
+
+# The third example - some operations (e.g., `/`, `sqrt`, `inv`): always convert the operands explicitly
+c1 = convert(RealT, 4) # assume `RealT` was determined earlier
+c2 = 1 / c1
+c3 = sqrt(c1)
+c4 = inv(c1)
+```
+In general, integer numbers require a case-by-case strategy to maintain type stability.
+
+### Notes
+1. If the function receives a local, pointwise vector of solution variables `u`, as in `flux(u, equations)`, use `u` to determine the real type via `eltype(u)`.
+2. If `u` is not passed as an argument but a vector of coordinates `x` is, as in `initial_condition(x, t, equations)`, use `eltype(x)` instead.
+3. Otherwise, choose an appropriate argument to determine the real type.
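+
+As an illustration of the second note, a hypothetical initial condition (shown only as a sketch,
+not part of Trixi.jl) could determine its real type from the coordinate vector `x`:
+```julia
+function initial_condition_example(x, t, equations)
+    RealT = eltype(x)          # real type is determined by the coordinates
+    rho = one(RealT)
+    v1 = convert(RealT, 0.1)
+    p = convert(RealT, 10 / 9)
+    return SVector(rho, v1, p)
+end
+```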
\ No newline at end of file
From 715be52eef85ab0868c6ba1b7326cec3f8b2de91 Mon Sep 17 00:00:00 2001
From: mleprovost <38221506+mleprovost@users.noreply.github.com>
Date: Thu, 16 May 2024 00:10:15 -0400
Subject: [PATCH 14/44] Added Shu-Osher initialization for 1D compressible
Euler with Gauss nodes (#1943)
* Added 1D Shu-Osher initialization for compressible Euler 1D
* Fix: Addresses comments from PR 1943
* Added test for Euler 1D Shu Osher with Gauss nodes
* Remove useless comment in test
* Formate files with JuliaFormatter 1.0.45
* Fixed error in test for Shu Osher Gauss node problem
* Fix issue in initial condition for Shu Osher
---------
Co-authored-by: Hendrik Ranocha
---
...r_euler_shu_osher_gauss_shock_capturing.jl | 93 +++++++++++++++++++
test/test_dgmulti_1d.jl | 24 +++++
2 files changed, 117 insertions(+)
create mode 100644 examples/dgmulti_1d/elixir_euler_shu_osher_gauss_shock_capturing.jl
diff --git a/examples/dgmulti_1d/elixir_euler_shu_osher_gauss_shock_capturing.jl b/examples/dgmulti_1d/elixir_euler_shu_osher_gauss_shock_capturing.jl
new file mode 100644
index 00000000000..79c92656176
--- /dev/null
+++ b/examples/dgmulti_1d/elixir_euler_shu_osher_gauss_shock_capturing.jl
@@ -0,0 +1,93 @@
+using Trixi
+using OrdinaryDiffEq
+
+gamma_gas = 1.4
+equations = CompressibleEulerEquations1D(gamma_gas)
+
+###############################################################################
+# setup the GSBP DG discretization that uses the Gauss operators from
+# Chan, Del Rey Fernandez, Carpenter (2019).
+# [https://doi.org/10.1137/18M1209234](https://doi.org/10.1137/18M1209234)
+
+# Shu-Osher initial condition for 1D compressible Euler equations
+# Example 8 from Shu, Osher (1989).
+# [https://doi.org/10.1016/0021-9991(89)90222-2](https://doi.org/10.1016/0021-9991(89)90222-2)
+function initial_condition_shu_osher(x, t, equations::CompressibleEulerEquations1D)
+ x0 = -4
+
+ rho_left = 27 / 7
+ v_left = 4 * sqrt(35) / 9
+ p_left = 31 / 3
+
+ # Changed v_right = 0 to v_right = 0.1 to avoid positivity issues.
+ v_right = 0.1
+ p_right = 1.0
+
+ rho = ifelse(x[1] > x0, 1 + 1 / 5 * sin(5 * x[1]), rho_left)
+ v = ifelse(x[1] > x0, v_right, v_left)
+ p = ifelse(x[1] > x0, p_right, p_left)
+
+ return prim2cons(SVector(rho, v, p),
+ equations)
+end
+
+initial_condition = initial_condition_shu_osher
+
+surface_flux = flux_lax_friedrichs
+volume_flux = flux_ranocha
+
+polydeg = 3
+basis = DGMultiBasis(Line(), polydeg, approximation_type = GaussSBP())
+
+indicator_sc = IndicatorHennemannGassner(equations, basis,
+ alpha_max = 0.5,
+ alpha_min = 0.001,
+ alpha_smooth = true,
+ variable = density_pressure)
+volume_integral = VolumeIntegralShockCapturingHG(indicator_sc;
+ volume_flux_dg = volume_flux,
+ volume_flux_fv = surface_flux)
+
+dg = DGMulti(basis,
+ surface_integral = SurfaceIntegralWeakForm(surface_flux),
+ volume_integral = volume_integral)
+
+boundary_condition = BoundaryConditionDirichlet(initial_condition)
+boundary_conditions = (; :entire_boundary => boundary_condition)
+
+###############################################################################
+# setup the 1D mesh
+
+cells_per_dimension = (64,)
+mesh = DGMultiMesh(dg, cells_per_dimension,
+ coordinates_min = (-5.0,), coordinates_max = (5.0,),
+ periodicity = false)
+
+###############################################################################
+# setup the semidiscretization and ODE problem
+
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition,
+ dg, boundary_conditions = boundary_conditions)
+
+tspan = (0.0, 1.0)
+ode = semidiscretize(semi, tspan)
+
+###############################################################################
+# setup the callbacks
+
+# prints a summary of the simulation setup and resets the timers
+summary_callback = SummaryCallback()
+
+# analyses the solution at regular intervals and prints the results
+analysis_callback = AnalysisCallback(semi, interval = 100, uEltype = real(dg))
+
+# handles the re-calculation of the maximum Δt after each time step
+stepsize_callback = StepsizeCallback(cfl = 0.1)
+
+# collect all callbacks such that they can be passed to the ODE solver
+callbacks = CallbackSet(summary_callback, analysis_callback, stepsize_callback)
+
+###############################################################################
+# run the simulation
+
+sol = solve(ode, SSPRK43(), adaptive = true, callback = callbacks, save_everystep = false)
diff --git a/test/test_dgmulti_1d.jl b/test/test_dgmulti_1d.jl
index 0d083cf9a72..7ac3c735642 100644
--- a/test/test_dgmulti_1d.jl
+++ b/test/test_dgmulti_1d.jl
@@ -69,6 +69,30 @@ end
end
end
+@trixi_testset "elixir_euler_shu_osher_gauss_shock_capturing.jl " begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR,
+ "elixir_euler_shu_osher_gauss_shock_capturing.jl"),
+ cells_per_dimension=(64,), tspan=(0.0, 1.0),
+ l2=[
+ 1.673813320412685,
+ 5.980737909458242,
+ 21.587822949251173,
+ ],
+ linf=[
+ 3.1388039126918064,
+ 10.630952212105246,
+ 37.682826521024865,
+ ])
+ # Ensure that we do not have excessive memory allocations
+ # (e.g., from type instabilities)
+ let
+ t = sol.t[end]
+ u_ode = sol.u[end]
+ du_ode = similar(u_ode)
+ @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000
+ end
+end
+
@trixi_testset "elixir_euler_flux_diff.jl (convergence)" begin
mean_convergence = convergence_test(@__MODULE__,
joinpath(EXAMPLES_DIR,
From 3d044b8e44b0ea7a9e662b7b7649917ffe3c69d6 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Thu, 16 May 2024 08:20:33 +0200
Subject: [PATCH 15/44] set version to v0.7.13
---
Project.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Project.toml b/Project.toml
index 769c1fe7d08..c2c8a04448c 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "Trixi"
uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb"
authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "]
-version = "0.7.13-pre"
+version = "0.7.13"
[deps]
CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
From 6e5ff49a8f68bb1ca79ae5ea021cb1ff01291efb Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Thu, 16 May 2024 08:20:48 +0200
Subject: [PATCH 16/44] set development version to v0.7.14-pre
---
Project.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Project.toml b/Project.toml
index c2c8a04448c..094ad067ca3 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "Trixi"
uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb"
authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "]
-version = "0.7.13"
+version = "0.7.14-pre"
[deps]
CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
From b98b6f07c3b4242eaa4ffe5d17dd143a5a267e60 Mon Sep 17 00:00:00 2001
From: Andrew Winters
Date: Fri, 17 May 2024 08:21:24 +0200
Subject: [PATCH 17/44] Mean value in Zhang-Shu limiter on curved meshes
(#1945)
* solution mean value computation must take metric terms into account on mesh types other than TreeMesh
* introduce helper function to compute mean value on 2d element
* apply formatter
* introduce helper function in 3d
* simplify mean value computation. no need for a separate function
* fix setting total_volume to be zero
* put calc_mean_value functions back in
* apply formatter
* add generic get_inverse_jacobian function and remove other specializations
* swap order of element and indices for consistency
* revert ordering. see if tests actually run
* fix index order
* fix mistake in TreeMesh version
* clarify the tree mesh version
---
src/callbacks_stage/positivity_zhang_shu_dg2d.jl | 11 ++++++++---
src/callbacks_stage/positivity_zhang_shu_dg3d.jl | 11 ++++++++---
src/solvers/dgsem_structured/dg.jl | 8 ++++++++
src/solvers/dgsem_tree/dg.jl | 6 ++++++
4 files changed, 30 insertions(+), 6 deletions(-)
diff --git a/src/callbacks_stage/positivity_zhang_shu_dg2d.jl b/src/callbacks_stage/positivity_zhang_shu_dg2d.jl
index b37ed9c49d5..813dd65878b 100644
--- a/src/callbacks_stage/positivity_zhang_shu_dg2d.jl
+++ b/src/callbacks_stage/positivity_zhang_shu_dg2d.jl
@@ -8,6 +8,7 @@
function limiter_zhang_shu!(u, threshold::Real, variable,
mesh::AbstractMesh{2}, equations, dg::DGSEM, cache)
@unpack weights = dg.basis
+ @unpack inverse_jacobian = cache.elements
@threaded for element in eachelement(dg, cache)
# determine minimum value
@@ -22,12 +23,16 @@ function limiter_zhang_shu!(u, threshold::Real, variable,
# compute mean value
u_mean = zero(get_node_vars(u, equations, dg, 1, 1, element))
+ total_volume = zero(eltype(u))
for j in eachnode(dg), i in eachnode(dg)
+ volume_jacobian = abs(inv(get_inverse_jacobian(inverse_jacobian, mesh,
+ i, j, element)))
u_node = get_node_vars(u, equations, dg, i, j, element)
- u_mean += u_node * weights[i] * weights[j]
+ u_mean += u_node * weights[i] * weights[j] * volume_jacobian
+ total_volume += weights[i] * weights[j] * volume_jacobian
end
- # note that the reference element is [-1,1]^ndims(dg), thus the weights sum to 2
- u_mean = u_mean / 2^ndims(mesh)
+ # normalize with the total volume
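+ # (On curved meshes the Jacobian varies from node to node, so the quadrature weights
+ # alone no longer sum to the element volume; normalizing by the accumulated `total_volume`
+ # yields the correct mean value.)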
+ u_mean = u_mean / total_volume
# We compute the value directly with the mean values, as we assume that
# Jensen's inequality holds (e.g. pressure for compressible Euler equations).
diff --git a/src/callbacks_stage/positivity_zhang_shu_dg3d.jl b/src/callbacks_stage/positivity_zhang_shu_dg3d.jl
index 773a236d831..156abf35b4c 100644
--- a/src/callbacks_stage/positivity_zhang_shu_dg3d.jl
+++ b/src/callbacks_stage/positivity_zhang_shu_dg3d.jl
@@ -8,6 +8,7 @@
function limiter_zhang_shu!(u, threshold::Real, variable,
mesh::AbstractMesh{3}, equations, dg::DGSEM, cache)
@unpack weights = dg.basis
+ @unpack inverse_jacobian = cache.elements
@threaded for element in eachelement(dg, cache)
# determine minimum value
@@ -22,12 +23,16 @@ function limiter_zhang_shu!(u, threshold::Real, variable,
# compute mean value
u_mean = zero(get_node_vars(u, equations, dg, 1, 1, 1, element))
+ total_volume = zero(eltype(u))
for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg)
+ volume_jacobian = abs(inv(get_inverse_jacobian(inverse_jacobian, mesh,
+ i, j, k, element)))
u_node = get_node_vars(u, equations, dg, i, j, k, element)
- u_mean += u_node * weights[i] * weights[j] * weights[k]
+ u_mean += u_node * weights[i] * weights[j] * weights[k] * volume_jacobian
+ total_volume += weights[i] * weights[j] * weights[k] * volume_jacobian
end
- # note that the reference element is [-1,1]^ndims(dg), thus the weights sum to 2
- u_mean = u_mean / 2^ndims(mesh)
+ # normalize with the total volume
+ u_mean = u_mean / total_volume
# We compute the value directly with the mean values, as we assume that
# Jensen's inequality holds (e.g. pressure for compressible Euler equations).
diff --git a/src/solvers/dgsem_structured/dg.jl b/src/solvers/dgsem_structured/dg.jl
index 00e321fba65..424ed5e1e7e 100644
--- a/src/solvers/dgsem_structured/dg.jl
+++ b/src/solvers/dgsem_structured/dg.jl
@@ -76,6 +76,14 @@ end
end
end
+@inline function get_inverse_jacobian(inverse_jacobian,
+ mesh::Union{StructuredMesh, StructuredMeshView,
+ UnstructuredMesh2D, P4estMesh,
+ T8codeMesh},
+ indices...)
+ return inverse_jacobian[indices...]
+end
+
include("containers.jl")
include("dg_1d.jl")
include("dg_2d.jl")
diff --git a/src/solvers/dgsem_tree/dg.jl b/src/solvers/dgsem_tree/dg.jl
index ef9a42b4c1a..0993b3c9b85 100644
--- a/src/solvers/dgsem_tree/dg.jl
+++ b/src/solvers/dgsem_tree/dg.jl
@@ -42,6 +42,12 @@ function volume_jacobian(element, mesh::TreeMesh, cache)
return inv(cache.elements.inverse_jacobian[element])^ndims(mesh)
end
+@inline function get_inverse_jacobian(inverse_jacobian, mesh::TreeMesh,
+ indices...)
+ element = last(indices)
+ return inverse_jacobian[element]
+end
+
# Indicators used for shock-capturing and AMR
include("indicators.jl")
include("indicators_1d.jl")
From c657b0ff642de93eb08cbc2f3da1cbb77c13e216 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Tue, 21 May 2024 16:08:54 +0200
Subject: [PATCH 18/44] set version to v0.7.14
---
Project.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Project.toml b/Project.toml
index 094ad067ca3..200d35c0a85 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "Trixi"
uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb"
authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "]
-version = "0.7.14-pre"
+version = "0.7.14"
[deps]
CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
From 887bab9ea10f12da3e3b4240773638dbb88f46a1 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Tue, 21 May 2024 16:09:07 +0200
Subject: [PATCH 19/44] set development version to v0.7.15-pre
---
Project.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Project.toml b/Project.toml
index 200d35c0a85..9ebb9c307a1 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "Trixi"
uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb"
authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "]
-version = "0.7.14"
+version = "0.7.15-pre"
[deps]
CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
From c2513e2ab392393c9f6230e94c9502d93c2aef00 Mon Sep 17 00:00:00 2001
From: warisa-r <81345089+warisa-r@users.noreply.github.com>
Date: Thu, 23 May 2024 08:42:45 +0200
Subject: [PATCH 20/44] Perk p2 single ext (#1908)
* Templates for PERK p2, p3
* example
* constructor changed
* Modified so that both of the constructor stay
* bring back eps
* correct val
* add constructor of PERK3
* minor fixes
* function and variable name adjustments
* spelling fix
* Change names and spacing according to style guide
* spelling correction
* Apply suggestions from code review
Co-authored-by: Daniel Doehring
* snake case
* revisit perk single p2 p3
* fmt
* fmt
* semantic ordering
* add literature
* Strip code of p = 3
* Add the line to show the error of the elixir
* Make adjustments to a test file and delete example of PERK3
* Update Project.toml
Co-authored-by: Daniel Doehring
* Update examples/tree_1d_dgsem/elixir_advection_PERK2.jl
Co-authored-by: Daniel Doehring
* Add comments in an example of PERK2
Co-authored-by: Daniel Doehring
* Update src/Trixi.jl
Co-authored-by: Daniel Doehring
* Update src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl
Co-authored-by: Daniel Doehring
* Update src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl
Co-authored-by: Daniel Doehring
* Update src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl
Co-authored-by: Daniel Doehring
* Update src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl
Co-authored-by: Daniel Doehring
* add tspan as a parameter and adjust the elixir accordingly
* add an additional constructor and modify the function compute_PERK2_butcher_tableau
* Update src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl
Co-authored-by: Daniel Doehring
* update filter function in polynomial optimizer, add literature, and change se_factors to bc_factors
* Apply suggestions from code review
Co-authored-by: Daniel Doehring
* change tspan to have (,) instead of[,]. filter_eigenvalues minor adjustments
* Apply suggestions from code review
Co-authored-by: Daniel Doehring
* Update src/time_integration/paired_explicit_runge_kutta/polynomial_optimizer.jl
Co-authored-by: Hendrik Ranocha
* Apply suggestions from code review
* Update src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl
* Update src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl
* Update src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl
* Apply suggestions from code review
* use readdlm instead of read_file
* Update src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl
* add DelimFIles
* fmt
* Unit tests
* compat
* ecos compat
* compat
* compat
* compat
* compat
* callbacks
* increase allowed allocs
* fmt
* timer for step callbacks
* deps compat
* remove del files compat
* skip delimitedfiles in downgrade compat
* v1
* Update src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl
* modular imp of the integrator
* modularized PolynamialOptimizer
* PolynomialOpt modularized
* use "fake" extension
* make ECOS weakdep
* fix name
* comment + fmt
* Apply suggestions from code review
Co-authored-by: Daniel Doehring
* comment, fix, and make threshold optional
* Apply suggestions from code review
Co-authored-by: Daniel Doehring
* edit tspan and alive_interval
* Apply suggestions from code review
Co-authored-by: Daniel Doehring
* fmt
* fix fmr in test unit
* fix fmt in test_unit.jl
* some fixes according to code review
* add ECOS as a part of enabling TrixiConvexECOSExt.jl compiles
* state functions and classes from Convex package used in polynomial optimization
* fmt
* remove unconditional output in bisection
* Apply suggestions from code review
Co-authored-by: Michael Schlottke-Lakemper
* apply suggestion from code review
* minor fix
* Apply suggestions from code review
Co-authored-by: Michael Schlottke-Lakemper
* add verbose as an optional argument for printing out some outputs during bisection and filtering eigenvalues
* remove the file with upper case
* add the file with corrected name
* minor fix
* move constructors outside of struct
* Apply suggestions from code review
Co-authored-by: Michael Schlottke-Lakemper
* alter some names from being abbreviated and some fixes
* add comments for some functions and move all polynomial optimizaton related functions to TrixiConvexECOSExt.jl
* add comments to functions computing butcher tableau
* fmt
* apply suggestions + fmt
* apply suggestion according to code review
* Apply suggestions from code review
Co-authored-by: Daniel Doehring
* fix PERK2's name
* add short comment regarding PERK's abbreviation
* fix export
* Update src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl
Co-authored-by: Daniel Doehring
* Update src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl
Co-authored-by: Daniel Doehring
* Update src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl
Co-authored-by: Daniel Doehring
* Update src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl
Co-authored-by: Daniel Doehring
* minor corrections
* remove exported of PERK functions in extension
* set verbose's default value in kwarg
* fix path_monomial_coeffs
* fmt
* add ECOS as dependency in test/Project.toml
* bring back polynomial_optimizer so that user can call the constructor with file path can use filter_eigvals without having to load Convex and ECOS
* fix values for some tests and add use Convex and ECOS to load function extensions
* fix error undefined b1
* attempt to fix error from test by specifying Trixi.entropy
* fmt
* adjust the value of tests to allign with one in CI and add print command for a test in test_unit.jl
* exclude convex warning
* update test vals
* more warning exclusions
* Apply suggestions from code review
Co-authored-by: Michael Schlottke-Lakemper
* Apply suggestions from code review
Co-authored-by: Michael Schlottke-Lakemper
* minor fix
* add what is used from ECOS
* spell check
* add information about this PR in NEWS.md
* Update NEWS.md
Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com>
* fix NEWS.md
* change Trixi.entropy back to just entropy
* Update NEWS.md
Co-authored-by: Michael Schlottke-Lakemper
* Update src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl
Co-authored-by: Michael Schlottke-Lakemper
* Apply suggestions from code review
Co-authored-by: Michael Schlottke-Lakemper
* Apply suggestions from code review
* Apply suggestions from code review
* Update src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl
* add some explanations regarding the arguments of the constructors
* fmt
* exchange "c_end" for "cS" for consistency
* Update src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl
Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com>
* add more explaination
* minor adjustment
* Update src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl
Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com>
* Update src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl
Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com>
* Update src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl
Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com>
---------
Co-authored-by: Daniel_Doehring
Co-authored-by: Daniel Doehring
Co-authored-by: Hendrik Ranocha
Co-authored-by: Michael Schlottke-Lakemper
Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com>
---
.github/workflows/Downgrade.yml | 2 +-
NEWS.md | 5 +-
Project.toml | 9 +
.../tree_1d_dgsem/elixir_advection_perk2.jl | 66 +++
ext/TrixiConvexECOSExt.jl | 158 +++++++
src/Trixi.jl | 8 +
.../methods_PERK2.jl | 410 ++++++++++++++++++
.../paired_explicit_runge_kutta.jl | 12 +
.../polynomial_optimizer.jl | 28 ++
src/time_integration/time_integration.jl | 1 +
test/Project.toml | 6 +
test/test_tree_1d_advection.jl | 14 +
test/test_trixi.jl | 5 +-
test/test_unit.jl | 40 ++
14 files changed, 760 insertions(+), 4 deletions(-)
create mode 100644 examples/tree_1d_dgsem/elixir_advection_perk2.jl
create mode 100644 ext/TrixiConvexECOSExt.jl
create mode 100644 src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl
create mode 100644 src/time_integration/paired_explicit_runge_kutta/paired_explicit_runge_kutta.jl
create mode 100644 src/time_integration/paired_explicit_runge_kutta/polynomial_optimizer.jl
diff --git a/.github/workflows/Downgrade.yml b/.github/workflows/Downgrade.yml
index 234939a8017..46ce5d71e36 100644
--- a/.github/workflows/Downgrade.yml
+++ b/.github/workflows/Downgrade.yml
@@ -72,7 +72,7 @@ jobs:
- uses: julia-actions/cache@v1
- uses: julia-actions/julia-downgrade-compat@v1
with:
- skip: LinearAlgebra,Printf,SparseArrays,UUIDs,DiffEqBase
+ skip: LinearAlgebra,Printf,SparseArrays,UUIDs,DiffEqBase,DelimitedFiles
projects: ., test
- uses: julia-actions/julia-buildpkg@v1
env:
diff --git a/NEWS.md b/NEWS.md
index e2902229f71..ebc8d9cda39 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -7,12 +7,13 @@ for human readability.
## Changes in the v0.7 lifecycle
#### Added
-- Implementation of `TimeSeriesCallback` for curvilinear meshes on `UnstructuredMesh2D` and extension
- to 1D and 3D on `TreeMesh` ([#1855], [#1873]).
+- Implementation of `TimeSeriesCallback` for curvilinear meshes on `UnstructuredMesh2D` and extension to 1D and 3D on `TreeMesh` ([#1855], [#1873]).
- Implementation of 1D Linearized Euler Equations ([#1867]).
- New analysis callback for 2D `P4estMesh` to compute integrated quantities along a boundary surface, e.g., pressure lift and drag coefficients ([#1812]).
- Optional tuple parameter for `GlmSpeedCallback` called `semi_indices` to specify for which semidiscretization of a `SemidiscretizationCoupled` we need to update the GLM speed ([#1835]).
- Subcell local one-sided limiting support for nonlinear variables in 2D for `TreeMesh` ([#1792]).
+- New time integrator `PairedExplicitRK2`, implementing the second-order paired explicit Runge-Kutta
+ method with [Convex.jl](https://github.com/jump-dev/Convex.jl) and [ECOS.jl](https://github.com/jump-dev/ECOS.jl) ([#1908])
## Changes when updating to v0.7 from v0.6.x
diff --git a/Project.toml b/Project.toml
index 9ebb9c307a1..b605cb2c6b1 100644
--- a/Project.toml
+++ b/Project.toml
@@ -7,6 +7,7 @@ version = "0.7.15-pre"
CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
ConstructionBase = "187b0558-2788-49d3-abe0-74a17ed4e7c9"
DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
+DelimitedFiles = "8bb1440f-4735-579b-a4ab-409b98df4dab"
DiffEqBase = "2b5f629d-d688-5b77-993f-72d75c75574e"
DiffEqCallbacks = "459566f4-90b8-5000-8ac3-15dfb0a30def"
Downloads = "f43a241f-c20a-4ad4-852c-f6b1247861c6"
@@ -50,17 +51,23 @@ UUIDs = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
[weakdeps]
Makie = "ee78f7c6-11fb-53f2-987a-cfe4a2b5a57a"
+Convex = "f65535da-76fb-5f13-bab9-19810c17039a"
+ECOS = "e2685f51-7e38-5353-a97d-a921fd2c8199"
[extensions]
TrixiMakieExt = "Makie"
+TrixiConvexECOSExt = ["Convex", "ECOS"]
[compat]
CodeTracking = "1.0.5"
ConstructionBase = "1.3"
+Convex = "0.15.4"
DataStructures = "0.18.15"
+DelimitedFiles = "1"
DiffEqBase = "6 - 6.143"
DiffEqCallbacks = "2.25"
Downloads = "1.6"
+ECOS = "1.1.2"
EllipsisNotation = "1.0"
FillArrays = "0.13.2, 1"
ForwardDiff = "0.10.24"
@@ -103,3 +110,5 @@ julia = "1.8"
[extras]
Makie = "ee78f7c6-11fb-53f2-987a-cfe4a2b5a57a"
+Convex = "f65535da-76fb-5f13-bab9-19810c17039a"
+ECOS = "e2685f51-7e38-5353-a97d-a921fd2c8199"
\ No newline at end of file
diff --git a/examples/tree_1d_dgsem/elixir_advection_perk2.jl b/examples/tree_1d_dgsem/elixir_advection_perk2.jl
new file mode 100644
index 00000000000..87a61515f82
--- /dev/null
+++ b/examples/tree_1d_dgsem/elixir_advection_perk2.jl
@@ -0,0 +1,66 @@
+
+using Convex, ECOS
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# semidiscretization of the linear advection equation
+
+advection_velocity = 1.0
+equations = LinearScalarAdvectionEquation1D(advection_velocity)
+
+# Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux
+solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs)
+
+coordinates_min = -1.0 # minimum coordinate
+coordinates_max = 1.0 # maximum coordinate
+
+# Create a uniformly refined mesh with periodic boundaries
+mesh = TreeMesh(coordinates_min, coordinates_max,
+ initial_refinement_level = 4,
+ n_cells_max = 30_000) # set maximum capacity of tree data structure
+
+# A semidiscretization collects data structures and functions for the spatial discretization
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition_convergence_test,
+ solver)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+# Create ODE problem with time span from 0.0 to 20.0
+tspan = (0.0, 20.0)
+ode = semidiscretize(semi, tspan);
+
+# At the beginning of the main loop, the SummaryCallback prints a summary of the simulation setup
+# and resets the timers
+summary_callback = SummaryCallback()
+
+# The AnalysisCallback analyses the solution at regular intervals and prints the results
+analysis_interval = 100
+analysis_callback = AnalysisCallback(semi, interval = analysis_interval)
+
+# The StepsizeCallback handles the re-calculation of the maximum Δt after each time step
+stepsize_callback = StepsizeCallback(cfl = 2.5)
+
+alive_callback = AliveCallback(alive_interval = analysis_interval)
+
+# Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver
+callbacks = CallbackSet(summary_callback,
+ alive_callback,
+ analysis_callback,
+ stepsize_callback)
+
+###############################################################################
+# run the simulation
+
+# Construct a second-order paired explicit Runge-Kutta method with 6 stages for the given simulation setup.
+# Pass `tspan` to calculate the maximum time step allowed for the bisection algorithm used
+# in calculating the polynomial coefficients of the ODE algorithm.
+ode_algorithm = Trixi.PairedExplicitRK2(6, tspan, semi)
+
+sol = Trixi.solve(ode, ode_algorithm,
+ dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
+ save_everystep = false, callback = callbacks);
+
+# Print the timer summary
+summary_callback()
diff --git a/ext/TrixiConvexECOSExt.jl b/ext/TrixiConvexECOSExt.jl
new file mode 100644
index 00000000000..fac127699ce
--- /dev/null
+++ b/ext/TrixiConvexECOSExt.jl
@@ -0,0 +1,158 @@
+# Package extension for adding Convex-based features to Trixi.jl
+module TrixiConvexECOSExt
+
+# Required for coefficient optimization in P-ERK scheme integrators
+if isdefined(Base, :get_extension)
+ using Convex: MOI, solve!, Variable, minimize, evaluate
+ using ECOS: Optimizer
+else
+ # Until Julia v1.9 is the minimum required version for Trixi.jl, we still support Requires.jl
+ using ..Convex: MOI, solve!, Variable, minimize, evaluate
+ using ..ECOS: Optimizer
+end
+
+# Use other necessary libraries
+using LinearAlgebra: eigvals
+
+# Use functions that are to be extended and additional symbols that are not exported
+using Trixi: Trixi, undo_normalization!, bisect_stability_polynomial, @muladd
+
+# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
+# Since these FMAs can increase the performance of many numerical algorithms,
+# we need to opt-in explicitly.
+# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
+@muladd begin
+#! format: noindent
+
+# Undo normalization of stability polynomial coefficients by index factorial
+# relative to consistency order.
+function Trixi.undo_normalization!(gamma_opt, consistency_order, num_stage_evals)
+ for k in (consistency_order + 1):num_stage_evals
+ gamma_opt[k - consistency_order] = gamma_opt[k - consistency_order] /
+ factorial(k)
+ end
+ return gamma_opt
+end
+
+# Compute stability polynomials for paired explicit Runge-Kutta up to specified consistency
+# order, including contributions from free coefficients for higher orders, and
+# return the maximum absolute value
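+# In terms of z = dt * lambda, the polynomial assembled below reads (illustrative notation;
+# the division by k! is already contained in `normalized_powered_eigvals_scaled`):
+#   p(z) = 1 + sum_{k=1}^{p} z^k / k! + sum_{k=p+1}^{S} gamma_{k-p} * z^k / k!
+# where p is the consistency order and S the number of stage evaluations.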
+function stability_polynomials!(pnoms, consistency_order, num_stage_evals,
+ normalized_powered_eigvals_scaled,
+ gamma)
+ num_eig_vals = length(pnoms)
+
+ # Initialize with zeroth-order (z^0) coefficient
+ for i in 1:num_eig_vals
+ pnoms[i] = 1.0
+ end
+
+ # First `consistency_order` terms of the exponential
+ for k in 1:consistency_order
+ for i in 1:num_eig_vals
+ pnoms[i] += normalized_powered_eigvals_scaled[i, k]
+ end
+ end
+
+ # Contribution from free coefficients
+ for k in (consistency_order + 1):num_stage_evals
+ pnoms += gamma[k - consistency_order] * normalized_powered_eigvals_scaled[:, k]
+ end
+
+ # For optimization only the maximum is relevant
+ return maximum(abs(pnoms))
+end
+
+#=
+The following structures and methods provide a simplified implementation to
+discover the optimal stability polynomial for a given set of `eig_vals`.
+These are designed for the one-step (e.g., Runge-Kutta) integration of initial value
+ordinary and partial differential equations.
+
+- Ketcheson and Ahmadia (2012).
+Optimal stability polynomials for numerical integration of initial value problems
+[DOI: 10.2140/camcos.2012.7.247](https://doi.org/10.2140/camcos.2012.7.247)
+=#
+
+# Perform bisection to optimize timestep for stability of the polynomial
+function Trixi.bisect_stability_polynomial(consistency_order, num_eig_vals,
+ num_stage_evals,
+ dtmax, dteps, eig_vals;
+ verbose = false)
+ dtmin = 0.0
+ dt = -1.0
+ abs_p = -1.0
+
+ # Construct stability polynomial for each eigenvalue
+ pnoms = ones(Complex{Float64}, num_eig_vals, 1)
+
+ # Init datastructure for monomial coefficients
+ gamma = Variable(num_stage_evals - consistency_order)
+
+ normalized_powered_eigvals = zeros(Complex{Float64}, num_eig_vals, num_stage_evals)
+
+ for j in 1:num_stage_evals
+ fac_j = factorial(j)
+ for i in 1:num_eig_vals
+ normalized_powered_eigvals[i, j] = eig_vals[i]^j / fac_j
+ end
+ end
+
+ normalized_powered_eigvals_scaled = similar(normalized_powered_eigvals)
+
+ if verbose
+ println("Start optimization of stability polynomial \n")
+ end
+
+ # Bisection on timestep
+ while dtmax - dtmin > dteps
+ dt = 0.5 * (dtmax + dtmin)
+
+ # Compute stability polynomial for current timestep
+ for k in 1:num_stage_evals
+ dt_k = dt^k
+ for i in 1:num_eig_vals
+ normalized_powered_eigvals_scaled[i, k] = dt_k *
+ normalized_powered_eigvals[i,
+ k]
+ end
+ end
+
+ # Use last optimal values for gamma in (potentially) next iteration
+ problem = minimize(stability_polynomials!(pnoms, consistency_order,
+ num_stage_evals,
+ normalized_powered_eigvals_scaled,
+ gamma))
+
+ solve!(problem,
+ # Parameters taken from default values for EiCOS
+ MOI.OptimizerWithAttributes(Optimizer, "gamma" => 0.99,
+ "delta" => 2e-7,
+ "feastol" => 1e-9,
+ "abstol" => 1e-9,
+ "reltol" => 1e-9,
+ "feastol_inacc" => 1e-4,
+ "abstol_inacc" => 5e-5,
+ "reltol_inacc" => 5e-5,
+ "nitref" => 9,
+ "maxit" => 100,
+ "verbose" => 3); silent_solver = true)
+
+ abs_p = problem.optval
+
+ if abs_p < 1
+ dtmin = dt
+ else
+ dtmax = dt
+ end
+ end
+
+ if verbose
+ println("Concluded stability polynomial optimization \n")
+ end
+
+ return evaluate(gamma), dt
+end
+end # @muladd
+
+end # module TrixiConvexECOSExt
diff --git a/src/Trixi.jl b/src/Trixi.jl
index f3977f1f058..3a882d0962c 100644
--- a/src/Trixi.jl
+++ b/src/Trixi.jl
@@ -310,6 +310,14 @@ function __init__()
end
end
+ @static if !isdefined(Base, :get_extension)
+ @require Convex="f65535da-76fb-5f13-bab9-19810c17039a" begin
+ @require ECOS="e2685f51-7e38-5353-a97d-a921fd2c8199" begin
+ include("../ext/TrixiConvexECOSExt.jl")
+ end
+ end
+ end
+
# FIXME upstream. This is a hacky workaround for
# https://github.com/trixi-framework/Trixi.jl/issues/628
# https://github.com/trixi-framework/Trixi.jl/issues/1185
diff --git a/src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl b/src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl
new file mode 100644
index 00000000000..b3b917dc18d
--- /dev/null
+++ b/src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl
@@ -0,0 +1,410 @@
+# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
+# Since these FMAs can increase the performance of many numerical algorithms,
+# we need to opt-in explicitly.
+# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
+using DelimitedFiles: readdlm
+using LinearAlgebra: eigvals
+
+@muladd begin
+#! format: noindent
+
+# Abstract base type for both single/standalone and multi-level
+# PERK (Paired-Explicit Runge-Kutta) time integration schemes
+abstract type AbstractPairedExplicitRK end
+# Abstract base type for single/standalone PERK time integration schemes
+abstract type AbstractPairedExplicitRKSingle <: AbstractPairedExplicitRK end
+
+# Compute the coefficients of the A matrix in the Butcher tableau using
+# stage scaling factors and monomial coefficients
+function compute_a_coeffs(num_stage_evals, stage_scaling_factors, monomial_coeffs)
+ a_coeffs = copy(monomial_coeffs)
+
+ for stage in 1:(num_stage_evals - 2)
+ a_coeffs[stage] /= stage_scaling_factors[stage]
+ for prev_stage in 1:(stage - 1)
+ a_coeffs[stage] /= a_coeffs[prev_stage]
+ end
+ end
+
+ return reverse(a_coeffs)
+end
+
+# Compute the Butcher tableau for a paired explicit Runge-Kutta method order 2
+# using a list of eigenvalues
+function compute_PairedExplicitRK2_butcher_tableau(num_stages, eig_vals, tspan,
+ bS, cS; verbose = false)
+ # c Vector from Butcher Tableau (defines timestep per stage)
+ c = zeros(num_stages)
+ for k in 2:num_stages
+ c[k] = cS * (k - 1) / (num_stages - 1)
+ end
+ stage_scaling_factors = bS * reverse(c[2:(end - 1)])
+
+ # - 2 Since first entry of A is always zero (explicit method) and second is given by c_2 (consistency)
+ coeffs_max = num_stages - 2
+
+ a_matrix = zeros(coeffs_max, 2)
+ a_matrix[:, 1] = c[3:end]
+
+ consistency_order = 2
+
+ dtmax = tspan[2] - tspan[1]
+ dteps = 1e-9 # Hyperparameter of the optimization, might be too large for systems requiring very small timesteps
+
+ num_eig_vals, eig_vals = filter_eig_vals(eig_vals; verbose)
+
+ monomial_coeffs, dt_opt = bisect_stability_polynomial(consistency_order,
+ num_eig_vals, num_stages,
+ dtmax,
+ dteps,
+ eig_vals; verbose)
+ monomial_coeffs = undo_normalization!(monomial_coeffs, consistency_order,
+ num_stages)
+
+ num_monomial_coeffs = length(monomial_coeffs)
+ @assert num_monomial_coeffs == coeffs_max
+ A = compute_a_coeffs(num_stages, stage_scaling_factors, monomial_coeffs)
+
+ a_matrix[:, 1] -= A
+ a_matrix[:, 2] = A
+
+ return a_matrix, c
+end
+
+# Compute the Butcher tableau for a paired explicit Runge-Kutta method order 2
+# using provided monomial coefficients file
+function compute_PairedExplicitRK2_butcher_tableau(num_stages,
+ base_path_monomial_coeffs::AbstractString,
+ bS, cS)
+
+ # c Vector from Butcher Tableau (defines timestep per stage)
+ c = zeros(num_stages)
+ for k in 2:num_stages
+ c[k] = cS * (k - 1) / (num_stages - 1)
+ end
+ stage_scaling_factors = bS * reverse(c[2:(end - 1)])
+
+ # - 2 Since first entry of A is always zero (explicit method) and second is given by c_2 (consistency)
+ coeffs_max = num_stages - 2
+
+ a_matrix = zeros(coeffs_max, 2)
+ a_matrix[:, 1] = c[3:end]
+
+ path_monomial_coeffs = joinpath(base_path_monomial_coeffs,
+ "gamma_" * string(num_stages) * ".txt")
+
+ @assert isfile(path_monomial_coeffs) "Couldn't find file"
+ monomial_coeffs = readdlm(path_monomial_coeffs, Float64)
+ num_monomial_coeffs = size(monomial_coeffs, 1)
+
+ @assert num_monomial_coeffs == coeffs_max
+ A = compute_a_coeffs(num_stages, stage_scaling_factors, monomial_coeffs)
+
+ a_matrix[:, 1] -= A
+ a_matrix[:, 2] = A
+
+ return a_matrix, c
+end
+
+@doc raw"""
+ PairedExplicitRK2(num_stages, base_path_monomial_coeffs::AbstractString,
+ bS = 1.0, cS = 0.5)
+ PairedExplicitRK2(num_stages, tspan, semi::AbstractSemidiscretization;
+ verbose = false, bS = 1.0, cS = 0.5)
+ PairedExplicitRK2(num_stages, tspan, eig_vals::Vector{ComplexF64};
+ verbose = false, bS = 1.0, cS = 0.5)
+ Parameters:
+ - `num_stages` (`Int`): Number of stages in the PERK method.
+ - `base_path_monomial_coeffs` (`AbstractString`): Path to a file containing
+ monomial coefficients of the stability polynomial of PERK method.
+ The coefficients should be stored in a text file at `joinpath(base_path_monomial_coeffs, "gamma_$(num_stages).txt")` and separated by line breaks.
+ - `tspan`: Time span of the simulation.
+ - `semi` (`AbstractSemidiscretization`): Semidiscretization setup.
+ - `eig_vals` (`Vector{ComplexF64}`): Eigenvalues of the Jacobian of the right-hand side (rhs) of the ODEProblem after the
+ equation has been semidiscretized.
+ - `verbose` (`Bool`, optional): Verbosity flag, default is false.
+ - `bS` (`Float64`, optional): Value of b in the Butcher tableau at b_s, where
+ s is the number of stages, default is 1.0.
+ - `cS` (`Float64`, optional): Value of c in the Butcher tableau at c_s, where
+ s is the number of stages, default is 0.5.
+
+The following structures and methods provide a minimal implementation of
+the second-order paired explicit Runge-Kutta (PERK) method
+optimized for a certain simulation setup (PDE, IC & BC, Riemann Solver, DG Solver).
+
+- Brian Vermeire (2019).
+ Paired explicit Runge-Kutta schemes for stiff systems of equations
+ [DOI: 10.1016/j.jcp.2019.05.014](https://doi.org/10.1016/j.jcp.2019.05.014)
+"""
+mutable struct PairedExplicitRK2 <: AbstractPairedExplicitRKSingle
+ const num_stages::Int
+
+ a_matrix::Matrix{Float64}
+ c::Vector{Float64}
+ b1::Float64
+ bS::Float64
+ cS::Float64
+end # struct PairedExplicitRK2
+
+# Constructor that reads the coefficients from a file
+function PairedExplicitRK2(num_stages, base_path_monomial_coeffs::AbstractString,
+ bS = 1.0, cS = 0.5)
+ a_matrix, c = compute_PairedExplicitRK2_butcher_tableau(num_stages,
+ base_path_monomial_coeffs,
+ bS, cS)
+
+ return PairedExplicitRK2(num_stages, a_matrix, c, 1 - bS, bS, cS)
+end
+
+# Constructor that calculates the coefficients with polynomial optimizer from a
+# semidiscretization
+function PairedExplicitRK2(num_stages, tspan, semi::AbstractSemidiscretization;
+ verbose = false,
+ bS = 1.0, cS = 0.5)
+ eig_vals = eigvals(jacobian_ad_forward(semi))
+
+ return PairedExplicitRK2(num_stages, tspan, eig_vals; verbose, bS, cS)
+end
+
+# Constructor that calculates the coefficients with polynomial optimizer from a
+# list of eigenvalues
+function PairedExplicitRK2(num_stages, tspan, eig_vals::Vector{ComplexF64};
+ verbose = false,
+ bS = 1.0, cS = 0.5)
+ a_matrix, c = compute_PairedExplicitRK2_butcher_tableau(num_stages,
+ eig_vals, tspan,
+ bS, cS;
+ verbose)
+
+ return PairedExplicitRK2(num_stages, a_matrix, c, 1 - bS, bS, cS)
+end
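+
+# Illustrative usage of the constructors above (a sketch; `semi` denotes a semidiscretization
+# and `path` a directory containing a file "gamma_6.txt" with the monomial coefficients):
+#   ode_algorithm = PairedExplicitRK2(6, path)             # coefficients read from file
+#   ode_algorithm = PairedExplicitRK2(6, (0.0, 1.0), semi) # coefficients optimized for `semi`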
+
+# This struct is needed to fake https://github.com/SciML/OrdinaryDiffEq.jl/blob/0c2048a502101647ac35faabd80da8a5645beac7/src/integrators/type.jl#L1
+mutable struct PairedExplicitRKOptions{Callback}
+ callback::Callback # callbacks; used in Trixi
+ adaptive::Bool # whether the algorithm is adaptive; ignored
+ dtmax::Float64 # ignored
+ maxiters::Int # maximal number of time steps
+ tstops::Vector{Float64} # tstops from https://diffeq.sciml.ai/v6.8/basics/common_solver_opts/#Output-Control-1; ignored
+end
+
+function PairedExplicitRKOptions(callback, tspan; maxiters = typemax(Int), kwargs...)
+ PairedExplicitRKOptions{typeof(callback)}(callback, false, Inf, maxiters,
+ [last(tspan)])
+end
+
+abstract type PairedExplicitRK end
+abstract type AbstractPairedExplicitRKSingleIntegrator <: PairedExplicitRK end
+
+# This struct is needed to fake https://github.com/SciML/OrdinaryDiffEq.jl/blob/0c2048a502101647ac35faabd80da8a5645beac7/src/integrators/type.jl#L77
+# This implements the interface components described at
+# https://diffeq.sciml.ai/v6.8/basics/integrator/#Handing-Integrators-1
+# which are used in Trixi.
+mutable struct PairedExplicitRK2Integrator{RealT <: Real, uType, Params, Sol, F, Alg,
+ PairedExplicitRKOptions} <:
+ AbstractPairedExplicitRKSingleIntegrator
+ u::uType
+ du::uType
+ u_tmp::uType
+ t::RealT
+ dt::RealT # current time step
+ dtcache::RealT # ignored
+ iter::Int # current number of time steps (iteration)
+ p::Params # will be the semidiscretization from Trixi
+ sol::Sol # faked
+ f::F
+ alg::Alg # This is our own class written above; Abbreviation for ALGorithm
+ opts::PairedExplicitRKOptions
+ finalstep::Bool # added for convenience
+ # PairedExplicitRK2 stages:
+ k1::uType
+ k_higher::uType
+end
+
+# Forward integrator.stats.naccept to integrator.iter (see GitHub PR#771)
+function Base.getproperty(integrator::PairedExplicitRK, field::Symbol)
+ if field === :stats
+ return (naccept = getfield(integrator, :iter),)
+ end
+ # general fallback
+ return getfield(integrator, field)
+end
+
+function init(ode::ODEProblem, alg::PairedExplicitRK2;
+ dt, callback = nothing, kwargs...)
+ u0 = copy(ode.u0)
+ du = zero(u0)
+ u_tmp = zero(u0)
+
+ # PairedExplicitRK2 stages
+ k1 = zero(u0)
+ k_higher = zero(u0)
+
+ t0 = first(ode.tspan)
+ iter = 0
+
+ integrator = PairedExplicitRK2Integrator(u0, du, u_tmp, t0, dt, zero(dt), iter,
+ ode.p,
+ (prob = ode,), ode.f, alg,
+ PairedExplicitRKOptions(callback,
+ ode.tspan;
+ kwargs...),
+ false,
+ k1, k_higher)
+
+ # initialize callbacks
+ if callback isa CallbackSet
+ for cb in callback.continuous_callbacks
+ error("unsupported")
+ end
+ for cb in callback.discrete_callbacks
+ cb.initialize(cb, integrator.u, integrator.t, integrator)
+ end
+ elseif !isnothing(callback)
+ error("unsupported")
+ end
+
+ return integrator
+end
+
+# Fakes `solve`: https://diffeq.sciml.ai/v6.8/basics/overview/#Solving-the-Problems-1
+function solve(ode::ODEProblem, alg::PairedExplicitRK2;
+ dt, callback = nothing, kwargs...)
+ integrator = init(ode, alg, dt = dt, callback = callback; kwargs...)
+
+ # Start actual solve
+ solve_steps!(integrator)
+end
+
+function solve_steps!(integrator::PairedExplicitRK2Integrator)
+ @unpack prob = integrator.sol
+
+ integrator.finalstep = false
+
+ @trixi_timeit timer() "main loop" while !integrator.finalstep
+ step!(integrator)
+ end # "main loop" timer
+
+ return TimeIntegratorSolution((first(prob.tspan), integrator.t),
+ (prob.u0, integrator.u),
+ integrator.sol.prob)
+end
+
+function step!(integrator::PairedExplicitRK2Integrator)
+ @unpack prob = integrator.sol
+ @unpack alg = integrator
+ t_end = last(prob.tspan)
+ callbacks = integrator.opts.callback
+
+ integrator.finalstep = false
+
+ @assert !integrator.finalstep
+ if isnan(integrator.dt)
+ error("time step size `dt` is NaN")
+ end
+
+ # if the next iteration would push the simulation beyond the end time, set dt accordingly
+ if integrator.t + integrator.dt > t_end ||
+ isapprox(integrator.t + integrator.dt, t_end)
+ integrator.dt = t_end - integrator.t
+ terminate!(integrator)
+ end
+
+ @trixi_timeit timer() "Paired Explicit Runge-Kutta ODE integration step" begin
+ # k1
+ integrator.f(integrator.du, integrator.u, prob.p, integrator.t)
+ @threaded for i in eachindex(integrator.du)
+ integrator.k1[i] = integrator.du[i] * integrator.dt
+ end
+
+ # Construct current state
+ @threaded for i in eachindex(integrator.u)
+ integrator.u_tmp[i] = integrator.u[i] + alg.c[2] * integrator.k1[i]
+ end
+ # k2
+ integrator.f(integrator.du, integrator.u_tmp, prob.p,
+ integrator.t + alg.c[2] * integrator.dt)
+
+ @threaded for i in eachindex(integrator.du)
+ integrator.k_higher[i] = integrator.du[i] * integrator.dt
+ end
+
+ # Higher stages
+ for stage in 3:(alg.num_stages)
+ # Construct current state
+ @threaded for i in eachindex(integrator.u)
+ integrator.u_tmp[i] = integrator.u[i] +
+ alg.a_matrix[stage - 2, 1] *
+ integrator.k1[i] +
+ alg.a_matrix[stage - 2, 2] *
+ integrator.k_higher[i]
+ end
+
+ integrator.f(integrator.du, integrator.u_tmp, prob.p,
+ integrator.t + alg.c[stage] * integrator.dt)
+
+ @threaded for i in eachindex(integrator.du)
+ integrator.k_higher[i] = integrator.du[i] * integrator.dt
+ end
+ end
+
+ @threaded for i in eachindex(integrator.u)
+ integrator.u[i] += alg.b1 * integrator.k1[i] +
+ alg.bS * integrator.k_higher[i]
+ end
+ end # PairedExplicitRK2 step
+
+ integrator.iter += 1
+ integrator.t += integrator.dt
+
+ @trixi_timeit timer() "Step-Callbacks" begin
+ # handle callbacks
+ if callbacks isa CallbackSet
+ foreach(callbacks.discrete_callbacks) do cb
+ if cb.condition(integrator.u, integrator.t, integrator)
+ cb.affect!(integrator)
+ end
+ return nothing
+ end
+ end
+ end
+
+ # respect maximum number of iterations
+ if integrator.iter >= integrator.opts.maxiters && !integrator.finalstep
+ @warn "Interrupted. Larger maxiters is needed."
+ terminate!(integrator)
+ end
+end
+
+# get a cache where the RHS can be stored
+get_du(integrator::PairedExplicitRK) = integrator.du
+get_tmp_cache(integrator::PairedExplicitRK) = (integrator.u_tmp,)
+
+# some algorithms from DiffEq like FSAL-ones need to be informed when a callback has modified u
+u_modified!(integrator::PairedExplicitRK, ::Bool) = false
+
+# used by adaptive timestepping algorithms in DiffEq
+function set_proposed_dt!(integrator::PairedExplicitRK, dt)
+ integrator.dt = dt
+end
+
+function get_proposed_dt(integrator::PairedExplicitRK)
+ return integrator.dt
+end
+
+# stop the time integration
+function terminate!(integrator::PairedExplicitRK)
+ integrator.finalstep = true
+ empty!(integrator.opts.tstops)
+end
+
+# used for AMR (Adaptive Mesh Refinement)
+function Base.resize!(integrator::PairedExplicitRK2Integrator, new_size)
+ resize!(integrator.u, new_size)
+ resize!(integrator.du, new_size)
+ resize!(integrator.u_tmp, new_size)
+
+ resize!(integrator.k1, new_size)
+ resize!(integrator.k_higher, new_size)
+end
+end # @muladd
diff --git a/src/time_integration/paired_explicit_runge_kutta/paired_explicit_runge_kutta.jl b/src/time_integration/paired_explicit_runge_kutta/paired_explicit_runge_kutta.jl
new file mode 100644
index 00000000000..b73ea758312
--- /dev/null
+++ b/src/time_integration/paired_explicit_runge_kutta/paired_explicit_runge_kutta.jl
@@ -0,0 +1,12 @@
+# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
+# Since these FMAs can increase the performance of many numerical algorithms,
+# we need to opt-in explicitly.
+# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
+@muladd begin
+#! format: noindent
+
+# Basic implementation of the second-order paired explicit Runge-Kutta (PERK) method
+include("methods_PERK2.jl")
+# Define all of the functions necessary for polynomial optimizations
+include("polynomial_optimizer.jl")
+end # @muladd
diff --git a/src/time_integration/paired_explicit_runge_kutta/polynomial_optimizer.jl b/src/time_integration/paired_explicit_runge_kutta/polynomial_optimizer.jl
new file mode 100644
index 00000000000..bfd53ba2eaf
--- /dev/null
+++ b/src/time_integration/paired_explicit_runge_kutta/polynomial_optimizer.jl
@@ -0,0 +1,28 @@
+# Filter out eigenvalues with positive real parts, those with negative imaginary
+# parts (due to the eigenvalues' symmetry around the real axis), and eigenvalues
+# whose magnitude is smaller than a specified threshold.
+function filter_eig_vals(eig_vals, threshold = 1e-12; verbose = false)
+ filtered_eig_vals = Complex{Float64}[]
+
+ for eig_val in eig_vals
+ if real(eig_val) < 0 && imag(eig_val) > 0 && abs(eig_val) >= threshold
+ push!(filtered_eig_vals, eig_val)
+ end
+ end
+
+ filtered_eig_vals_count = length(eig_vals) - length(filtered_eig_vals)
+
+ if verbose
+ println("$filtered_eig_vals_count eigenvalue(s) are not passed on because " *
+ "they either are in magnitude smaller than $threshold, have positive " *
+ "real parts, or have negative imaginary parts.\n")
+ end
+
+ return length(filtered_eig_vals), filtered_eig_vals
+end
+
+# Add definitions of functions related to polynomial optimization by Convex and ECOS here
+# such that they can be exported from Trixi.jl and extended in the TrixiConvexECOSExt package
+# extension or by the Convex and ECOS-specific code loaded by Requires.jl
+function undo_normalization! end
+function bisect_stability_polynomial end
diff --git a/src/time_integration/time_integration.jl b/src/time_integration/time_integration.jl
index c1e53527121..d19a1fcc37c 100644
--- a/src/time_integration/time_integration.jl
+++ b/src/time_integration/time_integration.jl
@@ -16,4 +16,5 @@ end
include("methods_2N.jl")
include("methods_3Sstar.jl")
include("methods_SSP.jl")
+include("paired_explicit_runge_kutta/paired_explicit_runge_kutta.jl")
end # @muladd
diff --git a/test/Project.toml b/test/Project.toml
index 1491d7a5c5f..5fc2bb18bdf 100644
--- a/test/Project.toml
+++ b/test/Project.toml
@@ -1,7 +1,10 @@
[deps]
Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595"
CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0"
+Convex = "f65535da-76fb-5f13-bab9-19810c17039a"
+DelimitedFiles = "8bb1440f-4735-579b-a4ab-409b98df4dab"
Downloads = "f43a241f-c20a-4ad4-852c-f6b1247861c6"
+ECOS = "e2685f51-7e38-5353-a97d-a921fd2c8199"
ExplicitImports = "7d51a73a-1435-4ff3-83d9-f097790105c7"
FFMPEG = "c87230d0-a227-11e9-1b43-d7ebe4e7570a"
ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
@@ -16,7 +19,10 @@ Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
[compat]
Aqua = "0.8"
CairoMakie = "0.10"
+Convex = "0.15.4"
+DelimitedFiles = "1"
Downloads = "1"
+ECOS = "1.1.2"
ExplicitImports = "1.0.1"
FFMPEG = "0.4"
ForwardDiff = "0.10.24"
diff --git a/test/test_tree_1d_advection.jl b/test/test_tree_1d_advection.jl
index a580a3b5600..afa92efeddb 100644
--- a/test/test_tree_1d_advection.jl
+++ b/test/test_tree_1d_advection.jl
@@ -81,6 +81,20 @@ end
@test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000
end
end
+
+@trixi_testset "elixir_advection_perk2.jl" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_perk2.jl"),
+ l2=[0.014139244532882265],
+ linf=[0.019997568971592217])
+ # Ensure that we do not have excessive memory allocations
+ # (e.g., from type instabilities)
+ let
+ t = sol.t[end]
+ u_ode = sol.u[end]
+ du_ode = similar(u_ode)
+ @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 8000
+ end
+end
end
end # module
diff --git a/test/test_trixi.jl b/test/test_trixi.jl
index 78195825886..9bfd73ea28d 100644
--- a/test/test_trixi.jl
+++ b/test/test_trixi.jl
@@ -155,7 +155,10 @@ macro test_nowarn_mod(expr, additional_ignore_content = String[])
# TODO: Silence warning introduced by Flux v0.13.13. Should be properly fixed.
r"┌ Warning: Layer with Float32 parameters got Float64 input.+\n│.+\n│.+\n│.+\n└ @ Flux.+\n",
# NOTE: These warnings arose from Julia 1.10 onwards
- r"WARNING: Method definition .* in module .* at .* overwritten .*.\n"]
+ r"WARNING: Method definition .* in module .* at .* overwritten .*.\n",
+ # Warnings from third party packages
+ r"┌ Warning: Problem status ALMOST_INFEASIBLE; solution may be inaccurate.\n└ @ Convex ~/.julia/packages/Convex/.*\n",
+ r"┌ Warning: Problem status ALMOST_OPTIMAL; solution may be inaccurate.\n└ @ Convex ~/.julia/packages/Convex/.*\n"]
append!(ignore_content, $additional_ignore_content)
for pattern in ignore_content
stderr_content = replace(stderr_content, pattern => "")
diff --git a/test/test_unit.jl b/test/test_unit.jl
index 90ee21030d3..de13d41e931 100644
--- a/test/test_unit.jl
+++ b/test/test_unit.jl
@@ -3,6 +3,13 @@ module TestUnit
using Test
using Trixi
+using DelimitedFiles: readdlm
+
+# Use Convex and ECOS to load the extension that extends functions for testing
+# PERK Single p2 Constructors
+using Convex: Convex
+using ECOS: Optimizer
+
include("test_trixi.jl")
# Start with a clean environment: remove Trixi.jl output directory if it exists
@@ -1634,6 +1641,39 @@ end
@test mesh.boundary_faces[:entire_boundary] == [1, 2]
end
+@testset "PERK Single p2 Constructors" begin
+ path_coeff_file = mktempdir()
+ Trixi.download("https://gist.githubusercontent.com/DanielDoehring/8db0808b6f80e59420c8632c0d8e2901/raw/39aacf3c737cd642636dd78592dbdfe4cb9499af/MonCoeffsS6p2.txt",
+ joinpath(path_coeff_file, "gamma_6.txt"))
+
+ ode_algorithm = Trixi.PairedExplicitRK2(6, path_coeff_file)
+
+ @test isapprox(ode_algorithm.a_matrix,
+ [0.12405417889682908 0.07594582110317093
+ 0.16178873711001726 0.13821126288998273
+ 0.16692313960864164 0.2330768603913584
+ 0.12281292901258256 0.37718707098741744], atol = 1e-13)
+
+ Trixi.download("https://gist.githubusercontent.com/DanielDoehring/c7a89eaaa857e87dde055f78eae9b94a/raw/2937f8872ffdc08e0dcf444ee35f9ebfe18735b0/Spectrum_2D_IsentropicVortex_CEE.txt",
+ joinpath(path_coeff_file, "spectrum_2d.txt"))
+
+ eig_vals = readdlm(joinpath(path_coeff_file, "spectrum_2d.txt"), ComplexF64)
+ tspan = (0.0, 1.0)
+ ode_algorithm = Trixi.PairedExplicitRK2(12, tspan, vec(eig_vals))
+
+ @test isapprox(ode_algorithm.a_matrix,
+ [0.06453812656705388 0.02637096434203703
+ 0.09470601372266194 0.04165762264097442
+ 0.12332877820057538 0.05848940361760645
+ 0.1498701503275483 0.07740257694517898
+ 0.173421149536068 0.09930612319120471
+ 0.19261978147927503 0.12556203670254315
+ 0.2052334022622969 0.15840296137406676
+ 0.2073489042901963 0.2017420048007128
+ 0.19135142349998963 0.2631940310454649
+ 0.13942836392940833 0.3605716360705917], atol = 1e-13)
+end
+
@testset "Sutherlands Law" begin
function mu(u, equations)
T_ref = 291.15
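The unit tests above construct the `PairedExplicitRK2` algorithm in two ways: from a directory containing a monomial coefficient file and from a spectrum of eigenvalues. A minimal sketch of the eigenvalue path is given below; the file name `spectrum_2d.txt` is only illustrative (the tests download one into a temporary directory), and loading Convex.jl and ECOS.jl is assumed so that the `TrixiConvexECOSExt` extension used for the stability-polynomial optimization is active.

```julia
using Trixi
using Convex: Convex        # loading Convex and ECOS activates the
using ECOS: Optimizer       # TrixiConvexECOSExt extension
using DelimitedFiles: readdlm

# Hypothetical spectrum file with one complex eigenvalue per line
eig_vals = readdlm("spectrum_2d.txt", ComplexF64)

# Optimize a second-order PERK method with 12 stages for the given time span
tspan = (0.0, 1.0)
ode_algorithm = Trixi.PairedExplicitRK2(12, tspan, vec(eig_vals))
```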
From 2da08630c97d3808fac9b52661fac32421098f26 Mon Sep 17 00:00:00 2001
From: Benjamin Bolm <74359358+bennibolm@users.noreply.github.com>
Date: Fri, 24 May 2024 05:45:31 +0200
Subject: [PATCH 21/44] Add subcell limiting support for StructuredMesh (#1946)
* Add structured mesh support
* Fix non-periodic computation of bounds
* Use local limiting and nonperiodic domain in source terms elixir
* Use local limiting in free stream elixir
* Remove not needed lines
* Remove P4estMesh
* Add non-periodic tests with local bounds
* fmt
* Fix test
* Use `get_inverse_jacobian` instead of dispatching all routines
* Simplify `perform_idp_correction!`
* Revert stuff
* Remove free stream elixir
* Use sedov blast instead of source term setup; add news
* Update dispatching for mesh types
* Move new tests within test file
* Adapt dispatching
* Fix typo
* Remove not-needed parameters
---
NEWS.md | 1 +
...lixir_euler_sedov_blast_wave_sc_subcell.jl | 114 +++++++++
src/callbacks_stage/subcell_bounds_check.jl | 12 +-
.../subcell_bounds_check_2d.jl | 4 +-
.../subcell_limiter_idp_correction_2d.jl | 27 ++-
src/solvers/dgsem_structured/dg.jl | 3 +
.../dg_2d_subcell_limiters.jl | 111 +++++++++
.../dgsem_structured/subcell_limiters_2d.jl | 220 ++++++++++++++++++
.../dgsem_tree/dg_2d_subcell_limiters.jl | 15 +-
src/solvers/dgsem_tree/subcell_limiters_2d.jl | 31 ++-
test/test_structured_2d.jl | 56 +++++
11 files changed, 557 insertions(+), 37 deletions(-)
create mode 100644 examples/structured_2d_dgsem/elixir_euler_sedov_blast_wave_sc_subcell.jl
create mode 100644 src/solvers/dgsem_structured/dg_2d_subcell_limiters.jl
create mode 100644 src/solvers/dgsem_structured/subcell_limiters_2d.jl
diff --git a/NEWS.md b/NEWS.md
index ebc8d9cda39..ecbd70ce472 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -14,6 +14,7 @@ for human readability.
- Subcell local one-sided limiting support for nonlinear variables in 2D for `TreeMesh` ([#1792]).
- New time integrator `PairedExplicitRK2`, implementing the second-order paired explicit Runge-Kutta
method with [Convex.jl](https://github.com/jump-dev/Convex.jl) and [ECOS.jl](https://github.com/jump-dev/ECOS.jl) ([#1908])
+- Add subcell limiting support for `StructuredMesh` ([#1946]).
## Changes when updating to v0.7 from v0.6.x
diff --git a/examples/structured_2d_dgsem/elixir_euler_sedov_blast_wave_sc_subcell.jl b/examples/structured_2d_dgsem/elixir_euler_sedov_blast_wave_sc_subcell.jl
new file mode 100644
index 00000000000..5c11a7d15a7
--- /dev/null
+++ b/examples/structured_2d_dgsem/elixir_euler_sedov_blast_wave_sc_subcell.jl
@@ -0,0 +1,114 @@
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# semidiscretization of the compressible Euler equations
+gamma = 1.4
+equations = CompressibleEulerEquations2D(gamma)
+
+"""
+ initial_condition_sedov_blast_wave(x, t, equations::CompressibleEulerEquations2D)
+
+The Sedov blast wave setup based on Flash
+- https://flash.rochester.edu/site/flashcode/user_support/flash_ug_devel/node187.html#SECTION010114000000000000000
+"""
+function initial_condition_sedov_blast_wave(x, t, equations::CompressibleEulerEquations2D)
+ # Set up polar coordinates
+ inicenter = SVector(0.0, 0.0)
+ x_norm = x[1] - inicenter[1]
+ y_norm = x[2] - inicenter[2]
+ r = sqrt(x_norm^2 + y_norm^2)
+
+ # Setup based on https://flash.rochester.edu/site/flashcode/user_support/flash_ug_devel/node187.html#SECTION010114000000000000000
+ r0 = 0.21875 # = 3.5 * smallest dx (for domain length=4 and max-ref=6)
+ # r0 = 0.5 # = more reasonable setup
+ E = 1.0
+ p0_inner = 3 * (equations.gamma - 1) * E / (3 * pi * r0^2)
+ p0_outer = 1.0e-5 # = true Sedov setup
+ # p0_outer = 1.0e-3 # = more reasonable setup
+
+ # Calculate primitive variables
+ rho = 1.0
+ v1 = 0.0
+ v2 = 0.0
+ p = r > r0 ? p0_outer : p0_inner
+
+ return prim2cons(SVector(rho, v1, v2, p), equations)
+end
+initial_condition = initial_condition_sedov_blast_wave
+
+boundary_condition = BoundaryConditionDirichlet(initial_condition)
+boundary_conditions = (x_neg = boundary_condition,
+ x_pos = boundary_condition,
+ y_neg = boundary_condition,
+ y_pos = boundary_condition)
+
+surface_flux = flux_lax_friedrichs
+volume_flux = flux_ranocha
+polydeg = 3
+basis = LobattoLegendreBasis(polydeg)
+limiter_idp = SubcellLimiterIDP(equations, basis;
+ local_twosided_variables_cons = ["rho"],
+ local_onesided_variables_nonlinear = [(Trixi.entropy_guermond_etal,
+ min)],
+ max_iterations_newton = 40, # Default value of 10 iterations is too low to fulfill bounds.
+ positivity_variables_cons = [],
+ positivity_variables_nonlinear = [])
+# Variables for global limiting (`positivity_variables_cons` and
+# `positivity_variables_nonlinear`) are overwritten and used in the tests.
+
+volume_integral = VolumeIntegralSubcellLimiting(limiter_idp;
+ volume_flux_dg = volume_flux,
+ volume_flux_fv = surface_flux)
+solver = DGSEM(basis, surface_flux, volume_integral)
+
+# Get the curved quad mesh from a mapping function
+# Mapping as described in https://arxiv.org/abs/2012.12040
+function mapping(xi, eta)
+ y = eta + 0.125 * (cos(1.5 * pi * xi) * cos(0.5 * pi * eta))
+
+ x = xi + 0.125 * (cos(0.5 * pi * xi) * cos(2 * pi * y))
+
+ return SVector(x, y)
+end
+
+cells_per_dimension = (16, 16)
+mesh = StructuredMesh(cells_per_dimension, mapping, periodicity = false)
+
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver,
+ boundary_conditions = boundary_conditions)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+tspan = (0.0, 3.0)
+ode = semidiscretize(semi, tspan)
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 100
+analysis_callback = AnalysisCallback(semi, interval = analysis_interval)
+
+alive_callback = AliveCallback(analysis_interval = analysis_interval)
+
+save_solution = SaveSolutionCallback(interval = 100,
+ save_initial_solution = true,
+ save_final_solution = true,
+ solution_variables = cons2prim)
+
+stepsize_callback = StepsizeCallback(cfl = 0.7)
+
+callbacks = CallbackSet(summary_callback,
+ analysis_callback, alive_callback,
+ save_solution,
+ stepsize_callback)
+
+###############################################################################
+# run the simulation
+
+stage_callbacks = (SubcellLimiterIDPCorrection(), BoundsCheckCallback())
+
+sol = Trixi.solve(ode, Trixi.SimpleSSPRK33(stage_callbacks = stage_callbacks);
+ dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
+ save_everystep = false, callback = callbacks);
+summary_callback() # print the timer summary
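The elixir above uses local bounds: a two-sided bound on `rho` and a one-sided bound on the Guermond et al. entropy. The structured-mesh tests added further below instead exercise purely global positivity limiting by overriding these keywords. A sketch of the same override outside the test suite, assuming the elixir is shipped in the examples directory as in this patch, could look like this:

```julia
using Trixi

# Re-run the elixir with global positivity limiting only; keyword arguments
# passed to `trixi_include` override the corresponding top-level assignments.
trixi_include(joinpath(examples_dir(), "structured_2d_dgsem",
                       "elixir_euler_sedov_blast_wave_sc_subcell.jl"),
              local_twosided_variables_cons = [],
              local_onesided_variables_nonlinear = [],
              positivity_variables_cons = ["rho"],
              positivity_variables_nonlinear = [pressure],
              tspan = (0.0, 0.5))
```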
diff --git a/src/callbacks_stage/subcell_bounds_check.jl b/src/callbacks_stage/subcell_bounds_check.jl
index 3f3e151436f..6268fde6dd7 100644
--- a/src/callbacks_stage/subcell_bounds_check.jl
+++ b/src/callbacks_stage/subcell_bounds_check.jl
@@ -38,7 +38,7 @@ function (callback::BoundsCheckCallback)(u_ode, integrator, stage)
(; t, iter, alg) = integrator
u = wrap_array(u_ode, mesh, equations, solver, cache)
- @trixi_timeit timer() "check_bounds" check_bounds(u, mesh, equations, solver, cache,
+ @trixi_timeit timer() "check_bounds" check_bounds(u, equations, solver, cache,
solver.volume_integral)
save_errors = callback.save_errors && (callback.interval > 0) &&
@@ -48,20 +48,20 @@ function (callback::BoundsCheckCallback)(u_ode, integrator, stage)
(iter + 1) >= integrator.opts.maxiters) # Maximum iterations reached
if save_errors
@trixi_timeit timer() "save_errors" save_bounds_check_errors(callback.output_directory,
- u, t, iter + 1,
+ t, iter + 1,
equations,
solver.volume_integral)
end
end
-@inline function check_bounds(u, mesh, equations, solver, cache,
+@inline function check_bounds(u, equations, solver, cache,
volume_integral::VolumeIntegralSubcellLimiting)
- check_bounds(u, mesh, equations, solver, cache, volume_integral.limiter)
+ check_bounds(u, equations, solver, cache, volume_integral.limiter)
end
-@inline function save_bounds_check_errors(output_directory, u, t, iter, equations,
+@inline function save_bounds_check_errors(output_directory, t, iter, equations,
volume_integral::VolumeIntegralSubcellLimiting)
- save_bounds_check_errors(output_directory, u, t, iter, equations,
+ save_bounds_check_errors(output_directory, t, iter, equations,
volume_integral.limiter)
end
diff --git a/src/callbacks_stage/subcell_bounds_check_2d.jl b/src/callbacks_stage/subcell_bounds_check_2d.jl
index c81ebc970a0..9c3664ab0c3 100644
--- a/src/callbacks_stage/subcell_bounds_check_2d.jl
+++ b/src/callbacks_stage/subcell_bounds_check_2d.jl
@@ -5,7 +5,7 @@
@muladd begin
#! format: noindent
-@inline function check_bounds(u, mesh::AbstractMesh{2}, equations, solver, cache,
+@inline function check_bounds(u, equations, solver, cache,
limiter::SubcellLimiterIDP)
(; local_twosided, positivity, local_onesided) = solver.volume_integral.limiter
(; variable_bounds) = limiter.cache.subcell_limiter_coefficients
@@ -103,7 +103,7 @@
return nothing
end
-@inline function save_bounds_check_errors(output_directory, u, time, iter, equations,
+@inline function save_bounds_check_errors(output_directory, time, iter, equations,
limiter::SubcellLimiterIDP)
(; local_twosided, positivity, local_onesided) = limiter
(; idp_bounds_delta_local) = limiter.cache
diff --git a/src/callbacks_stage/subcell_limiter_idp_correction_2d.jl b/src/callbacks_stage/subcell_limiter_idp_correction_2d.jl
index 6f1723e2a98..0eb048ca5b8 100644
--- a/src/callbacks_stage/subcell_limiter_idp_correction_2d.jl
+++ b/src/callbacks_stage/subcell_limiter_idp_correction_2d.jl
@@ -5,29 +5,32 @@
@muladd begin
#! format: noindent
-function perform_idp_correction!(u, dt, mesh::TreeMesh2D, equations, dg, cache)
+function perform_idp_correction!(u, dt,
+ mesh::Union{TreeMesh{2}, StructuredMesh{2}},
+ equations, dg, cache)
@unpack inverse_weights = dg.basis
@unpack antidiffusive_flux1_L, antidiffusive_flux2_L, antidiffusive_flux1_R, antidiffusive_flux2_R = cache.antidiffusive_fluxes
@unpack alpha1, alpha2 = dg.volume_integral.limiter.cache.subcell_limiter_coefficients
@threaded for element in eachelement(dg, cache)
- # Sign switch as in apply_jacobian!
- inverse_jacobian = -cache.elements.inverse_jacobian[element]
-
for j in eachnode(dg), i in eachnode(dg)
+ # Sign switch as in apply_jacobian!
+ inverse_jacobian = -get_inverse_jacobian(cache.elements.inverse_jacobian,
+ mesh, i, j, element)
+
# Note: antidiffusive_flux1[v, i, xi, element] = antidiffusive_flux2[v, xi, i, element] = 0 for all i in 1:nnodes and xi in {1, nnodes+1}
alpha_flux1 = (1 - alpha1[i, j, element]) *
- get_node_vars(antidiffusive_flux1_R, equations, dg, i, j,
- element)
+ get_node_vars(antidiffusive_flux1_R, equations, dg,
+ i, j, element)
alpha_flux1_ip1 = (1 - alpha1[i + 1, j, element]) *
- get_node_vars(antidiffusive_flux1_L, equations, dg, i + 1,
- j, element)
+ get_node_vars(antidiffusive_flux1_L, equations, dg,
+ i + 1, j, element)
alpha_flux2 = (1 - alpha2[i, j, element]) *
- get_node_vars(antidiffusive_flux2_R, equations, dg, i, j,
- element)
+ get_node_vars(antidiffusive_flux2_R, equations, dg,
+ i, j, element)
alpha_flux2_jp1 = (1 - alpha2[i, j + 1, element]) *
- get_node_vars(antidiffusive_flux2_L, equations, dg, i,
- j + 1, element)
+ get_node_vars(antidiffusive_flux2_L, equations, dg,
+ i, j + 1, element)
for v in eachvariable(equations)
u[v, i, j, element] += dt * inverse_jacobian *
diff --git a/src/solvers/dgsem_structured/dg.jl b/src/solvers/dgsem_structured/dg.jl
index 424ed5e1e7e..5617ae90e3f 100644
--- a/src/solvers/dgsem_structured/dg.jl
+++ b/src/solvers/dgsem_structured/dg.jl
@@ -93,6 +93,9 @@ include("indicators_1d.jl")
include("indicators_2d.jl")
include("indicators_3d.jl")
+include("subcell_limiters_2d.jl")
+include("dg_2d_subcell_limiters.jl")
+
# Specialized implementations used to improve performance
include("dg_2d_compressible_euler.jl")
include("dg_3d_compressible_euler.jl")
diff --git a/src/solvers/dgsem_structured/dg_2d_subcell_limiters.jl b/src/solvers/dgsem_structured/dg_2d_subcell_limiters.jl
new file mode 100644
index 00000000000..4da402425ea
--- /dev/null
+++ b/src/solvers/dgsem_structured/dg_2d_subcell_limiters.jl
@@ -0,0 +1,111 @@
+# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
+# Since these FMAs can increase the performance of many numerical algorithms,
+# we need to opt-in explicitly.
+# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
+@muladd begin
+#! format: noindent
+
+# Calculate the DG staggered volume fluxes `fhat` in subcell FV-form inside the element
+# (**without non-conservative terms**).
+#
+# See also `flux_differencing_kernel!`.
+@inline function calcflux_fhat!(fhat1_L, fhat1_R, fhat2_L, fhat2_R, u,
+ mesh::StructuredMesh{2},
+ nonconservative_terms::False, equations,
+ volume_flux, dg::DGSEM, element, cache)
+ (; contravariant_vectors) = cache.elements
+ (; weights, derivative_split) = dg.basis
+ (; flux_temp_threaded) = cache
+
+ flux_temp = flux_temp_threaded[Threads.threadid()]
+
+ # The FV-form fluxes are calculated in a recursive manner, i.e.:
+ # fhat_(0,1) = w_0 * FVol_0,
+ # fhat_(j,j+1) = fhat_(j-1,j) + w_j * FVol_j, for j=1,...,N-1,
+ # with the split form volume fluxes FVol_j = -2 * sum_i=0^N D_ji f*_(j,i).
+
+ # To use the symmetry of the `volume_flux`, the split form volume flux is precalculated
+ # like in `calc_volume_integral!` for the `VolumeIntegralFluxDifferencing`
+ # and saved in `flux_temp`.
+
+ # Split form volume flux in orientation 1: x direction
+ flux_temp .= zero(eltype(flux_temp))
+
+ for j in eachnode(dg), i in eachnode(dg)
+ u_node = get_node_vars(u, equations, dg, i, j, element)
+
+ # pull the contravariant vectors in each coordinate direction
+ Ja1_node = get_contravariant_vector(1, contravariant_vectors, i, j, element) # x direction
+
+ # All diagonal entries of `derivative_split` are zero. Thus, we can skip
+ # the computation of the diagonal terms. In addition, we use the symmetry
+ # of the `volume_flux` to save half of the possible two-point flux
+ # computations.
+
+ # x direction
+ for ii in (i + 1):nnodes(dg)
+ u_node_ii = get_node_vars(u, equations, dg, ii, j, element)
+ # pull the contravariant vectors and compute the average
+ Ja1_node_ii = get_contravariant_vector(1, contravariant_vectors, ii, j,
+ element)
+ Ja1_avg = 0.5 * (Ja1_node + Ja1_node_ii)
+
+ # compute the contravariant sharp flux in the direction of the averaged contravariant vector
+ fluxtilde1 = volume_flux(u_node, u_node_ii, Ja1_avg, equations)
+ multiply_add_to_node_vars!(flux_temp, derivative_split[i, ii], fluxtilde1,
+ equations, dg, i, j)
+ multiply_add_to_node_vars!(flux_temp, derivative_split[ii, i], fluxtilde1,
+ equations, dg, ii, j)
+ end
+ end
+
+ # FV-form flux `fhat` in x direction
+ fhat1_L[:, 1, :] .= zero(eltype(fhat1_L))
+ fhat1_L[:, nnodes(dg) + 1, :] .= zero(eltype(fhat1_L))
+ fhat1_R[:, 1, :] .= zero(eltype(fhat1_R))
+ fhat1_R[:, nnodes(dg) + 1, :] .= zero(eltype(fhat1_R))
+
+ for j in eachnode(dg), i in 1:(nnodes(dg) - 1), v in eachvariable(equations)
+ fhat1_L[v, i + 1, j] = fhat1_L[v, i, j] + weights[i] * flux_temp[v, i, j]
+ fhat1_R[v, i + 1, j] = fhat1_L[v, i + 1, j]
+ end
+
+ # Split form volume flux in orientation 2: y direction
+ flux_temp .= zero(eltype(flux_temp))
+
+ for j in eachnode(dg), i in eachnode(dg)
+ u_node = get_node_vars(u, equations, dg, i, j, element)
+
+ # pull the contravariant vectors in each coordinate direction
+ Ja2_node = get_contravariant_vector(2, contravariant_vectors, i, j, element)
+
+ # y direction
+ for jj in (j + 1):nnodes(dg)
+ u_node_jj = get_node_vars(u, equations, dg, i, jj, element)
+ # pull the contravariant vectors and compute the average
+ Ja2_node_jj = get_contravariant_vector(2, contravariant_vectors, i, jj,
+ element)
+ Ja2_avg = 0.5 * (Ja2_node + Ja2_node_jj)
+ # compute the contravariant sharp flux in the direction of the averaged contravariant vector
+ fluxtilde2 = volume_flux(u_node, u_node_jj, Ja2_avg, equations)
+ multiply_add_to_node_vars!(flux_temp, derivative_split[j, jj], fluxtilde2,
+ equations, dg, i, j)
+ multiply_add_to_node_vars!(flux_temp, derivative_split[jj, j], fluxtilde2,
+ equations, dg, i, jj)
+ end
+ end
+
+ # FV-form flux `fhat` in y direction
+ fhat2_L[:, :, 1] .= zero(eltype(fhat2_L))
+ fhat2_L[:, :, nnodes(dg) + 1] .= zero(eltype(fhat2_L))
+ fhat2_R[:, :, 1] .= zero(eltype(fhat2_R))
+ fhat2_R[:, :, nnodes(dg) + 1] .= zero(eltype(fhat2_R))
+
+ for j in 1:(nnodes(dg) - 1), i in eachnode(dg), v in eachvariable(equations)
+ fhat2_L[v, i, j + 1] = fhat2_L[v, i, j] + weights[j] * flux_temp[v, i, j]
+ fhat2_R[v, i, j + 1] = fhat2_L[v, i, j + 1]
+ end
+
+ return nothing
+end
+end # @muladd
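The comments in `calcflux_fhat!` describe a recursive FV-form accumulation, fhat_(j,j+1) = fhat_(j-1,j) + w_j * FVol_j, with vanishing values at the element boundaries. A self-contained one-dimensional illustration of that accumulation step is sketched below; it is not Trixi internals, and the array names and example data are made up.

```julia
# Accumulate interface fluxes fhat[1:n+1] from nodal volume contributions
# flux_temp[1:n] and quadrature weights weights[1:n]; fhat[1] and fhat[n+1]
# stay zero, mirroring the loops over `fhat1_L`/`fhat2_L` above.
function accumulate_fhat(flux_temp::AbstractVector, weights::AbstractVector)
    n = length(weights)
    fhat = zeros(eltype(flux_temp), n + 1)
    for j in 1:(n - 1)
        fhat[j + 1] = fhat[j] + weights[j] * flux_temp[j]
    end
    return fhat
end

# Example with made-up data for a polynomial degree 3 element (4 LGL nodes)
accumulate_fhat([0.3, -0.1, 0.2, -0.4], [1 / 6, 5 / 6, 5 / 6, 1 / 6])
```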
diff --git a/src/solvers/dgsem_structured/subcell_limiters_2d.jl b/src/solvers/dgsem_structured/subcell_limiters_2d.jl
new file mode 100644
index 00000000000..5b65a475062
--- /dev/null
+++ b/src/solvers/dgsem_structured/subcell_limiters_2d.jl
@@ -0,0 +1,220 @@
+# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
+# Since these FMAs can increase the performance of many numerical algorithms,
+# we need to opt-in explicitly.
+# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
+@muladd begin
+#! format: noindent
+
+function calc_bounds_twosided_interface!(var_min, var_max, variable, u, t, semi,
+ mesh::StructuredMesh{2})
+ _, equations, dg, cache = mesh_equations_solver_cache(semi)
+ (; boundary_conditions) = semi
+
+ # Calc bounds at interfaces and periodic boundaries
+ for element in eachelement(dg, cache)
+ # Get neighboring element ids
+ left = cache.elements.left_neighbors[1, element]
+ lower = cache.elements.left_neighbors[2, element]
+
+ if left != 0
+ for j in eachnode(dg)
+ var_left = u[variable, nnodes(dg), j, left]
+ var_element = u[variable, 1, j, element]
+
+ var_min[1, j, element] = min(var_min[1, j, element], var_left)
+ var_max[1, j, element] = max(var_max[1, j, element], var_left)
+
+ var_min[nnodes(dg), j, left] = min(var_min[nnodes(dg), j, left],
+ var_element)
+ var_max[nnodes(dg), j, left] = max(var_max[nnodes(dg), j, left],
+ var_element)
+ end
+ end
+ if lower != 0
+ for i in eachnode(dg)
+ var_lower = u[variable, i, nnodes(dg), lower]
+ var_element = u[variable, i, 1, element]
+
+ var_min[i, 1, element] = min(var_min[i, 1, element], var_lower)
+ var_max[i, 1, element] = max(var_max[i, 1, element], var_lower)
+
+ var_min[i, nnodes(dg), lower] = min(var_min[i, nnodes(dg), lower],
+ var_element)
+ var_max[i, nnodes(dg), lower] = max(var_max[i, nnodes(dg), lower],
+ var_element)
+ end
+ end
+ end
+
+ # Calc bounds at physical boundaries
+ if isperiodic(mesh)
+ return nothing
+ end
+ linear_indices = LinearIndices(size(mesh))
+ if !isperiodic(mesh, 1)
+ # - xi direction
+ for cell_y in axes(mesh, 2)
+ element = linear_indices[begin, cell_y]
+ for j in eachnode(dg)
+ u_outer = get_boundary_outer_state(boundary_conditions[1],
+ cache, t, equations, dg,
+ 1, j, element)
+ var_outer = u_outer[variable]
+
+ var_min[1, j, element] = min(var_min[1, j, element], var_outer)
+ var_max[1, j, element] = max(var_max[1, j, element], var_outer)
+ end
+ end
+ # + xi direction
+ for cell_y in axes(mesh, 2)
+ element = linear_indices[end, cell_y]
+ for j in eachnode(dg)
+ u_outer = get_boundary_outer_state(boundary_conditions[2],
+ cache, t, equations, dg,
+ nnodes(dg), j, element)
+ var_outer = u_outer[variable]
+
+ var_min[nnodes(dg), j, element] = min(var_min[nnodes(dg), j, element],
+ var_outer)
+ var_max[nnodes(dg), j, element] = max(var_max[nnodes(dg), j, element],
+ var_outer)
+ end
+ end
+ end
+ if !isperiodic(mesh, 2)
+ # - eta direction
+ for cell_x in axes(mesh, 1)
+ element = linear_indices[cell_x, begin]
+ for i in eachnode(dg)
+ u_outer = get_boundary_outer_state(boundary_conditions[3],
+ cache, t, equations, dg,
+ i, 1, element)
+ var_outer = u_outer[variable]
+
+ var_min[i, 1, element] = min(var_min[i, 1, element], var_outer)
+ var_max[i, 1, element] = max(var_max[i, 1, element], var_outer)
+ end
+ end
+ # + eta direction
+ for cell_x in axes(mesh, 1)
+ element = linear_indices[cell_x, end]
+ for i in eachnode(dg)
+ u_outer = get_boundary_outer_state(boundary_conditions[4],
+ cache, t, equations, dg,
+ i, nnodes(dg), element)
+ var_outer = u_outer[variable]
+
+ var_min[i, nnodes(dg), element] = min(var_min[i, nnodes(dg), element],
+ var_outer)
+ var_max[i, nnodes(dg), element] = max(var_max[i, nnodes(dg), element],
+ var_outer)
+ end
+ end
+ end
+
+ return nothing
+end
+
+function calc_bounds_onesided_interface!(var_minmax, minmax, variable, u, t, semi,
+ mesh::StructuredMesh{2})
+ _, equations, dg, cache = mesh_equations_solver_cache(semi)
+ (; boundary_conditions) = semi
+
+ # Calc bounds at interfaces and periodic boundaries
+ for element in eachelement(dg, cache)
+ # Get neighboring element ids
+ left = cache.elements.left_neighbors[1, element]
+ lower = cache.elements.left_neighbors[2, element]
+
+ if left != 0
+ for j in eachnode(dg)
+ var_left = variable(get_node_vars(u, equations, dg, nnodes(dg), j,
+ left), equations)
+ var_element = variable(get_node_vars(u, equations, dg, 1, j, element),
+ equations)
+
+ var_minmax[1, j, element] = minmax(var_minmax[1, j, element], var_left)
+ var_minmax[nnodes(dg), j, left] = minmax(var_minmax[nnodes(dg), j,
+ left], var_element)
+ end
+ end
+ if lower != 0
+ for i in eachnode(dg)
+ var_lower = variable(get_node_vars(u, equations, dg, i, nnodes(dg),
+ lower), equations)
+ var_element = variable(get_node_vars(u, equations, dg, i, 1, element),
+ equations)
+
+ var_minmax[i, 1, element] = minmax(var_minmax[i, 1, element], var_lower)
+ var_minmax[i, nnodes(dg), lower] = minmax(var_minmax[i, nnodes(dg),
+ lower],
+ var_element)
+ end
+ end
+ end
+
+ # Calc bounds at physical boundaries
+ if isperiodic(mesh)
+ return nothing
+ end
+ linear_indices = LinearIndices(size(mesh))
+ if !isperiodic(mesh, 1)
+ # - xi direction
+ for cell_y in axes(mesh, 2)
+ element = linear_indices[begin, cell_y]
+ for j in eachnode(dg)
+ u_outer = get_boundary_outer_state(boundary_conditions[1],
+ cache, t, equations, dg,
+ 1, j, element)
+ var_outer = variable(u_outer, equations)
+
+ var_minmax[1, j, element] = minmax(var_minmax[1, j, element], var_outer)
+ end
+ end
+ # + xi direction
+ for cell_y in axes(mesh, 2)
+ element = linear_indices[end, cell_y]
+ for j in eachnode(dg)
+ u_outer = get_boundary_outer_state(boundary_conditions[2],
+ cache, t, equations, dg,
+ nnodes(dg), j, element)
+ var_outer = variable(u_outer, equations)
+
+ var_minmax[nnodes(dg), j, element] = minmax(var_minmax[nnodes(dg), j,
+ element],
+ var_outer)
+ end
+ end
+ end
+ if !isperiodic(mesh, 2)
+ # - eta direction
+ for cell_x in axes(mesh, 1)
+ element = linear_indices[cell_x, begin]
+ for i in eachnode(dg)
+ u_outer = get_boundary_outer_state(boundary_conditions[3],
+ cache, t, equations, dg,
+ i, 1, element)
+ var_outer = variable(u_outer, equations)
+
+ var_minmax[i, 1, element] = minmax(var_minmax[i, 1, element], var_outer)
+ end
+ end
+ # + eta direction
+ for cell_x in axes(mesh, 1)
+ element = linear_indices[cell_x, end]
+ for i in eachnode(dg)
+ u_outer = get_boundary_outer_state(boundary_conditions[4],
+ cache, t, equations, dg,
+ i, nnodes(dg), element)
+ var_outer = variable(u_outer, equations)
+
+ var_minmax[i, nnodes(dg), element] = minmax(var_minmax[i, nnodes(dg),
+ element],
+ var_outer)
+ end
+ end
+ end
+
+ return nothing
+end
+end # @muladd
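For `StructuredMesh`, each element only stores its left and lower neighbors, so the interface loops above update the bounds on both sides of a shared face in one pass. A tiny sketch of that symmetric min/max update for a single pair of face nodes (illustrative names and flat vectors as a stand-in for the `(i, j, element)` arrays, not Trixi internals):

```julia
# Update two-sided bounds at one shared face node: the element sees the
# neighbor's value and the neighbor sees the element's value.
function update_shared_bounds!(var_min, var_max, idx_element, idx_neighbor,
                               var_element, var_neighbor)
    var_min[idx_element] = min(var_min[idx_element], var_neighbor)
    var_max[idx_element] = max(var_max[idx_element], var_neighbor)
    var_min[idx_neighbor] = min(var_min[idx_neighbor], var_element)
    var_max[idx_neighbor] = max(var_max[idx_neighbor], var_element)
    return nothing
end

var_min = fill(Inf, 4)
var_max = fill(-Inf, 4)
# After this call, both entries 1 and 2 include the other side's value.
update_shared_bounds!(var_min, var_max, 1, 2, 0.8, 1.2)
```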
diff --git a/src/solvers/dgsem_tree/dg_2d_subcell_limiters.jl b/src/solvers/dgsem_tree/dg_2d_subcell_limiters.jl
index 9af8b65b4cd..2cc5e2cae82 100644
--- a/src/solvers/dgsem_tree/dg_2d_subcell_limiters.jl
+++ b/src/solvers/dgsem_tree/dg_2d_subcell_limiters.jl
@@ -5,8 +5,9 @@
@muladd begin
#! format: noindent
-function create_cache(mesh::TreeMesh{2}, equations,
- volume_integral::VolumeIntegralSubcellLimiting, dg::DG, uEltype)
+function create_cache(mesh::Union{TreeMesh{2}, StructuredMesh{2}},
+ equations, volume_integral::VolumeIntegralSubcellLimiting,
+ dg::DG, uEltype)
cache = create_cache(mesh, equations,
VolumeIntegralPureLGLFiniteVolume(volume_integral.volume_flux_fv),
dg, uEltype)
@@ -56,7 +57,7 @@ function create_cache(mesh::TreeMesh{2}, equations,
end
function calc_volume_integral!(du, u,
- mesh::TreeMesh{2},
+ mesh::Union{TreeMesh{2}, StructuredMesh{2}},
nonconservative_terms, equations,
volume_integral::VolumeIntegralSubcellLimiting,
dg::DGSEM, cache)
@@ -70,8 +71,8 @@ function calc_volume_integral!(du, u,
end
end
-@inline function subcell_limiting_kernel!(du, u,
- element, mesh::TreeMesh{2},
+@inline function subcell_limiting_kernel!(du, u, element,
+ mesh::Union{TreeMesh{2}, StructuredMesh{2}},
nonconservative_terms, equations,
volume_integral, limiter::SubcellLimiterIDP,
dg::DGSEM, cache)
@@ -391,7 +392,7 @@ end
# Calculate the antidiffusive flux `antidiffusive_flux` as the subtraction between `fhat` and `fstar` for conservative systems.
@inline function calcflux_antidiffusive!(fhat1_L, fhat1_R, fhat2_L, fhat2_R,
fstar1_L, fstar1_R, fstar2_L, fstar2_R,
- u, mesh,
+ u, mesh::Union{TreeMesh{2}, StructuredMesh{2}},
nonconservative_terms::False, equations,
limiter::SubcellLimiterIDP, dg, element, cache)
@unpack antidiffusive_flux1_L, antidiffusive_flux2_L, antidiffusive_flux1_R, antidiffusive_flux2_R = cache.antidiffusive_fluxes
@@ -429,7 +430,7 @@ end
# Calculate the antidiffusive flux `antidiffusive_flux` as the subtraction between `fhat` and `fstar` for conservative systems.
@inline function calcflux_antidiffusive!(fhat1_L, fhat1_R, fhat2_L, fhat2_R,
fstar1_L, fstar1_R, fstar2_L, fstar2_R,
- u, mesh,
+ u, mesh::Union{TreeMesh{2}, StructuredMesh{2}},
nonconservative_terms::True, equations,
limiter::SubcellLimiterIDP, dg, element, cache)
@unpack antidiffusive_flux1_L, antidiffusive_flux2_L, antidiffusive_flux1_R, antidiffusive_flux2_R = cache.antidiffusive_fluxes
diff --git a/src/solvers/dgsem_tree/subcell_limiters_2d.jl b/src/solvers/dgsem_tree/subcell_limiters_2d.jl
index 33ae0599748..4095f0853f9 100644
--- a/src/solvers/dgsem_tree/subcell_limiters_2d.jl
+++ b/src/solvers/dgsem_tree/subcell_limiters_2d.jl
@@ -283,7 +283,7 @@ end
end
@inline function idp_local_twosided!(alpha, limiter, u, t, dt, semi, variable)
- _, _, dg, cache = mesh_equations_solver_cache(semi)
+ mesh, _, dg, cache = mesh_equations_solver_cache(semi)
(; antidiffusive_flux1_L, antidiffusive_flux2_L, antidiffusive_flux1_R, antidiffusive_flux2_R) = cache.antidiffusive_fluxes
(; inverse_weights) = dg.basis
@@ -294,8 +294,9 @@ end
calc_bounds_twosided!(var_min, var_max, variable, u, t, semi)
@threaded for element in eachelement(dg, semi.cache)
- inverse_jacobian = cache.elements.inverse_jacobian[element]
for j in eachnode(dg), i in eachnode(dg)
+ inverse_jacobian = get_inverse_jacobian(cache.elements.inverse_jacobian,
+ mesh, i, j, element)
var = u[variable, i, j, element]
# Real Zalesak type limiter
# * Zalesak (1979). "Fully multidimensional flux-corrected transport algorithms for fluids"
@@ -354,17 +355,18 @@ end
return nothing
end
-@inline function idp_local_onesided!(alpha, limiter, u, t, dt, semi, variable,
- min_or_max)
- _, equations, dg, cache = mesh_equations_solver_cache(semi)
+@inline function idp_local_onesided!(alpha, limiter, u, t, dt, semi,
+ variable, min_or_max)
+ mesh, equations, dg, cache = mesh_equations_solver_cache(semi)
(; variable_bounds) = limiter.cache.subcell_limiter_coefficients
var_minmax = variable_bounds[Symbol(string(variable), "_", string(min_or_max))]
calc_bounds_onesided!(var_minmax, min_or_max, variable, u, t, semi)
# Perform Newton's bisection method to find new alpha
@threaded for element in eachelement(dg, cache)
- inverse_jacobian = cache.elements.inverse_jacobian[element]
for j in eachnode(dg), i in eachnode(dg)
+ inverse_jacobian = get_inverse_jacobian(cache.elements.inverse_jacobian,
+ mesh, i, j, element)
u_local = get_node_vars(u, equations, dg, i, j, element)
newton_loops_alpha!(alpha, var_minmax[i, j, element], u_local,
i, j, element, variable, min_or_max,
@@ -407,7 +409,7 @@ end
# Global positivity limiting of conservative variables
@inline function idp_positivity_conservative!(alpha, limiter, u, dt, semi, variable)
- mesh, equations, dg, cache = mesh_equations_solver_cache(semi)
+ mesh, _, dg, cache = mesh_equations_solver_cache(semi)
(; antidiffusive_flux1_L, antidiffusive_flux2_L, antidiffusive_flux1_R, antidiffusive_flux2_R) = cache.antidiffusive_fluxes
(; inverse_weights) = dg.basis
(; positivity_correction_factor) = limiter
@@ -416,8 +418,9 @@ end
var_min = variable_bounds[Symbol(string(variable), "_min")]
@threaded for element in eachelement(dg, semi.cache)
- inverse_jacobian = cache.elements.inverse_jacobian[element]
for j in eachnode(dg), i in eachnode(dg)
+ inverse_jacobian = get_inverse_jacobian(cache.elements.inverse_jacobian,
+ mesh, i, j, element)
var = u[variable, i, j, element]
if var < 0
error("Safe low-order method produces negative value for conservative variable $variable. Try a smaller time step.")
@@ -467,16 +470,21 @@ end
return nothing
end
+###############################################################################
+# Global positivity limiting of nonlinear variables
+
@inline function idp_positivity_nonlinear!(alpha, limiter, u, dt, semi, variable)
- _, equations, dg, cache = mesh_equations_solver_cache(semi)
+ mesh, equations, dg, cache = mesh_equations_solver_cache(semi)
(; positivity_correction_factor) = limiter
(; variable_bounds) = limiter.cache.subcell_limiter_coefficients
var_min = variable_bounds[Symbol(string(variable), "_min")]
@threaded for element in eachelement(dg, semi.cache)
- inverse_jacobian = cache.elements.inverse_jacobian[element]
for j in eachnode(dg), i in eachnode(dg)
+ inverse_jacobian = get_inverse_jacobian(cache.elements.inverse_jacobian,
+ mesh, i, j, element)
+
# Compute bound
u_local = get_node_vars(u, equations, dg, i, j, element)
var = variable(u_local, equations)
@@ -496,6 +504,9 @@ end
return nothing
end
+###############################################################################
+# Newton-bisection method
+
@inline function newton_loops_alpha!(alpha, bound, u, i, j, element, variable,
min_or_max, initial_check, final_check,
inverse_jacobian, dt, equations, dg, cache,
diff --git a/test/test_structured_2d.jl b/test/test_structured_2d.jl
index f095c97b19e..4b3aa5c87e4 100644
--- a/test/test_structured_2d.jl
+++ b/test/test_structured_2d.jl
@@ -626,6 +626,62 @@ end
end
end
+@trixi_testset "elixir_euler_sedov_blast_wave_sc_subcell.jl (local bounds)" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR,
+ "elixir_euler_sedov_blast_wave_sc_subcell.jl"),
+ l2=[
+ 0.6337774834710513,
+ 0.30377119245852724,
+ 0.3111372568571772,
+ 1.2976221893997268,
+ ],
+ linf=[
+ 2.2064877103138207,
+ 1.541067099687334,
+ 1.5487587769900337,
+ 6.271271639873466,
+ ],
+ tspan=(0.0, 0.5))
+ # Ensure that we do not have excessive memory allocations
+ # (e.g., from type instabilities)
+ let
+ t = sol.t[end]
+ u_ode = sol.u[end]
+ du_ode = similar(u_ode)
+ @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 10000
+ end
+end
+
+@trixi_testset "elixir_euler_sedov_blast_wave_sc_subcell.jl (global bounds)" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR,
+ "elixir_euler_sedov_blast_wave_sc_subcell.jl"),
+ positivity_variables_cons=["rho"],
+ positivity_variables_nonlinear=[pressure],
+ local_twosided_variables_cons=[],
+ local_onesided_variables_nonlinear=[],
+ l2=[
+ 0.7869912572385168,
+ 0.39170886758882073,
+ 0.39613257454431977,
+ 1.2951760266455101,
+ ],
+ linf=[
+ 5.156044534854053,
+ 3.6261667239538986,
+ 3.1807681416546085,
+ 6.3028422220287235,
+ ],
+ tspan=(0.0, 0.5))
+ # Ensure that we do not have excessive memory allocations
+ # (e.g., from type instabilities)
+ let
+ t = sol.t[end]
+ u_ode = sol.u[end]
+ du_ode = similar(u_ode)
+ @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 10000
+ end
+end
+
@trixi_testset "elixir_euler_rayleigh_taylor_instability.jl" begin
@test_trixi_include(joinpath(EXAMPLES_DIR,
"elixir_euler_rayleigh_taylor_instability.jl"),
From 5359525685966ab79a9abfca7f20920ee25c0505 Mon Sep 17 00:00:00 2001
From: "github-actions[bot]"
<41898282+github-actions[bot]@users.noreply.github.com>
Date: Fri, 24 May 2024 12:18:26 +0200
Subject: [PATCH 22/44] CompatHelper: bump compat for Convex to 0.16 for
package test, (keep existing compat) (#1953)
* CompatHelper: bump compat for Convex to 0.16 for package test, (keep existing compat)
* bump compat for Convex.jl in main Project.toml
* Convex.jl v0.16.0 compat
* fmt
* Convex.jl v0.16 required due to changes of keyword arguments
---------
Co-authored-by: CompatHelper Julia
Co-authored-by: Hendrik Ranocha
Co-authored-by: Daniel Doehring
---
Project.toml | 2 +-
ext/TrixiConvexECOSExt.jl | 2 +-
test/Project.toml | 2 +-
test/test_tree_1d_advection.jl | 4 ++--
test/test_unit.jl | 20 ++++++++++----------
5 files changed, 15 insertions(+), 15 deletions(-)
diff --git a/Project.toml b/Project.toml
index b605cb2c6b1..b2cc12010d2 100644
--- a/Project.toml
+++ b/Project.toml
@@ -61,7 +61,7 @@ TrixiConvexECOSExt = ["Convex", "ECOS"]
[compat]
CodeTracking = "1.0.5"
ConstructionBase = "1.3"
-Convex = "0.15.4"
+Convex = "0.16"
DataStructures = "0.18.15"
DelimitedFiles = "1"
DiffEqBase = "6 - 6.143"
diff --git a/ext/TrixiConvexECOSExt.jl b/ext/TrixiConvexECOSExt.jl
index fac127699ce..948dbf103cd 100644
--- a/ext/TrixiConvexECOSExt.jl
+++ b/ext/TrixiConvexECOSExt.jl
@@ -136,7 +136,7 @@ function Trixi.bisect_stability_polynomial(consistency_order, num_eig_vals,
"reltol_inacc" => 5e-5,
"nitref" => 9,
"maxit" => 100,
- "verbose" => 3); silent_solver = true)
+ "verbose" => 3); silent = true)
abs_p = problem.optval
diff --git a/test/Project.toml b/test/Project.toml
index 5fc2bb18bdf..c8ae33a40ae 100644
--- a/test/Project.toml
+++ b/test/Project.toml
@@ -19,7 +19,7 @@ Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
[compat]
Aqua = "0.8"
CairoMakie = "0.10"
-Convex = "0.15.4"
+Convex = "0.16"
DelimitedFiles = "1"
Downloads = "1"
ECOS = "1.1.2"
diff --git a/test/test_tree_1d_advection.jl b/test/test_tree_1d_advection.jl
index afa92efeddb..3885fb50061 100644
--- a/test/test_tree_1d_advection.jl
+++ b/test/test_tree_1d_advection.jl
@@ -84,8 +84,8 @@ end
@trixi_testset "elixir_advection_perk2.jl" begin
@test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_perk2.jl"),
- l2=[0.014139244532882265],
- linf=[0.019997568971592217])
+ l2=[0.014139242834192841],
+ linf=[0.01999756655819429])
# Ensure that we do not have excessive memory allocations
# (e.g., from type instabilities)
let
diff --git a/test/test_unit.jl b/test/test_unit.jl
index de13d41e931..d7ec2084361 100644
--- a/test/test_unit.jl
+++ b/test/test_unit.jl
@@ -1662,16 +1662,16 @@ end
ode_algorithm = Trixi.PairedExplicitRK2(12, tspan, vec(eig_vals))
@test isapprox(ode_algorithm.a_matrix,
- [0.06453812656705388 0.02637096434203703
- 0.09470601372266194 0.04165762264097442
- 0.12332877820057538 0.05848940361760645
- 0.1498701503275483 0.07740257694517898
- 0.173421149536068 0.09930612319120471
- 0.19261978147927503 0.12556203670254315
- 0.2052334022622969 0.15840296137406676
- 0.2073489042901963 0.2017420048007128
- 0.19135142349998963 0.2631940310454649
- 0.13942836392940833 0.3605716360705917], atol = 1e-13)
+ [0.06453812656711647 0.02637096434197444
+ 0.09470601372274887 0.041657622640887494
+ 0.12332877820069793 0.058489403617483886
+ 0.14987015032771522 0.07740257694501203
+ 0.1734211495362651 0.0993061231910076
+ 0.19261978147948638 0.1255620367023318
+ 0.20523340226247055 0.1584029613738931
+ 0.20734890429023528 0.20174200480067384
+ 0.1913514234997008 0.26319403104575373
+ 0.13942836392866081 0.3605716360713392], atol = 1e-13)
end
@testset "Sutherlands Law" begin
From 228aec04210722b39a3a11d3d6129e4317bb7d52 Mon Sep 17 00:00:00 2001
From: Michael Schlottke-Lakemper
Date: Sun, 26 May 2024 14:50:19 +0200
Subject: [PATCH 23/44] Add JuliaCon 24 contributions to readme (#1957)
* Add JuliaCon 24 contributions to readme
* Mention minisymposium
---
README.md | 22 ++++++++++++++++++++++
1 file changed, 22 insertions(+)
diff --git a/README.md b/README.md
index 86a8514a5ba..87075c885f9 100644
--- a/README.md
+++ b/README.md
@@ -19,6 +19,28 @@
+***
+**Trixi.jl at JuliaCon 2024**
+At this year's JuliaCon in Eindhoven, Netherlands, we will be present with several contributions
+from the Trixi Framework ecosystem:
+
+* [**Julia for Particle-Based Multiphysics with TrixiParticles.jl**](https://pretalx.com/juliacon2024/talk/TPFF8L/),
+ [*Erik Faulhaber*](https://github.com/efaulhaber/), [*Niklas Neher*](https://github.com/lasnikas/),
+ 10th July 2024, 11:30am–12:00pm, Function (4.1)
+* [**Towards Aerodynamic Simulations in Julia with Trixi.jl**](https://pretalx.com/juliacon2024/talk/XH8KBG/),
+ [*Daniel Doehring*](https://github.com/danieldoehring/),
+ 10th July 2024, 3:00pm–3:30pm, While Loop (4.2)
+* [**libtrixi: serving legacy codes in earth system modeling with fresh Julia CFD**](https://pretalx.com/juliacon2024/talk/JBKVGF/),
+ [*Benedict Geihe*](https://github.com/benegee/),
+ 12th July 2024, 2:00pm–5:00pm, Function (4.1)
+
+The last talk is part of the
+[Julia for High-Performance Computing](https://pretalx.com/juliacon2024/talk/JBKVGF/)
+minisymposium, which this year is hosted by our own [*Hendrik Ranocha*](https://github.com/ranocha/).
+
+We are looking forward to seeing you there ♥️
+***
+
**Trixi.jl** is a numerical simulation framework for conservation
laws written in [Julia](https://julialang.org). A key objective for the
framework is to be useful to both scientists and students. Therefore, next to
From 76719a85e0226ba9883dcf3817c6afe8daffb3f6 Mon Sep 17 00:00:00 2001
From: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com>
Date: Wed, 29 May 2024 06:04:29 +0200
Subject: [PATCH 24/44] Clean up callback conditions (#1960)
* Clean up callback conditions
* Reformat
---
src/callbacks_step/alive.jl | 1 -
src/callbacks_step/analysis.jl | 10 ++++------
src/callbacks_step/save_restart.jl | 5 ++---
src/callbacks_step/save_solution.jl | 3 +--
src/callbacks_step/visualization.jl | 3 +--
5 files changed, 8 insertions(+), 14 deletions(-)
diff --git a/src/callbacks_step/alive.jl b/src/callbacks_step/alive.jl
index 9df7181521e..9700f7e4cdc 100644
--- a/src/callbacks_step/alive.jl
+++ b/src/callbacks_step/alive.jl
@@ -68,7 +68,6 @@ function (alive_callback::AliveCallback)(u, t, integrator)
# We need to check the number of accepted steps since callbacks are not
# activated after a rejected step.
return alive_interval > 0 && ((integrator.stats.naccept % alive_interval == 0 &&
- !(integrator.stats.naccept == 0 && integrator.iter > 0) &&
(analysis_interval == 0 ||
integrator.stats.naccept % analysis_interval != 0)) ||
isfinished(integrator))
diff --git a/src/callbacks_step/analysis.jl b/src/callbacks_step/analysis.jl
index 8f89af755a2..7b4a97c2a79 100644
--- a/src/callbacks_step/analysis.jl
+++ b/src/callbacks_step/analysis.jl
@@ -26,8 +26,8 @@ or `extra_analysis_errors = (:conservation_error,)`.
If you want to omit the computation (to save compute time) of the [`default_analysis_errors`](@ref), specify
`analysis_errors = Symbol[]`.
Note: `default_analysis_errors` are `:l2_error` and `:linf_error` for all equations.
-If you want to compute `extra_analysis_errors` such as `:conservation_error` solely, i.e.,
-without `:l2_error, :linf_error` you need to specify
+If you want to compute `extra_analysis_errors` such as `:conservation_error` solely, i.e.,
+without `:l2_error, :linf_error` you need to specify
`analysis_errors = [:conservation_error]` instead of `extra_analysis_errors = [:conservation_error]`.
Further scalar functions `func` in `extra_analysis_integrals` are applied to the numerical
@@ -119,9 +119,7 @@ function AnalysisCallback(mesh, equations::AbstractEquations, solver, cache;
# We need to check the number of accepted steps since callbacks are not
# activated after a rejected step.
condition = (u, t, integrator) -> interval > 0 &&
- ((integrator.stats.naccept % interval == 0 &&
- !(integrator.stats.naccept == 0 && integrator.iter > 0)) ||
- isfinished(integrator))
+ (integrator.stats.naccept % interval == 0 || isfinished(integrator))
analyzer = SolutionAnalyzer(solver; kwargs...)
cache_analysis = create_cache_analysis(analyzer, mesh, equations, solver, cache,
@@ -696,7 +694,7 @@ include("analysis_dg3d_parallel.jl")
# Special analyze for `SemidiscretizationHyperbolicParabolic` such that
# precomputed gradients are available. Required for `enstrophy` (see above) and viscous forces.
-# Note that this needs to be included after `analysis_surface_integral_2d.jl` to
+# Note that this needs to be included after `analysis_surface_integral_2d.jl` to
# have `VariableViscous` available.
function analyze(quantity::AnalysisSurfaceIntegral{Variable},
du, u, t,
diff --git a/src/callbacks_step/save_restart.jl b/src/callbacks_step/save_restart.jl
index 0d174d85805..0b0d2420c7a 100644
--- a/src/callbacks_step/save_restart.jl
+++ b/src/callbacks_step/save_restart.jl
@@ -83,8 +83,7 @@ function (restart_callback::SaveRestartCallback)(u, t, integrator)
# (total #steps) (#accepted steps)
# We need to check the number of accepted steps since callbacks are not
# activated after a rejected step.
- return interval > 0 && (((integrator.stats.naccept % interval == 0) &&
- !(integrator.stats.naccept == 0 && integrator.iter > 0)) ||
+ return interval > 0 && (integrator.stats.naccept % interval == 0 ||
(save_final_restart && isfinished(integrator)))
end
@@ -198,7 +197,7 @@ function load_adaptive_time_integrator!(integrator, restart_file::AbstractString
# Reevaluate integrator.fsal_first on the first step
integrator.reeval_fsal = true
# Load additional parameters for PIDController
- if hasproperty(controller, :err) # Distinguish PIDController from PIController
+ if hasproperty(controller, :err) # Distinguish PIDController from PIController
controller.err[:] = read(attributes(file)["time_integrator_controller_err"])
end
end
diff --git a/src/callbacks_step/save_solution.jl b/src/callbacks_step/save_solution.jl
index c106fe69bcd..870cea0b9f5 100644
--- a/src/callbacks_step/save_solution.jl
+++ b/src/callbacks_step/save_solution.jl
@@ -174,8 +174,7 @@ function (solution_callback::SaveSolutionCallback)(u, t, integrator)
# (total #steps) (#accepted steps)
# We need to check the number of accepted steps since callbacks are not
# activated after a rejected step.
- return interval_or_dt > 0 && (((integrator.stats.naccept % interval_or_dt == 0) &&
- !(integrator.stats.naccept == 0 && integrator.iter > 0)) ||
+ return interval_or_dt > 0 && (integrator.stats.naccept % interval_or_dt == 0 ||
(save_final_solution && isfinished(integrator)))
end
diff --git a/src/callbacks_step/visualization.jl b/src/callbacks_step/visualization.jl
index 98c0126a302..30ac88e9fd7 100644
--- a/src/callbacks_step/visualization.jl
+++ b/src/callbacks_step/visualization.jl
@@ -137,8 +137,7 @@ function (visualization_callback::VisualizationCallback)(u, t, integrator)
# (total #steps) (#accepted steps)
# We need to check the number of accepted steps since callbacks are not
# activated after a rejected step.
- return interval > 0 && ((integrator.stats.naccept % interval == 0 &&
- !(integrator.stats.naccept == 0 && integrator.iter > 0)) ||
+ return interval > 0 && (integrator.stats.naccept % interval == 0 ||
isfinished(integrator))
end
From bdb8180ee28b1491134a81869ab11fd7c47adb9e Mon Sep 17 00:00:00 2001
From: Benedict <135045760+benegee@users.noreply.github.com>
Date: Fri, 31 May 2024 21:08:37 +0200
Subject: [PATCH 25/44] Read in capacity for parallel TreeMesh (#1913)
* read in capacity for parallel TreeMesh
fixes restarting with AMR and MPI
* add elixir testing restart with AMR
* add elixir with AMR and restart to MPI test suite
* adapt errors in test
* Update test/test_mpi_tree.jl
Co-authored-by: Hendrik Ranocha
---------
Co-authored-by: Hendrik Ranocha
---
.../elixir_advection_restart_amr.jl | 58 +++++++++++++++++++
src/meshes/mesh_io.jl | 11 ++--
test/test_mpi_tree.jl | 8 +++
3 files changed, 72 insertions(+), 5 deletions(-)
create mode 100644 examples/tree_2d_dgsem/elixir_advection_restart_amr.jl
diff --git a/examples/tree_2d_dgsem/elixir_advection_restart_amr.jl b/examples/tree_2d_dgsem/elixir_advection_restart_amr.jl
new file mode 100644
index 00000000000..2e4ca38a3fa
--- /dev/null
+++ b/examples/tree_2d_dgsem/elixir_advection_restart_amr.jl
@@ -0,0 +1,58 @@
+
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# Define time integration algorithm
+alg = CarpenterKennedy2N54(williamson_condition = false)
+# Create a restart file
+trixi_include(@__MODULE__, joinpath(@__DIR__, "elixir_advection_extended.jl"), alg = alg,
+ tspan = (0.0, 3.0))
+
+###############################################################################
+# adapt the parameters that have changed compared to "elixir_advection_extended.jl"
+
+# Note: If you get a restart file from somewhere else, you need to provide
+# appropriate setups in the elixir loading a restart file
+
+restart_filename = joinpath("out", "restart_000040.h5")
+mesh = load_mesh(restart_filename)
+
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver)
+
+tspan = (load_time(restart_filename), 5.0)
+dt = load_dt(restart_filename)
+ode = semidiscretize(semi, tspan, restart_filename);
+
+# Do not overwrite the initial snapshot written by elixir_advection_extended.jl.
+save_solution.condition.save_initial_solution = false
+
+# Add AMR callback
+amr_controller = ControllerThreeLevel(semi, IndicatorMax(semi, variable = first),
+ base_level = 3,
+ med_level = 4, med_threshold = 0.8,
+ max_level = 5, max_threshold = 1.2)
+amr_callback = AMRCallback(semi, amr_controller,
+ interval = 5,
+ adapt_initial_condition = false,
+ adapt_initial_condition_only_refine = true)
+callbacks_ext = CallbackSet(amr_callback, callbacks.discrete_callbacks...)
+
+integrator = init(ode, alg,
+ dt = dt, # solve needs some value here but it will be overwritten by the stepsize_callback
+ callback = callbacks_ext, maxiters = 100_000; ode_default_options()...)
+
+# Load saved context for adaptive time integrator
+if integrator.opts.adaptive
+ load_adaptive_time_integrator!(integrator, restart_filename)
+end
+
+# Get the last time index and work with that.
+load_timestep!(integrator, restart_filename)
+
+###############################################################################
+# run the simulation
+
+sol = solve!(integrator)
+
+summary_callback() # print the timer summary
diff --git a/src/meshes/mesh_io.jl b/src/meshes/mesh_io.jl
index d74a0c0cea1..b74a3b4d642 100644
--- a/src/meshes/mesh_io.jl
+++ b/src/meshes/mesh_io.jl
@@ -370,17 +370,18 @@ function load_mesh_parallel(mesh_file::AbstractString; n_cells_max, RealT)
if mesh_type == "TreeMesh"
if mpi_isroot()
- n_cells = h5open(mesh_file, "r") do file
- read(attributes(file)["n_cells"])
+ n_cells, capacity = h5open(mesh_file, "r") do file
+ return read(attributes(file)["n_cells"]),
+ read(attributes(file)["capacity"])
end
- MPI.Bcast!(Ref(ndims_), mpi_root(), mpi_comm())
MPI.Bcast!(Ref(n_cells), mpi_root(), mpi_comm())
+ MPI.Bcast!(Ref(capacity), mpi_root(), mpi_comm())
else
- ndims_ = MPI.Bcast!(Ref(0), mpi_root(), mpi_comm())[]
n_cells = MPI.Bcast!(Ref(0), mpi_root(), mpi_comm())[]
+ capacity = MPI.Bcast!(Ref(0), mpi_root(), mpi_comm())[]
end
- mesh = TreeMesh(ParallelTree{ndims_}, max(n_cells, n_cells_max))
+ mesh = TreeMesh(ParallelTree{ndims_}, max(n_cells, n_cells_max, capacity))
load_mesh!(mesh, mesh_file)
elseif mesh_type == "P4estMesh"
if mpi_isroot()
diff --git a/test/test_mpi_tree.jl b/test/test_mpi_tree.jl
index 6351a405b5d..e6e00b2e6b6 100644
--- a/test/test_mpi_tree.jl
+++ b/test/test_mpi_tree.jl
@@ -70,6 +70,14 @@ CI_ON_WINDOWS = (get(ENV, "GITHUB_ACTIONS", false) == "true") && Sys.iswindows()
coverage_override=(maxiters = 6,))
end
+ @trixi_testset "elixir_advection_restart_amr.jl" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR,
+ "elixir_advection_restart_amr.jl"),
+ l2=[7.870371848717432e-5],
+ linf=[0.0007374081713964475],
+ coverage_override=(maxiters = 50,))
+ end
+
# Linear scalar advection with AMR
# These example files are only for testing purposes and have no practical use
@trixi_testset "elixir_advection_amr_refine_twice.jl" begin
From 5a8999ee5cfaa2cb499261d9c38da1006fd11638 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Fri, 31 May 2024 21:09:38 +0200
Subject: [PATCH 26/44] set version to v0.7.15
---
Project.toml | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/Project.toml b/Project.toml
index b2cc12010d2..ce992c9adba 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "Trixi"
uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb"
authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "]
-version = "0.7.15-pre"
+version = "0.7.15"
[deps]
CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
@@ -111,4 +111,4 @@ julia = "1.8"
[extras]
Makie = "ee78f7c6-11fb-53f2-987a-cfe4a2b5a57a"
Convex = "f65535da-76fb-5f13-bab9-19810c17039a"
-ECOS = "e2685f51-7e38-5353-a97d-a921fd2c8199"
\ No newline at end of file
+ECOS = "e2685f51-7e38-5353-a97d-a921fd2c8199"
From 3b52a3013fb09564240a3bc876c5e91f5f6c8642 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Fri, 31 May 2024 21:09:51 +0200
Subject: [PATCH 27/44] set development version to v0.7.16-pre
---
Project.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Project.toml b/Project.toml
index ce992c9adba..9df96d6efa5 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "Trixi"
uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb"
authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "]
-version = "0.7.15"
+version = "0.7.16-pre"
[deps]
CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
From 58f9a8a5d8d618a480628ad65a6f5286c802620a Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Sun, 2 Jun 2024 09:36:46 +0200
Subject: [PATCH 28/44] Bump julia-actions/cache from 1 to 2 (#1966)
Bumps [julia-actions/cache](https://github.com/julia-actions/cache) from 1 to 2.
- [Release notes](https://github.com/julia-actions/cache/releases)
- [Changelog](https://github.com/julia-actions/cache/blob/main/devdocs/making_a_new_release.md)
- [Commits](https://github.com/julia-actions/cache/compare/v1...v2)
---
updated-dependencies:
- dependency-name: julia-actions/cache
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
.github/workflows/Documenter.yml | 2 +-
.github/workflows/Downgrade.yml | 2 +-
.github/workflows/ci.yml | 2 +-
.github/workflows/downstream.yml | 2 +-
4 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/.github/workflows/Documenter.yml b/.github/workflows/Documenter.yml
index df6623839d7..c0fdf8d3b48 100644
--- a/.github/workflows/Documenter.yml
+++ b/.github/workflows/Documenter.yml
@@ -38,7 +38,7 @@ jobs:
with:
version: '1.10'
show-versioninfo: true
- - uses: julia-actions/cache@v1
+ - uses: julia-actions/cache@v2
- uses: julia-actions/julia-buildpkg@v1
env:
PYTHON: ""
diff --git a/.github/workflows/Downgrade.yml b/.github/workflows/Downgrade.yml
index 46ce5d71e36..a7c4f9caf99 100644
--- a/.github/workflows/Downgrade.yml
+++ b/.github/workflows/Downgrade.yml
@@ -69,7 +69,7 @@ jobs:
version: ${{ matrix.version }}
arch: ${{ matrix.arch }}
- run: julia -e 'using InteractiveUtils; versioninfo(verbose=true)'
- - uses: julia-actions/cache@v1
+ - uses: julia-actions/cache@v2
- uses: julia-actions/julia-downgrade-compat@v1
with:
skip: LinearAlgebra,Printf,SparseArrays,UUIDs,DiffEqBase,DelimitedFiles
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index b4b3cfa1487..0c636ee8b0b 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -112,7 +112,7 @@ jobs:
version: ${{ matrix.version }}
arch: ${{ matrix.arch }}
- run: julia -e 'using InteractiveUtils; versioninfo(verbose=true)'
- - uses: julia-actions/cache@v1
+ - uses: julia-actions/cache@v2
- uses: julia-actions/julia-buildpkg@v1
env:
PYTHON: ""
diff --git a/.github/workflows/downstream.yml b/.github/workflows/downstream.yml
index 83ed6cc79c7..939eb533ffc 100644
--- a/.github/workflows/downstream.yml
+++ b/.github/workflows/downstream.yml
@@ -69,7 +69,7 @@ jobs:
version: ${{ matrix.version }}
arch: ${{ matrix.arch }}
- run: julia -e 'using InteractiveUtils; versioninfo(verbose=true)'
- - uses: julia-actions/cache@v1
+ - uses: julia-actions/cache@v2
- uses: julia-actions/julia-buildpkg@v1
- name: Retrieve downstream package
# Note: we retrieve the current `main` branch of the downstream package to ensure
From 866530092e95332b2fd51a7063d3140ba72a577c Mon Sep 17 00:00:00 2001
From: Huiyu Xie
Date: Mon, 3 Jun 2024 05:19:32 -0700
Subject: [PATCH 29/44] Add numerical support of other real types (`Float32`)
(#1909)
* start with src/equations
* revise after the first review
* format src/equations
* another version
* revise after the second review
* apply suggestions from code review - comments
Co-authored-by: Hendrik Ranocha
* remove TODO
* fix small errors
* complete compressible_euler
* fix error and format
* revise min max
* revise based on new docs
* revise based on new comments
* start sample test
* add more tests
* fix typos and comments
* change code review
* add to CI
* apply inferred macro
Co-authored-by: Hendrik Ranocha
* complete
* add Trixi
---------
Co-authored-by: Hendrik Ranocha
---
src/equations/acoustic_perturbation_2d.jl | 56 +--
src/equations/compressible_euler_1d.jl | 213 ++++-----
src/equations/compressible_euler_2d.jl | 450 +++++++++----------
src/equations/compressible_euler_3d.jl | 379 ++++++++--------
test/runtests.jl | 1 +
test/test_type.jl | 502 ++++++++++++++++++++++
6 files changed, 1075 insertions(+), 526 deletions(-)
create mode 100644 test/test_type.jl
diff --git a/src/equations/acoustic_perturbation_2d.jl b/src/equations/acoustic_perturbation_2d.jl
index f4ce770e1e9..1bde94a648a 100644
--- a/src/equations/acoustic_perturbation_2d.jl
+++ b/src/equations/acoustic_perturbation_2d.jl
A constant initial condition where the state variables are zero and the mean flow is constant.
Uses the global mean values from `equations`.
"""
function initial_condition_constant(x, t, equations::AcousticPerturbationEquations2D)
- v1_prime = 0.0
- v2_prime = 0.0
- p_prime_scaled = 0.0
+ v1_prime = 0
+ v2_prime = 0
+ p_prime_scaled = 0
return SVector(v1_prime, v2_prime, p_prime_scaled, global_mean_vars(equations)...)
end
@@ -127,12 +127,13 @@ A smooth initial condition used for convergence tests in combination with
"""
function initial_condition_convergence_test(x, t,
equations::AcousticPerturbationEquations2D)
- c = 2.0
- A = 0.2
- L = 2.0
- f = 2.0 / L
- a = 1.0
- omega = 2 * pi * f
+ RealT = eltype(x)
+ a = 1
+ c = 2
+ L = 2
+ f = 2.0f0 / L
+ A = convert(RealT, 0.2)
+ omega = 2 * convert(RealT, pi) * f
init = c + A * sin(omega * (x[1] + x[2] - a * t))
v1_prime = init
@@ -154,12 +155,13 @@ function source_terms_convergence_test(u, x, t,
equations::AcousticPerturbationEquations2D)
v1_mean, v2_mean, c_mean, rho_mean = cons2mean(u, equations)
- c = 2.0
- A = 0.2
- L = 2.0
- f = 2.0 / L
- a = 1.0
- omega = 2 * pi * f
+ RealT = eltype(u)
+ a = 1
+ c = 2
+ L = 2
+ f = 2.0f0 / L
+ A = convert(RealT, 0.2)
+ omega = 2 * convert(RealT, pi) * f
si, co = sincos(omega * (x[1] + x[2] - a * t))
tmp = v1_mean + v2_mean - a
@@ -168,7 +170,7 @@ function source_terms_convergence_test(u, x, t,
du3 = A * omega * co * (2 * c_mean^2 * rho_mean + 2 * c * tmp + 2 * A * tmp * si) /
c_mean^2
- du4 = du5 = du6 = du7 = 0.0
+ du4 = du5 = du6 = du7 = 0
return SVector(du1, du2, du3, du4, du5, du6, du7)
end
@@ -179,8 +181,8 @@ end
A Gaussian pulse in a constant mean flow. Uses the global mean values from `equations`.
"""
function initial_condition_gauss(x, t, equations::AcousticPerturbationEquations2D)
- v1_prime = 0.0
- v2_prime = 0.0
+ v1_prime = 0
+ v2_prime = 0
p_prime = exp(-4 * (x[1]^2 + x[2]^2))
prim = SVector(v1_prime, v2_prime, p_prime, global_mean_vars(equations)...)
@@ -240,8 +242,8 @@ function boundary_condition_slip_wall(u_inner, normal_direction::AbstractVector,
u_normal = normal[1] * u_inner[1] + normal[2] * u_inner[2]
# create the "external" boundary solution state
- u_boundary = SVector(u_inner[1] - 2.0 * u_normal * normal[1],
- u_inner[2] - 2.0 * u_normal * normal[2],
+ u_boundary = SVector(u_inner[1] - 2 * u_normal * normal[1],
+ u_inner[2] - 2 * u_normal * normal[2],
u_inner[3], cons2mean(u_inner, equations)...)
# calculate the boundary flux
@@ -257,13 +259,14 @@ end
v1_mean, v2_mean, c_mean, rho_mean = cons2mean(u, equations)
# Calculate flux for conservative state variables
+ RealT = eltype(u)
if orientation == 1
f1 = v1_mean * v1_prime + v2_mean * v2_prime +
c_mean^2 * p_prime_scaled / rho_mean
- f2 = zero(eltype(u))
+ f2 = zero(RealT)
f3 = rho_mean * v1_prime + v1_mean * p_prime_scaled
else
- f1 = zero(eltype(u))
+ f1 = zero(RealT)
f2 = v1_mean * v1_prime + v2_mean * v2_prime +
c_mean^2 * p_prime_scaled / rho_mean
f3 = rho_mean * v2_prime + v2_mean * p_prime_scaled
@@ -272,7 +275,7 @@ end
# The rest of the state variables are actually variable coefficients, hence the flux should be
# zero. See https://github.com/trixi-framework/Trixi.jl/issues/358#issuecomment-784828762
# for details.
- f4 = f5 = f6 = f7 = zero(eltype(u))
+ f4 = f5 = f6 = f7 = 0
return SVector(f1, f2, f3, f4, f5, f6, f7)
end
@@ -313,7 +316,7 @@ end
# The rest of the state variables are actually variable coefficients, hence the flux should be
# zero. See https://github.com/trixi-framework/Trixi.jl/issues/358#issuecomment-784828762
# for details.
- f4 = f5 = f6 = f7 = zero(eltype(u))
+ f4 = f5 = f6 = f7 = 0
return SVector(f1, f2, f3, f4, f5, f6, f7)
end
@@ -344,8 +347,9 @@ end
equations::AcousticPerturbationEquations2D)
λ = dissipation.max_abs_speed(u_ll, u_rr, orientation_or_normal_direction,
equations)
- diss = -0.5 * λ * (u_rr - u_ll)
- z = zero(eltype(u_ll))
+ diss = -0.5f0 * λ * (u_rr - u_ll)
+ z = 0
+
return SVector(diss[1], diss[2], diss[3], z, z, z, z)
end
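Note on the acoustic perturbation changes above (illustrative sketch, not part of the patch; the helper name is hypothetical): packing plain integer zeros into an SVector together with floating-point entries is type-stable, because the SVector constructor promotes all entries to a common element type, and the 0.5f0 factor keeps Float32 states in Float32.

    using StaticArrays

    function dissipation_sketch(lambda, u_ll, u_rr)
        diss = -0.5f0 * lambda * (u_rr - u_ll)  # stays Float32 for Float32 inputs
        z = 0                                   # integer zero, promoted on packing
        return SVector(diss[1], diss[2], diss[3], z, z, z, z)
    end

    dissipation_sketch(1.0f0, SVector(1.0f0, 0.0f0, 0.0f0), SVector(2.0f0, 0.0f0, 0.0f0))
    # -> SVector{7, Float32}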
diff --git a/src/equations/compressible_euler_1d.jl b/src/equations/compressible_euler_1d.jl
index a50c896cd1c..a71750ff98c 100644
--- a/src/equations/compressible_euler_1d.jl
+++ b/src/equations/compressible_euler_1d.jl
@@ -53,9 +53,10 @@ varnames(::typeof(cons2prim), ::CompressibleEulerEquations1D) = ("rho", "v1", "p
A constant initial condition to test free-stream preservation.
"""
function initial_condition_constant(x, t, equations::CompressibleEulerEquations1D)
- rho = 1.0
- rho_v1 = 0.1
- rho_e = 10.0
+ RealT = eltype(x)
+ rho = 1
+ rho_v1 = convert(RealT, 0.1)
+ rho_e = 10
return SVector(rho, rho_v1, rho_e)
end
@@ -68,11 +69,12 @@ A smooth initial condition used for convergence tests in combination with
"""
function initial_condition_convergence_test(x, t,
equations::CompressibleEulerEquations1D)
+ RealT = eltype(x)
c = 2
- A = 0.1
+ A = convert(RealT, 0.1)
L = 2
- f = 1 / L
- ω = 2 * pi * f
+ f = 1.0f0 / L
+ ω = 2 * convert(RealT, pi) * f
ini = c + A * sin(ω * (x[1] - t))
rho = ini
@@ -92,11 +94,12 @@ Source terms used for convergence tests in combination with
@inline function source_terms_convergence_test(u, x, t,
equations::CompressibleEulerEquations1D)
# Same settings as in `initial_condition`
+ RealT = eltype(u)
c = 2
- A = 0.1
+ A = convert(RealT, 0.1)
L = 2
- f = 1 / L
- ω = 2 * pi * f
+ f = 1.0f0 / L
+ ω = 2 * convert(RealT, pi) * f
γ = equations.gamma
x1, = x
@@ -108,8 +111,8 @@ Source terms used for convergence tests in combination with
# Note that d/dt rho = -d/dx rho.
# This yields du2 = du3 = d/dx p (derivative of pressure).
# Other terms vanish because of v = 1.
- du1 = zero(eltype(u))
- du2 = rho_x * (2 * rho - 0.5) * (γ - 1)
+ du1 = 0
+ du2 = rho_x * (2 * rho - 0.5f0) * (γ - 1)
du3 = du2
return SVector(du1, du2, du3)
@@ -130,11 +133,12 @@ with the following parameters
- polydeg = 5
"""
function initial_condition_density_wave(x, t, equations::CompressibleEulerEquations1D)
- v1 = 0.1
- rho = 1 + 0.98 * sinpi(2 * (x[1] - t * v1))
+ RealT = eltype(x)
+ v1 = convert(RealT, 0.1)
+ rho = 1 + convert(RealT, 0.98) * sinpi(2 * (x[1] - t * v1))
rho_v1 = rho * v1
p = 20
- rho_e = p / (equations.gamma - 1) + 1 / 2 * rho * v1^2
+ rho_e = p / (equations.gamma - 1) + 0.5f0 * rho * v1^2
return SVector(rho, rho_v1, rho_e)
end
@@ -150,19 +154,20 @@ function initial_condition_weak_blast_wave(x, t,
equations::CompressibleEulerEquations1D)
# From Hennemann & Gassner JCP paper 2020 (Sec. 6.3)
# Set up polar coordinates
- inicenter = SVector(0.0)
+ RealT = eltype(x)
+ inicenter = SVector(0)
x_norm = x[1] - inicenter[1]
r = abs(x_norm)
# The following code is equivalent to
# phi = atan(0.0, x_norm)
# cos_phi = cos(phi)
# in 1D but faster
- cos_phi = x_norm > 0 ? one(x_norm) : -one(x_norm)
+ cos_phi = x_norm > 0 ? 1 : -1
# Calculate primitive variables
- rho = r > 0.5 ? 1.0 : 1.1691
- v1 = r > 0.5 ? 0.0 : 0.1882 * cos_phi
- p = r > 0.5 ? 1.0 : 1.245
+ rho = r > 0.5f0 ? one(RealT) : convert(RealT, 1.1691)
+ v1 = r > 0.5f0 ? zero(RealT) : convert(RealT, 0.1882) * cos_phi
+ p = r > 0.5f0 ? one(RealT) : convert(RealT, 1.245)
return prim2cons(SVector(rho, v1, p), equations)
end
@@ -183,17 +188,18 @@ with self-gravity from
function initial_condition_eoc_test_coupled_euler_gravity(x, t,
equations::CompressibleEulerEquations1D)
# OBS! this assumes that γ = 2 other manufactured source terms are incorrect
- if equations.gamma != 2.0
+ if equations.gamma != 2
error("adiabatic constant must be 2 for the coupling convergence test")
end
- c = 2.0
- A = 0.1
+ RealT = eltype(x)
+ c = 2
+ A = convert(RealT, 0.1)
ini = c + A * sinpi(x[1] - t)
- G = 1.0 # gravitational constant
+ G = 1 # gravitational constant
rho = ini
- v1 = 1.0
- p = 2 * ini^2 * G / pi # * 2 / ndims, but ndims==1 here
+ v1 = 1
+ p = 2 * ini^2 * G / convert(RealT, pi) # * 2 / ndims, but ndims==1 here
return prim2cons(SVector(rho, v1, p), equations)
end
@@ -229,31 +235,29 @@ are available in the paper:
# Eleuterio F. Toro (2009)
# Riemann Solvers and Numerical Methods for Fluid Dynamics: A Practical Introduction
# [DOI: 10.1007/b79761](https://doi.org/10.1007/b79761)
- if v_normal <= 0.0
+ if v_normal <= 0
sound_speed = sqrt(equations.gamma * p_local / rho_local) # local sound speed
p_star = p_local *
- (1 + 0.5 * (equations.gamma - 1) * v_normal / sound_speed)^(2 *
- equations.gamma *
- equations.inv_gamma_minus_one)
- else # v_normal > 0.0
+ (1 + 0.5f0 * (equations.gamma - 1) * v_normal / sound_speed)^(2 *
+ equations.gamma *
+ equations.inv_gamma_minus_one)
+ else # v_normal > 0
A = 2 / ((equations.gamma + 1) * rho_local)
B = p_local * (equations.gamma - 1) / (equations.gamma + 1)
p_star = p_local +
- 0.5 * v_normal / A *
+ 0.5f0 * v_normal / A *
(v_normal + sqrt(v_normal^2 + 4 * A * (p_local + B)))
end
# For the slip wall we directly set the flux as the normal velocity is zero
- return SVector(zero(eltype(u_inner)),
- p_star,
- zero(eltype(u_inner)))
+ return SVector(0, p_star, 0)
end
# Calculate 1D flux for a single point
@inline function flux(u, orientation::Integer, equations::CompressibleEulerEquations1D)
rho, rho_v1, rho_e = u
v1 = rho_v1 / rho
- p = (equations.gamma - 1) * (rho_e - 0.5 * rho_v1 * v1)
+ p = (equations.gamma - 1) * (rho_e - 0.5f0 * rho_v1 * v1)
# Ignore orientation since it is always "1" in 1D
f1 = rho_v1
f2 = rho_v1 * v1 + p
@@ -283,14 +287,14 @@ The modification is in the energy flux to guarantee pressure equilibrium and was
rho_rr, v1_rr, p_rr = cons2prim(u_rr, equations)
# Average each factor of products in flux
- rho_avg = 1 / 2 * (rho_ll + rho_rr)
- v1_avg = 1 / 2 * (v1_ll + v1_rr)
- p_avg = 1 / 2 * (p_ll + p_rr)
- kin_avg = 1 / 2 * (v1_ll * v1_rr)
+ rho_avg = 0.5f0 * (rho_ll + rho_rr)
+ v1_avg = 0.5f0 * (v1_ll + v1_rr)
+ p_avg = 0.5f0 * (p_ll + p_rr)
+ kin_avg = 0.5f0 * (v1_ll * v1_rr)
# Calculate fluxes
# Ignore orientation since it is always "1" in 1D
- pv1_avg = 1 / 2 * (p_ll * v1_rr + p_rr * v1_ll)
+ pv1_avg = 0.5f0 * (p_ll * v1_rr + p_rr * v1_ll)
f1 = rho_avg * v1_avg
f2 = f1 * v1_avg + p_avg
f3 = p_avg * v1_avg * equations.inv_gamma_minus_one + f1 * kin_avg + pv1_avg
@@ -316,10 +320,10 @@ Kinetic energy preserving two-point flux by
rho_rr, v1_rr, p_rr = cons2prim(u_rr, equations)
# Average each factor of products in flux
- rho_avg = 1 / 2 * (rho_ll + rho_rr)
- v1_avg = 1 / 2 * (v1_ll + v1_rr)
- p_avg = 1 / 2 * (p_ll + p_rr)
- e_avg = 1 / 2 * (rho_e_ll / rho_ll + rho_e_rr / rho_rr)
+ rho_avg = 0.5f0 * (rho_ll + rho_rr)
+ v1_avg = 0.5f0 * (v1_ll + v1_rr)
+ p_avg = 0.5f0 * (p_ll + p_rr)
+ e_avg = 0.5f0 * (rho_e_ll / rho_ll + rho_e_rr / rho_rr)
# Ignore orientation since it is always "1" in 1D
f1 = rho_avg * v1_avg
@@ -343,25 +347,25 @@ Entropy conserving two-point flux by
# Unpack left and right state
rho_ll, v1_ll, p_ll = cons2prim(u_ll, equations)
rho_rr, v1_rr, p_rr = cons2prim(u_rr, equations)
- beta_ll = 0.5 * rho_ll / p_ll
- beta_rr = 0.5 * rho_rr / p_rr
- specific_kin_ll = 0.5 * (v1_ll^2)
- specific_kin_rr = 0.5 * (v1_rr^2)
+ beta_ll = 0.5f0 * rho_ll / p_ll
+ beta_rr = 0.5f0 * rho_rr / p_rr
+ specific_kin_ll = 0.5f0 * (v1_ll^2)
+ specific_kin_rr = 0.5f0 * (v1_rr^2)
# Compute the necessary mean values
- rho_avg = 0.5 * (rho_ll + rho_rr)
+ rho_avg = 0.5f0 * (rho_ll + rho_rr)
rho_mean = ln_mean(rho_ll, rho_rr)
beta_mean = ln_mean(beta_ll, beta_rr)
- beta_avg = 0.5 * (beta_ll + beta_rr)
- v1_avg = 0.5 * (v1_ll + v1_rr)
- p_mean = 0.5 * rho_avg / beta_avg
+ beta_avg = 0.5f0 * (beta_ll + beta_rr)
+ v1_avg = 0.5f0 * (v1_ll + v1_rr)
+ p_mean = 0.5f0 * rho_avg / beta_avg
velocity_square_avg = specific_kin_ll + specific_kin_rr
# Calculate fluxes
# Ignore orientation since it is always "1" in 1D
f1 = rho_mean * v1_avg
f2 = f1 * v1_avg + p_mean
- f3 = f1 * 0.5 * (1 / (equations.gamma - 1) / beta_mean - velocity_square_avg) +
+ f3 = f1 * 0.5f0 * (1 / (equations.gamma - 1) / beta_mean - velocity_square_avg) +
f2 * v1_avg
return SVector(f1, f2, f3)
@@ -394,16 +398,16 @@ See also
# log((ϱₗ/pₗ) / (ϱᵣ/pᵣ)) / (ϱₗ/pₗ - ϱᵣ/pᵣ)
# = pₗ pᵣ log((ϱₗ pᵣ) / (ϱᵣ pₗ)) / (ϱₗ pᵣ - ϱᵣ pₗ)
inv_rho_p_mean = p_ll * p_rr * inv_ln_mean(rho_ll * p_rr, rho_rr * p_ll)
- v1_avg = 0.5 * (v1_ll + v1_rr)
- p_avg = 0.5 * (p_ll + p_rr)
- velocity_square_avg = 0.5 * (v1_ll * v1_rr)
+ v1_avg = 0.5f0 * (v1_ll + v1_rr)
+ p_avg = 0.5f0 * (p_ll + p_rr)
+ velocity_square_avg = 0.5f0 * (v1_ll * v1_rr)
# Calculate fluxes
# Ignore orientation since it is always "1" in 1D
f1 = rho_mean * v1_avg
f2 = f1 * v1_avg + p_avg
f3 = f1 * (velocity_square_avg + inv_rho_p_mean * equations.inv_gamma_minus_one) +
- 0.5 * (p_ll * v1_rr + p_rr * v1_ll)
+ 0.5f0 * (p_ll * v1_rr + p_rr * v1_ll)
return SVector(f1, f2, f3)
end
@@ -453,7 +457,7 @@ end
equations::CompressibleEulerEquations1D)
rho, rho_v1, rho_e = u
v1 = rho_v1 / rho
- p = (equations.gamma - 1) * (rho_e - 0.5 * rho_v1 * v1)
+ p = (equations.gamma - 1) * (rho_e - 0.5f0 * rho_v1 * v1)
a = sqrt(equations.gamma * p / rho)
lambda1 = v1
@@ -466,10 +470,10 @@ end
alpha_p = 2 * (equations.gamma - 1) * lambda1_p + lambda2_p + lambda3_p
- rho_2gamma = 0.5 * rho / equations.gamma
+ rho_2gamma = 0.5f0 * rho / equations.gamma
f1p = rho_2gamma * alpha_p
f2p = rho_2gamma * (alpha_p * v1 + a * (lambda2_p - lambda3_p))
- f3p = rho_2gamma * (alpha_p * 0.5 * v1^2 + a * v1 * (lambda2_p - lambda3_p)
+ f3p = rho_2gamma * (alpha_p * 0.5f0 * v1^2 + a * v1 * (lambda2_p - lambda3_p)
+ a^2 * (lambda2_p + lambda3_p) * equations.inv_gamma_minus_one)
return SVector(f1p, f2p, f3p)
@@ -479,7 +483,7 @@ end
equations::CompressibleEulerEquations1D)
rho, rho_v1, rho_e = u
v1 = rho_v1 / rho
- p = (equations.gamma - 1) * (rho_e - 0.5 * rho_v1 * v1)
+ p = (equations.gamma - 1) * (rho_e - 0.5f0 * rho_v1 * v1)
a = sqrt(equations.gamma * p / rho)
lambda1 = v1
@@ -492,10 +496,10 @@ end
alpha_m = 2 * (equations.gamma - 1) * lambda1_m + lambda2_m + lambda3_m
- rho_2gamma = 0.5 * rho / equations.gamma
+ rho_2gamma = 0.5f0 * rho / equations.gamma
f1m = rho_2gamma * alpha_m
f2m = rho_2gamma * (alpha_m * v1 + a * (lambda2_m - lambda3_m))
- f3m = rho_2gamma * (alpha_m * 0.5 * v1^2 + a * v1 * (lambda2_m - lambda3_m)
+ f3m = rho_2gamma * (alpha_m * 0.5f0 * v1^2 + a * v1 * (lambda2_m - lambda3_m)
+ a^2 * (lambda2_m + lambda3_m) * equations.inv_gamma_minus_one)
return SVector(f1m, f2m, f3m)
@@ -546,7 +550,7 @@ end
equations::CompressibleEulerEquations1D)
rho, rho_v1, rho_e = u
v1 = rho_v1 / rho
- p = (equations.gamma - 1) * (rho_e - 0.5 * rho_v1 * v1)
+ p = (equations.gamma - 1) * (rho_e - 0.5f0 * rho_v1 * v1)
# sound speed and enthalpy
a = sqrt(equations.gamma * p / rho)
@@ -555,9 +559,9 @@ end
# signed Mach number
M = v1 / a
- p_plus = 0.5 * (1 + equations.gamma * M) * p
+ p_plus = 0.5f0 * (1 + equations.gamma * M) * p
- f1p = 0.25 * rho * a * (M + 1)^2
+ f1p = 0.25f0 * rho * a * (M + 1)^2
f2p = f1p * v1 + p_plus
f3p = f1p * H
@@ -568,7 +572,7 @@ end
equations::CompressibleEulerEquations1D)
rho, rho_v1, rho_e = u
v1 = rho_v1 / rho
- p = (equations.gamma - 1) * (rho_e - 0.5 * rho_v1 * v1)
+ p = (equations.gamma - 1) * (rho_e - 0.5f0 * rho_v1 * v1)
# sound speed and enthalpy
a = sqrt(equations.gamma * p / rho)
@@ -577,9 +581,9 @@ end
# signed Mach number
M = v1 / a
- p_minus = 0.5 * (1 - equations.gamma * M) * p
+ p_minus = 0.5f0 * (1 - equations.gamma * M) * p
- f1m = -0.25 * rho * a * (M - 1)^2
+ f1m = -0.25f0 * rho * a * (M - 1)^2
f2m = f1m * v1 + p_minus
f3m = f1m * H
@@ -634,7 +638,7 @@ end
equations::CompressibleEulerEquations1D)
rho, rho_v1, rho_e = u
v1 = rho_v1 / rho
- p = (equations.gamma - 1) * (rho_e - 0.5 * rho_v1 * v1)
+ p = (equations.gamma - 1) * (rho_e - 0.5f0 * rho_v1 * v1)
# sound speed and enthalpy
a = sqrt(equations.gamma * p / rho)
@@ -644,13 +648,13 @@ end
M = v1 / a
P = 2
- mu = 1.0
- nu = 0.75
- omega = 2.0 # adjusted from suggested value of 1.5
+ mu = 1
+ nu = 0.75f0
+ omega = 2 # adjusted from suggested value of 1.5
- p_plus = 0.25 * ((M + 1)^2 * (2 - M) - nu * M * (M^2 - 1)^P) * p
+ p_plus = 0.25f0 * ((M + 1)^2 * (2 - M) - nu * M * (M^2 - 1)^P) * p
- f1p = 0.25 * rho * a * ((M + 1)^2 - mu * (M^2 - 1)^P)
+ f1p = 0.25f0 * rho * a * ((M + 1)^2 - mu * (M^2 - 1)^P)
f2p = f1p * v1 + p_plus
f3p = f1p * H - omega * rho * a^3 * M^2 * (M^2 - 1)^2
@@ -661,7 +665,7 @@ end
equations::CompressibleEulerEquations1D)
rho, rho_v1, rho_e = u
v1 = rho_v1 / rho
- p = (equations.gamma - 1) * (rho_e - 0.5 * rho_v1 * v1)
+ p = (equations.gamma - 1) * (rho_e - 0.5f0 * rho_v1 * v1)
# sound speed and enthalpy
a = sqrt(equations.gamma * p / rho)
@@ -671,13 +675,13 @@ end
M = v1 / a
P = 2
- mu = 1.0
- nu = 0.75
- omega = 2.0 # adjusted from suggested value of 1.5
+ mu = 1
+ nu = 0.75f0
+ omega = 2 # adjusted from suggested value of 1.5
- p_minus = 0.25 * ((M - 1)^2 * (2 + M) + nu * M * (M^2 - 1)^P) * p
+ p_minus = 0.25f0 * ((M - 1)^2 * (2 + M) + nu * M * (M^2 - 1)^P) * p
- f1m = -0.25 * rho * a * ((M - 1)^2 - mu * (M^2 - 1)^P)
+ f1m = -0.25f0 * rho * a * ((M - 1)^2 - mu * (M^2 - 1)^P)
f2m = f1m * v1 + p_minus
f3m = f1m * H + omega * rho * a^3 * M^2 * (M^2 - 1)^2
@@ -694,11 +698,11 @@ end
# Calculate primitive variables and speed of sound
v1_ll = rho_v1_ll / rho_ll
v_mag_ll = abs(v1_ll)
- p_ll = (equations.gamma - 1) * (rho_e_ll - 1 / 2 * rho_ll * v_mag_ll^2)
+ p_ll = (equations.gamma - 1) * (rho_e_ll - 0.5f0 * rho_ll * v_mag_ll^2)
c_ll = sqrt(equations.gamma * p_ll / rho_ll)
v1_rr = rho_v1_rr / rho_rr
v_mag_rr = abs(v1_rr)
- p_rr = (equations.gamma - 1) * (rho_e_rr - 1 / 2 * rho_rr * v_mag_rr^2)
+ p_rr = (equations.gamma - 1) * (rho_e_rr - 0.5f0 * rho_rr * v_mag_rr^2)
c_rr = sqrt(equations.gamma * p_rr / rho_rr)
λ_max = max(v_mag_ll, v_mag_rr) + max(c_ll, c_rr)
@@ -746,12 +750,12 @@ function flux_hllc(u_ll, u_rr, orientation::Integer,
v1_ll = rho_v1_ll / rho_ll
e_ll = rho_e_ll / rho_ll
- p_ll = (equations.gamma - 1) * (rho_e_ll - 1 / 2 * rho_ll * v1_ll^2)
+ p_ll = (equations.gamma - 1) * (rho_e_ll - 0.5f0 * rho_ll * v1_ll^2)
c_ll = sqrt(equations.gamma * p_ll / rho_ll)
v1_rr = rho_v1_rr / rho_rr
e_rr = rho_e_rr / rho_rr
- p_rr = (equations.gamma - 1) * (rho_e_rr - 1 / 2 * rho_rr * v1_rr^2)
+ p_rr = (equations.gamma - 1) * (rho_e_rr - 0.5f0 * rho_rr * v1_rr^2)
c_rr = sqrt(equations.gamma * p_rr / rho_rr)
# Obtain left and right fluxes
@@ -765,7 +769,7 @@ function flux_hllc(u_ll, u_rr, orientation::Integer,
vel_L = v1_ll
vel_R = v1_rr
vel_roe = (sqrt_rho_ll * vel_L + sqrt_rho_rr * vel_R) / sum_sqrt_rho
- ekin_roe = 0.5 * vel_roe^2
+ ekin_roe = 0.5f0 * vel_roe^2
H_ll = (rho_e_ll + p_ll) / rho_ll
H_rr = (rho_e_rr + p_rr) / rho_rr
H_roe = (sqrt_rho_ll * H_ll + sqrt_rho_rr * H_rr) / sum_sqrt_rho
@@ -775,18 +779,18 @@ function flux_hllc(u_ll, u_rr, orientation::Integer,
Ssr = max(vel_R + c_rr, vel_roe + c_roe)
sMu_L = Ssl - vel_L
sMu_R = Ssr - vel_R
- if Ssl >= 0.0
+ if Ssl >= 0
f1 = f_ll[1]
f2 = f_ll[2]
f3 = f_ll[3]
- elseif Ssr <= 0.0
+ elseif Ssr <= 0
f1 = f_rr[1]
f2 = f_rr[2]
f3 = f_rr[3]
else
SStar = (p_rr - p_ll + rho_ll * vel_L * sMu_L - rho_rr * vel_R * sMu_R) /
(rho_ll * sMu_L - rho_rr * sMu_R)
- if Ssl <= 0.0 <= SStar
+ if Ssl <= 0 <= SStar
densStar = rho_ll * sMu_L / (Ssl - SStar)
enerStar = e_ll + (SStar - vel_L) * (SStar + p_ll / (rho_ll * sMu_L))
UStar1 = densStar
@@ -853,15 +857,15 @@ Compactly summarized:
v_roe_mag = v_roe^2
H_roe = (sqrt_rho_ll * H_ll + sqrt_rho_rr * H_rr) * inv_sum_sqrt_rho
- c_roe = sqrt((equations.gamma - 1) * (H_roe - 0.5 * v_roe_mag))
+ c_roe = sqrt((equations.gamma - 1) * (H_roe - 0.5f0 * v_roe_mag))
# Compute convenience constant for positivity preservation, see
# https://doi.org/10.1016/0021-9991(91)90211-3
- beta = sqrt(0.5 * (equations.gamma - 1) / equations.gamma)
+ beta = sqrt(0.5f0 * (equations.gamma - 1) / equations.gamma)
# Estimate the edges of the Riemann fan (with positivity conservation)
- SsL = min(v_roe - c_roe, v_ll - beta * c_ll, zero(v_roe))
- SsR = max(v_roe + c_roe, v_rr + beta * c_rr, zero(v_roe))
+ SsL = min(v_roe - c_roe, v_ll - beta * c_ll, 0)
+ SsR = max(v_roe + c_roe, v_rr + beta * c_rr, 0)
return SsL, SsR
end
@@ -869,7 +873,7 @@ end
@inline function max_abs_speeds(u, equations::CompressibleEulerEquations1D)
rho, rho_v1, rho_e = u
v1 = rho_v1 / rho
- p = (equations.gamma - 1) * (rho_e - 1 / 2 * rho * v1^2)
+ p = (equations.gamma - 1) * (rho_e - 0.5f0 * rho * v1^2)
c = sqrt(equations.gamma * p / rho)
return (abs(v1) + c,)
@@ -880,7 +884,7 @@ end
rho, rho_v1, rho_e = u
v1 = rho_v1 / rho
- p = (equations.gamma - 1) * (rho_e - 0.5 * rho_v1 * v1)
+ p = (equations.gamma - 1) * (rho_e - 0.5f0 * rho_v1 * v1)
return SVector(rho, v1, p)
end
@@ -891,11 +895,12 @@ end
v1 = rho_v1 / rho
v_square = v1^2
- p = (equations.gamma - 1) * (rho_e - 0.5 * rho * v_square)
+ p = (equations.gamma - 1) * (rho_e - 0.5f0 * rho * v_square)
s = log(p) - equations.gamma * log(rho)
rho_p = rho / p
- w1 = (equations.gamma - s) * equations.inv_gamma_minus_one - 0.5 * rho_p * v_square
+ w1 = (equations.gamma - s) * equations.inv_gamma_minus_one -
+ 0.5f0 * rho_p * v_square
w2 = rho_p * v1
w3 = -rho_p
@@ -912,7 +917,7 @@ end
V1, V2, V5 = w .* (gamma - 1)
# specific entropy, eq. (53)
- s = gamma - V1 + 0.5 * (V2^2) / V5
+ s = gamma - V1 + 0.5f0 * (V2^2) / V5
# eq. (52)
energy_internal = ((gamma - 1) / (-V5)^gamma)^(equations.inv_gamma_minus_one) *
@@ -921,7 +926,7 @@ end
# eq. (51)
rho = -V5 * energy_internal
rho_v1 = V2 * energy_internal
- rho_e = (1 - 0.5 * (V2^2) / V5) * energy_internal
+ rho_e = (1 - 0.5f0 * (V2^2) / V5) * energy_internal
return SVector(rho, rho_v1, rho_e)
end
@@ -929,7 +934,7 @@ end
@inline function prim2cons(prim, equations::CompressibleEulerEquations1D)
rho, v1, p = prim
rho_v1 = rho * v1
- rho_e = p * equations.inv_gamma_minus_one + 0.5 * (rho_v1 * v1)
+ rho_e = p * equations.inv_gamma_minus_one + 0.5f0 * (rho_v1 * v1)
return SVector(rho, rho_v1, rho_e)
end
@@ -940,20 +945,20 @@ end
@inline function pressure(u, equations::CompressibleEulerEquations1D)
rho, rho_v1, rho_e = u
- p = (equations.gamma - 1) * (rho_e - 0.5 * (rho_v1^2) / rho)
+ p = (equations.gamma - 1) * (rho_e - 0.5f0 * (rho_v1^2) / rho)
return p
end
@inline function density_pressure(u, equations::CompressibleEulerEquations1D)
rho, rho_v1, rho_e = u
- rho_times_p = (equations.gamma - 1) * (rho * rho_e - 0.5 * (rho_v1^2))
+ rho_times_p = (equations.gamma - 1) * (rho * rho_e - 0.5f0 * (rho_v1^2))
return rho_times_p
end
# Calculate thermodynamic entropy for a conservative state `cons`
@inline function entropy_thermodynamic(cons, equations::CompressibleEulerEquations1D)
# Pressure
- p = (equations.gamma - 1) * (cons[3] - 1 / 2 * (cons[2]^2) / cons[1])
+ p = (equations.gamma - 1) * (cons[3] - 0.5f0 * (cons[2]^2) / cons[1])
# Thermodynamic entropy
s = log(p) - equations.gamma * log(cons[1])
@@ -980,7 +985,7 @@ end
# Calculate kinetic energy for a conservative state `cons`
@inline function energy_kinetic(cons, equations::CompressibleEulerEquations1D)
- return 0.5 * (cons[2]^2) / cons[1]
+ return 0.5f0 * (cons[2]^2) / cons[1]
end
# Calculate internal energy for a conservative state `cons`
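The new test/test_type.jl (see the diffstat above) checks that these 1D kernels stay type-stable. A hedged sketch of such a check (the actual tests may be organized differently):

    using Trixi, StaticArrays, Test

    let equations = CompressibleEulerEquations1D(1.4f0)
        u = prim2cons(SVector(1.0f0, 0.1f0, 1.0f0), equations)  # rho, v1, p
        @test eltype(@inferred flux(u, 1, equations)) == Float32
        @test eltype(@inferred cons2prim(u, equations)) == Float32
    end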
diff --git a/src/equations/compressible_euler_2d.jl b/src/equations/compressible_euler_2d.jl
index d15c5c65355..e964d9c2b21 100644
--- a/src/equations/compressible_euler_2d.jl
+++ b/src/equations/compressible_euler_2d.jl
@@ -60,10 +60,11 @@ varnames(::typeof(cons2prim), ::CompressibleEulerEquations2D) = ("rho", "v1", "v
A constant initial condition to test free-stream preservation.
"""
function initial_condition_constant(x, t, equations::CompressibleEulerEquations2D)
- rho = 1.0
- rho_v1 = 0.1
- rho_v2 = -0.2
- rho_e = 10.0
+ RealT = eltype(x)
+ rho = 1
+ rho_v1 = convert(RealT, 0.1)
+ rho_v2 = convert(RealT, -0.2)
+ rho_e = 10
return SVector(rho, rho_v1, rho_v2, rho_e)
end
@@ -76,11 +77,12 @@ A smooth initial condition used for convergence tests in combination with
"""
function initial_condition_convergence_test(x, t,
equations::CompressibleEulerEquations2D)
+ RealT = eltype(x)
c = 2
- A = 0.1
+ A = convert(RealT, 0.1)
L = 2
- f = 1 / L
- ω = 2 * pi * f
+ f = 1.0f0 / L
+ ω = 2 * convert(RealT, pi) * f
ini = c + A * sin(ω * (x[1] + x[2] - t))
rho = ini
@@ -101,11 +103,12 @@ Source terms used for convergence tests in combination with
@inline function source_terms_convergence_test(u, x, t,
equations::CompressibleEulerEquations2D)
# Same settings as in `initial_condition`
+ RealT = eltype(u)
c = 2
- A = 0.1
+ A = convert(RealT, 0.1)
L = 2
- f = 1 / L
- ω = 2 * pi * f
+ f = 1.0f0 / L
+ ω = 2 * convert(RealT, pi) * f
γ = equations.gamma
x1, x2 = x
@@ -139,13 +142,14 @@ with the following parameters
- polydeg = 5
"""
function initial_condition_density_wave(x, t, equations::CompressibleEulerEquations2D)
- v1 = 0.1
- v2 = 0.2
- rho = 1 + 0.98 * sinpi(2 * (x[1] + x[2] - t * (v1 + v2)))
+ RealT = eltype(x)
+ v1 = convert(RealT, 0.1)
+ v2 = convert(RealT, 0.2)
+ rho = 1 + convert(RealT, 0.98) * sinpi(2 * (x[1] + x[2] - t * (v1 + v2)))
rho_v1 = rho * v1
rho_v2 = rho * v2
p = 20
- rho_e = p / (equations.gamma - 1) + 1 / 2 * rho * (v1^2 + v2^2)
+ rho_e = p / (equations.gamma - 1) + 0.5f0 * rho * (v1^2 + v2^2)
return SVector(rho, rho_v1, rho_v2, rho_e)
end
@@ -161,7 +165,7 @@ function initial_condition_weak_blast_wave(x, t,
equations::CompressibleEulerEquations2D)
# From Hennemann & Gassner JCP paper 2020 (Sec. 6.3)
# Set up polar coordinates
- inicenter = SVector(0.0, 0.0)
+ inicenter = SVector(0, 0)
x_norm = x[1] - inicenter[1]
y_norm = x[2] - inicenter[2]
r = sqrt(x_norm^2 + y_norm^2)
@@ -169,10 +173,11 @@ function initial_condition_weak_blast_wave(x, t,
sin_phi, cos_phi = sincos(phi)
# Calculate primitive variables
- rho = r > 0.5 ? 1.0 : 1.1691
- v1 = r > 0.5 ? 0.0 : 0.1882 * cos_phi
- v2 = r > 0.5 ? 0.0 : 0.1882 * sin_phi
- p = r > 0.5 ? 1.0 : 1.245
+ RealT = eltype(x)
+ rho = r > 0.5f0 ? one(RealT) : convert(RealT, 1.1691)
+ v1 = r > 0.5f0 ? zero(RealT) : convert(RealT, 0.1882) * cos_phi
+ v2 = r > 0.5f0 ? zero(RealT) : convert(RealT, 0.1882) * sin_phi
+ p = r > 0.5f0 ? one(RealT) : convert(RealT, 1.245)
return prim2cons(SVector(rho, v1, v2, p), equations)
end
@@ -190,18 +195,19 @@ or [`source_terms_eoc_test_euler`](@ref).
function initial_condition_eoc_test_coupled_euler_gravity(x, t,
equations::CompressibleEulerEquations2D)
# OBS! this assumes that γ = 2 other manufactured source terms are incorrect
- if equations.gamma != 2.0
+ if equations.gamma != 2
error("adiabatic constant must be 2 for the coupling convergence test")
end
- c = 2.0
- A = 0.1
- ini = c + A * sin(pi * (x[1] + x[2] - t))
- G = 1.0 # gravitational constant
+ RealT = eltype(x)
+ c = 2
+ A = convert(RealT, 0.1)
+ ini = c + A * sin(convert(RealT, pi) * (x[1] + x[2] - t))
+ G = 1 # gravitational constant
rho = ini
- v1 = 1.0
- v2 = 1.0
- p = ini^2 * G / pi # * 2 / ndims, but ndims==2 here
+ v1 = 1
+ v2 = 1
+ p = ini^2 * G / convert(RealT, pi) # * 2 / ndims, but ndims==2 here
return prim2cons(SVector(rho, v1, v2, p), equations)
end
@@ -218,20 +224,21 @@ in combination with [`initial_condition_eoc_test_coupled_euler_gravity`](@ref).
@inline function source_terms_eoc_test_coupled_euler_gravity(u, x, t,
equations::CompressibleEulerEquations2D)
# Same settings as in `initial_condition_eoc_test_coupled_euler_gravity`
- c = 2.0
- A = 0.1
- G = 1.0 # gravitational constant, must match coupling solver
- C_grav = -2 * G / pi # 2 == 4 / ndims
+ RealT = eltype(u)
+ c = 2
+ A = convert(RealT, 0.1)
+ G = 1 # gravitational constant, must match coupling solver
+ C_grav = -2 * G / convert(RealT, pi) # 2 == 4 / ndims
x1, x2 = x
- si, co = sincos(pi * (x1 + x2 - t))
- rhox = A * pi * co
+ si, co = sincos(convert(RealT, pi) * (x1 + x2 - t))
+ rhox = A * convert(RealT, pi) * co
rho = c + A * si
du1 = rhox
du2 = rhox
du3 = rhox
- du4 = (1.0 - C_grav * rho) * rhox
+ du4 = (1 - C_grav * rho) * rhox
return SVector(du1, du2, du3, du4)
end
@@ -248,14 +255,15 @@ in combination with [`initial_condition_eoc_test_coupled_euler_gravity`](@ref).
@inline function source_terms_eoc_test_euler(u, x, t,
equations::CompressibleEulerEquations2D)
# Same settings as in `initial_condition_eoc_test_coupled_euler_gravity`
- c = 2.0
- A = 0.1
- G = 1.0
- C_grav = -2 * G / pi # 2 == 4 / ndims
+ RealT = eltype(u)
+ c = 2
+ A = convert(RealT, 0.1)
+ G = 1
+ C_grav = -2 * G / convert(RealT, pi) # 2 == 4 / ndims
x1, x2 = x
- si, co = sincos(pi * (x1 + x2 - t))
- rhox = A * pi * co
+ si, co = sincos(convert(RealT, pi) * (x1 + x2 - t))
+ rhox = A * convert(RealT, pi) * co
rho = c + A * si
du1 = rhox
@@ -307,25 +315,25 @@ Should be used together with [`UnstructuredMesh2D`](@ref).
# Eleuterio F. Toro (2009)
# Riemann Solvers and Numerical Methods for Fluid Dynamics: A Practical Introduction
# [DOI: 10.1007/b79761](https://doi.org/10.1007/b79761)
- if v_normal <= 0.0
+ if v_normal <= 0
sound_speed = sqrt(equations.gamma * p_local / rho_local) # local sound speed
p_star = p_local *
- (1 + 0.5 * (equations.gamma - 1) * v_normal / sound_speed)^(2 *
- equations.gamma *
- equations.inv_gamma_minus_one)
- else # v_normal > 0.0
+ (1 + 0.5f0 * (equations.gamma - 1) * v_normal / sound_speed)^(2 *
+ equations.gamma *
+ equations.inv_gamma_minus_one)
+ else # v_normal > 0
A = 2 / ((equations.gamma + 1) * rho_local)
B = p_local * (equations.gamma - 1) / (equations.gamma + 1)
p_star = p_local +
- 0.5 * v_normal / A *
+ 0.5f0 * v_normal / A *
(v_normal + sqrt(v_normal^2 + 4 * A * (p_local + B)))
end
# For the slip wall we directly set the flux as the normal velocity is zero
- return SVector(zero(eltype(u_inner)),
+ return SVector(0,
p_star * normal[1],
p_star * normal[2],
- zero(eltype(u_inner))) * norm_
+ 0) * norm_
end
"""
@@ -339,10 +347,11 @@ Should be used together with [`TreeMesh`](@ref).
surface_flux_function,
equations::CompressibleEulerEquations2D)
# get the appropriate normal vector from the orientation
+ RealT = eltype(u_inner)
if orientation == 1
- normal_direction = SVector(1, 0)
+ normal_direction = SVector(one(RealT), zero(RealT))
else # orientation == 2
- normal_direction = SVector(0, 1)
+ normal_direction = SVector(zero(RealT), one(RealT))
end
# compute and return the flux using `boundary_condition_slip_wall` routine above
@@ -380,7 +389,7 @@ end
rho, rho_v1, rho_v2, rho_e = u
v1 = rho_v1 / rho
v2 = rho_v2 / rho
- p = (equations.gamma - 1) * (rho_e - 0.5 * (rho_v1 * v1 + rho_v2 * v2))
+ p = (equations.gamma - 1) * (rho_e - 0.5f0 * (rho_v1 * v1 + rho_v2 * v2))
if orientation == 1
f1 = rho_v1
f2 = rho_v1 * v1 + p
@@ -434,21 +443,21 @@ The modification is in the energy flux to guarantee pressure equilibrium and was
rho_rr, v1_rr, v2_rr, p_rr = cons2prim(u_rr, equations)
# Average each factor of products in flux
- rho_avg = 1 / 2 * (rho_ll + rho_rr)
- v1_avg = 1 / 2 * (v1_ll + v1_rr)
- v2_avg = 1 / 2 * (v2_ll + v2_rr)
- p_avg = 1 / 2 * (p_ll + p_rr)
- kin_avg = 1 / 2 * (v1_ll * v1_rr + v2_ll * v2_rr)
+ rho_avg = 0.5f0 * (rho_ll + rho_rr)
+ v1_avg = 0.5f0 * (v1_ll + v1_rr)
+ v2_avg = 0.5f0 * (v2_ll + v2_rr)
+ p_avg = 0.5f0 * (p_ll + p_rr)
+ kin_avg = 0.5f0 * (v1_ll * v1_rr + v2_ll * v2_rr)
# Calculate fluxes depending on orientation
if orientation == 1
- pv1_avg = 1 / 2 * (p_ll * v1_rr + p_rr * v1_ll)
+ pv1_avg = 0.5f0 * (p_ll * v1_rr + p_rr * v1_ll)
f1 = rho_avg * v1_avg
f2 = f1 * v1_avg + p_avg
f3 = f1 * v2_avg
f4 = p_avg * v1_avg * equations.inv_gamma_minus_one + f1 * kin_avg + pv1_avg
else
- pv2_avg = 1 / 2 * (p_ll * v2_rr + p_rr * v2_ll)
+ pv2_avg = 0.5f0 * (p_ll * v2_rr + p_rr * v2_ll)
f1 = rho_avg * v2_avg
f2 = f1 * v1_avg
f3 = f1 * v2_avg + p_avg
@@ -467,12 +476,12 @@ end
v_dot_n_rr = v1_rr * normal_direction[1] + v2_rr * normal_direction[2]
# Average each factor of products in flux
- rho_avg = 1 / 2 * (rho_ll + rho_rr)
- v1_avg = 1 / 2 * (v1_ll + v1_rr)
- v2_avg = 1 / 2 * (v2_ll + v2_rr)
- v_dot_n_avg = 1 / 2 * (v_dot_n_ll + v_dot_n_rr)
- p_avg = 1 / 2 * (p_ll + p_rr)
- velocity_square_avg = 0.5 * (v1_ll * v1_rr + v2_ll * v2_rr)
+ rho_avg = 0.5f0 * (rho_ll + rho_rr)
+ v1_avg = 0.5f0 * (v1_ll + v1_rr)
+ v2_avg = 0.5f0 * (v2_ll + v2_rr)
+ v_dot_n_avg = 0.5f0 * (v_dot_n_ll + v_dot_n_rr)
+ p_avg = 0.5f0 * (p_ll + p_rr)
+ velocity_square_avg = 0.5f0 * (v1_ll * v1_rr + v2_ll * v2_rr)
# Calculate fluxes depending on normal_direction
f1 = rho_avg * v_dot_n_avg
@@ -480,7 +489,7 @@ end
f3 = f1 * v2_avg + p_avg * normal_direction[2]
f4 = (f1 * velocity_square_avg +
p_avg * v_dot_n_avg * equations.inv_gamma_minus_one
- + 0.5 * (p_ll * v_dot_n_rr + p_rr * v_dot_n_ll))
+ + 0.5f0 * (p_ll * v_dot_n_rr + p_rr * v_dot_n_ll))
return SVector(f1, f2, f3, f4)
end
@@ -504,11 +513,11 @@ Kinetic energy preserving two-point flux by
rho_rr, v1_rr, v2_rr, p_rr = cons2prim(u_rr, equations)
# Average each factor of products in flux
- rho_avg = 1 / 2 * (rho_ll + rho_rr)
- v1_avg = 1 / 2 * (v1_ll + v1_rr)
- v2_avg = 1 / 2 * (v2_ll + v2_rr)
- p_avg = 1 / 2 * (p_ll + p_rr)
- e_avg = 1 / 2 * (rho_e_ll / rho_ll + rho_e_rr / rho_rr)
+ rho_avg = 0.5f0 * (rho_ll + rho_rr)
+ v1_avg = 0.5f0 * (v1_ll + v1_rr)
+ v2_avg = 0.5f0 * (v2_ll + v2_rr)
+ p_avg = 0.5f0 * (p_ll + p_rr)
+ e_avg = 0.5f0 * (rho_e_ll / rho_ll + rho_e_rr / rho_rr)
# Calculate fluxes depending on orientation
if orientation == 1
@@ -535,12 +544,12 @@ end
rho_rr, v1_rr, v2_rr, p_rr = cons2prim(u_rr, equations)
# Average each factor of products in flux
- rho_avg = 0.5 * (rho_ll + rho_rr)
- v1_avg = 0.5 * (v1_ll + v1_rr)
- v2_avg = 0.5 * (v2_ll + v2_rr)
+ rho_avg = 0.5f0 * (rho_ll + rho_rr)
+ v1_avg = 0.5f0 * (v1_ll + v1_rr)
+ v2_avg = 0.5f0 * (v2_ll + v2_rr)
v_dot_n_avg = v1_avg * normal_direction[1] + v2_avg * normal_direction[2]
- p_avg = 0.5 * (p_ll + p_rr)
- e_avg = 0.5 * (rho_e_ll / rho_ll + rho_e_rr / rho_rr)
+ p_avg = 0.5f0 * (p_ll + p_rr)
+ e_avg = 0.5f0 * (rho_e_ll / rho_ll + rho_e_rr / rho_rr)
# Calculate fluxes depending on normal_direction
f1 = rho_avg * v_dot_n_avg
@@ -565,19 +574,19 @@ Entropy conserving two-point flux by
# Unpack left and right state
rho_ll, v1_ll, v2_ll, p_ll = cons2prim(u_ll, equations)
rho_rr, v1_rr, v2_rr, p_rr = cons2prim(u_rr, equations)
- beta_ll = 0.5 * rho_ll / p_ll
- beta_rr = 0.5 * rho_rr / p_rr
- specific_kin_ll = 0.5 * (v1_ll^2 + v2_ll^2)
- specific_kin_rr = 0.5 * (v1_rr^2 + v2_rr^2)
+ beta_ll = 0.5f0 * rho_ll / p_ll
+ beta_rr = 0.5f0 * rho_rr / p_rr
+ specific_kin_ll = 0.5f0 * (v1_ll^2 + v2_ll^2)
+ specific_kin_rr = 0.5f0 * (v1_rr^2 + v2_rr^2)
# Compute the necessary mean values
- rho_avg = 0.5 * (rho_ll + rho_rr)
+ rho_avg = 0.5f0 * (rho_ll + rho_rr)
rho_mean = ln_mean(rho_ll, rho_rr)
beta_mean = ln_mean(beta_ll, beta_rr)
- beta_avg = 0.5 * (beta_ll + beta_rr)
- v1_avg = 0.5 * (v1_ll + v1_rr)
- v2_avg = 0.5 * (v2_ll + v2_rr)
- p_mean = 0.5 * rho_avg / beta_avg
+ beta_avg = 0.5f0 * (beta_ll + beta_rr)
+ v1_avg = 0.5f0 * (v1_ll + v1_rr)
+ v2_avg = 0.5f0 * (v2_ll + v2_rr)
+ p_mean = 0.5f0 * rho_avg / beta_avg
velocity_square_avg = specific_kin_ll + specific_kin_rr
# Calculate fluxes depending on orientation
@@ -585,13 +594,15 @@ Entropy conserving two-point flux by
f1 = rho_mean * v1_avg
f2 = f1 * v1_avg + p_mean
f3 = f1 * v2_avg
- f4 = f1 * 0.5 * (1 / (equations.gamma - 1) / beta_mean - velocity_square_avg) +
+ f4 = f1 * 0.5f0 *
+ (1 / (equations.gamma - 1) / beta_mean - velocity_square_avg) +
f2 * v1_avg + f3 * v2_avg
else
f1 = rho_mean * v2_avg
f2 = f1 * v1_avg
f3 = f1 * v2_avg + p_mean
- f4 = f1 * 0.5 * (1 / (equations.gamma - 1) / beta_mean - velocity_square_avg) +
+ f4 = f1 * 0.5f0 *
+ (1 / (equations.gamma - 1) / beta_mean - velocity_square_avg) +
f2 * v1_avg + f3 * v2_avg
end
@@ -605,26 +616,26 @@ end
rho_rr, v1_rr, v2_rr, p_rr = cons2prim(u_rr, equations)
v_dot_n_ll = v1_ll * normal_direction[1] + v2_ll * normal_direction[2]
v_dot_n_rr = v1_rr * normal_direction[1] + v2_rr * normal_direction[2]
- beta_ll = 0.5 * rho_ll / p_ll
- beta_rr = 0.5 * rho_rr / p_rr
- specific_kin_ll = 0.5 * (v1_ll^2 + v2_ll^2)
- specific_kin_rr = 0.5 * (v1_rr^2 + v2_rr^2)
+ beta_ll = 0.5f0 * rho_ll / p_ll
+ beta_rr = 0.5f0 * rho_rr / p_rr
+ specific_kin_ll = 0.5f0 * (v1_ll^2 + v2_ll^2)
+ specific_kin_rr = 0.5f0 * (v1_rr^2 + v2_rr^2)
# Compute the necessary mean values
- rho_avg = 0.5 * (rho_ll + rho_rr)
+ rho_avg = 0.5f0 * (rho_ll + rho_rr)
rho_mean = ln_mean(rho_ll, rho_rr)
beta_mean = ln_mean(beta_ll, beta_rr)
- beta_avg = 0.5 * (beta_ll + beta_rr)
- v1_avg = 0.5 * (v1_ll + v1_rr)
- v2_avg = 0.5 * (v2_ll + v2_rr)
- p_mean = 0.5 * rho_avg / beta_avg
+ beta_avg = 0.5f0 * (beta_ll + beta_rr)
+ v1_avg = 0.5f0 * (v1_ll + v1_rr)
+ v2_avg = 0.5f0 * (v2_ll + v2_rr)
+ p_mean = 0.5f0 * rho_avg / beta_avg
velocity_square_avg = specific_kin_ll + specific_kin_rr
# Multiply with average of normal velocities
- f1 = rho_mean * 0.5 * (v_dot_n_ll + v_dot_n_rr)
+ f1 = rho_mean * 0.5f0 * (v_dot_n_ll + v_dot_n_rr)
f2 = f1 * v1_avg + p_mean * normal_direction[1]
f3 = f1 * v2_avg + p_mean * normal_direction[2]
- f4 = f1 * 0.5 * (1 / (equations.gamma - 1) / beta_mean - velocity_square_avg) +
+ f4 = f1 * 0.5f0 * (1 / (equations.gamma - 1) / beta_mean - velocity_square_avg) +
f2 * v1_avg + f3 * v2_avg
return SVector(f1, f2, f3, f4)
@@ -658,10 +669,10 @@ See also
# log((ϱₗ/pₗ) / (ϱᵣ/pᵣ)) / (ϱₗ/pₗ - ϱᵣ/pᵣ)
# = pₗ pᵣ log((ϱₗ pᵣ) / (ϱᵣ pₗ)) / (ϱₗ pᵣ - ϱᵣ pₗ)
inv_rho_p_mean = p_ll * p_rr * inv_ln_mean(rho_ll * p_rr, rho_rr * p_ll)
- v1_avg = 0.5 * (v1_ll + v1_rr)
- v2_avg = 0.5 * (v2_ll + v2_rr)
- p_avg = 0.5 * (p_ll + p_rr)
- velocity_square_avg = 0.5 * (v1_ll * v1_rr + v2_ll * v2_rr)
+ v1_avg = 0.5f0 * (v1_ll + v1_rr)
+ v2_avg = 0.5f0 * (v2_ll + v2_rr)
+ p_avg = 0.5f0 * (p_ll + p_rr)
+ velocity_square_avg = 0.5f0 * (v1_ll * v1_rr + v2_ll * v2_rr)
# Calculate fluxes depending on orientation
if orientation == 1
@@ -670,14 +681,14 @@ See also
f3 = f1 * v2_avg
f4 = f1 *
(velocity_square_avg + inv_rho_p_mean * equations.inv_gamma_minus_one) +
- 0.5 * (p_ll * v1_rr + p_rr * v1_ll)
+ 0.5f0 * (p_ll * v1_rr + p_rr * v1_ll)
else
f1 = rho_mean * v2_avg
f2 = f1 * v1_avg
f3 = f1 * v2_avg + p_avg
f4 = f1 *
(velocity_square_avg + inv_rho_p_mean * equations.inv_gamma_minus_one) +
- 0.5 * (p_ll * v2_rr + p_rr * v2_ll)
+ 0.5f0 * (p_ll * v2_rr + p_rr * v2_ll)
end
return SVector(f1, f2, f3, f4)
@@ -698,18 +709,18 @@ end
# log((ϱₗ/pₗ) / (ϱᵣ/pᵣ)) / (ϱₗ/pₗ - ϱᵣ/pᵣ)
# = pₗ pᵣ log((ϱₗ pᵣ) / (ϱᵣ pₗ)) / (ϱₗ pᵣ - ϱᵣ pₗ)
inv_rho_p_mean = p_ll * p_rr * inv_ln_mean(rho_ll * p_rr, rho_rr * p_ll)
- v1_avg = 0.5 * (v1_ll + v1_rr)
- v2_avg = 0.5 * (v2_ll + v2_rr)
- p_avg = 0.5 * (p_ll + p_rr)
- velocity_square_avg = 0.5 * (v1_ll * v1_rr + v2_ll * v2_rr)
+ v1_avg = 0.5f0 * (v1_ll + v1_rr)
+ v2_avg = 0.5f0 * (v2_ll + v2_rr)
+ p_avg = 0.5f0 * (p_ll + p_rr)
+ velocity_square_avg = 0.5f0 * (v1_ll * v1_rr + v2_ll * v2_rr)
# Calculate fluxes depending on normal_direction
- f1 = rho_mean * 0.5 * (v_dot_n_ll + v_dot_n_rr)
+ f1 = rho_mean * 0.5f0 * (v_dot_n_ll + v_dot_n_rr)
f2 = f1 * v1_avg + p_avg * normal_direction[1]
f3 = f1 * v2_avg + p_avg * normal_direction[2]
f4 = (f1 * (velocity_square_avg + inv_rho_p_mean * equations.inv_gamma_minus_one)
+
- 0.5 * (p_ll * v_dot_n_rr + p_rr * v_dot_n_ll))
+ 0.5f0 * (p_ll * v_dot_n_rr + p_rr * v_dot_n_ll))
return SVector(f1, f2, f3, f4)
end
@@ -752,7 +763,7 @@ end
rho, rho_v1, rho_v2, rho_e = u
v1 = rho_v1 / rho
v2 = rho_v2 / rho
- p = (equations.gamma - 1) * (rho_e - 0.5 * (rho_v1 * v1 + rho_v2 * v2))
+ p = (equations.gamma - 1) * (rho_e - 0.5f0 * (rho_v1 * v1 + rho_v2 * v2))
a = sqrt(equations.gamma * p / rho)
if orientation == 1
@@ -766,12 +777,12 @@ end
alpha_p = 2 * (equations.gamma - 1) * lambda1_p + lambda2_p + lambda3_p
- rho_2gamma = 0.5 * rho / equations.gamma
+ rho_2gamma = 0.5f0 * rho / equations.gamma
f1p = rho_2gamma * alpha_p
f2p = rho_2gamma * (alpha_p * v1 + a * (lambda2_p - lambda3_p))
f3p = rho_2gamma * alpha_p * v2
f4p = rho_2gamma *
- (alpha_p * 0.5 * (v1^2 + v2^2) + a * v1 * (lambda2_p - lambda3_p)
+ (alpha_p * 0.5f0 * (v1^2 + v2^2) + a * v1 * (lambda2_p - lambda3_p)
+ a^2 * (lambda2_p + lambda3_p) * equations.inv_gamma_minus_one)
else # orientation == 2
lambda1 = v2
@@ -784,12 +795,12 @@ end
alpha_p = 2 * (equations.gamma - 1) * lambda1_p + lambda2_p + lambda3_p
- rho_2gamma = 0.5 * rho / equations.gamma
+ rho_2gamma = 0.5f0 * rho / equations.gamma
f1p = rho_2gamma * alpha_p
f2p = rho_2gamma * alpha_p * v1
f3p = rho_2gamma * (alpha_p * v2 + a * (lambda2_p - lambda3_p))
f4p = rho_2gamma *
- (alpha_p * 0.5 * (v1^2 + v2^2) + a * v2 * (lambda2_p - lambda3_p)
+ (alpha_p * 0.5f0 * (v1^2 + v2^2) + a * v2 * (lambda2_p - lambda3_p)
+ a^2 * (lambda2_p + lambda3_p) * equations.inv_gamma_minus_one)
end
return SVector(f1p, f2p, f3p, f4p)
@@ -800,7 +811,7 @@ end
rho, rho_v1, rho_v2, rho_e = u
v1 = rho_v1 / rho
v2 = rho_v2 / rho
- p = (equations.gamma - 1) * (rho_e - 0.5 * (rho_v1 * v1 + rho_v2 * v2))
+ p = (equations.gamma - 1) * (rho_e - 0.5f0 * (rho_v1 * v1 + rho_v2 * v2))
a = sqrt(equations.gamma * p / rho)
if orientation == 1
@@ -814,12 +825,12 @@ end
alpha_m = 2 * (equations.gamma - 1) * lambda1_m + lambda2_m + lambda3_m
- rho_2gamma = 0.5 * rho / equations.gamma
+ rho_2gamma = 0.5f0 * rho / equations.gamma
f1m = rho_2gamma * alpha_m
f2m = rho_2gamma * (alpha_m * v1 + a * (lambda2_m - lambda3_m))
f3m = rho_2gamma * alpha_m * v2
f4m = rho_2gamma *
- (alpha_m * 0.5 * (v1^2 + v2^2) + a * v1 * (lambda2_m - lambda3_m)
+ (alpha_m * 0.5f0 * (v1^2 + v2^2) + a * v1 * (lambda2_m - lambda3_m)
+ a^2 * (lambda2_m + lambda3_m) * equations.inv_gamma_minus_one)
else # orientation == 2
lambda1 = v2
@@ -832,12 +843,12 @@ end
alpha_m = 2 * (equations.gamma - 1) * lambda1_m + lambda2_m + lambda3_m
- rho_2gamma = 0.5 * rho / equations.gamma
+ rho_2gamma = 0.5f0 * rho / equations.gamma
f1m = rho_2gamma * alpha_m
f2m = rho_2gamma * alpha_m * v1
f3m = rho_2gamma * (alpha_m * v2 + a * (lambda2_m - lambda3_m))
f4m = rho_2gamma *
- (alpha_m * 0.5 * (v1^2 + v2^2) + a * v2 * (lambda2_m - lambda3_m)
+ (alpha_m * 0.5f0 * (v1^2 + v2^2) + a * v2 * (lambda2_m - lambda3_m)
+ a^2 * (lambda2_m + lambda3_m) * equations.inv_gamma_minus_one)
end
return SVector(f1m, f2m, f3m, f4m)
@@ -888,7 +899,7 @@ end
rho, rho_v1, rho_v2, rho_e = u
v1 = rho_v1 / rho
v2 = rho_v2 / rho
- p = (equations.gamma - 1) * (rho_e - 0.5 * (rho_v1 * v1 + rho_v2 * v2))
+ p = (equations.gamma - 1) * (rho_e - 0.5f0 * (rho_v1 * v1 + rho_v2 * v2))
a = sqrt(equations.gamma * p / rho)
H = (rho_e + p) / rho
@@ -899,8 +910,8 @@ end
lambda1_p = positive_part(lambda1) # Same as (lambda_i + abs(lambda_i)) / 2, but faster :)
lambda2_p = positive_part(lambda2)
- rhoa_2gamma = 0.5 * rho * a / equations.gamma
- f1p = 0.5 * rho * (lambda1_p + lambda2_p)
+ rhoa_2gamma = 0.5f0 * rho * a / equations.gamma
+ f1p = 0.5f0 * rho * (lambda1_p + lambda2_p)
f2p = f1p * v1 + rhoa_2gamma * (lambda1_p - lambda2_p)
f3p = f1p * v2
f4p = f1p * H
@@ -911,8 +922,8 @@ end
lambda1_p = positive_part(lambda1) # Same as (lambda_i + abs(lambda_i)) / 2, but faster :)
lambda2_p = positive_part(lambda2)
- rhoa_2gamma = 0.5 * rho * a / equations.gamma
- f1p = 0.5 * rho * (lambda1_p + lambda2_p)
+ rhoa_2gamma = 0.5f0 * rho * a / equations.gamma
+ f1p = 0.5f0 * rho * (lambda1_p + lambda2_p)
f2p = f1p * v1
f3p = f1p * v2 + rhoa_2gamma * (lambda1_p - lambda2_p)
f4p = f1p * H
@@ -925,7 +936,7 @@ end
rho, rho_v1, rho_v2, rho_e = u
v1 = rho_v1 / rho
v2 = rho_v2 / rho
- p = (equations.gamma - 1) * (rho_e - 0.5 * (rho_v1 * v1 + rho_v2 * v2))
+ p = (equations.gamma - 1) * (rho_e - 0.5f0 * (rho_v1 * v1 + rho_v2 * v2))
a = sqrt(equations.gamma * p / rho)
H = (rho_e + p) / rho
@@ -936,8 +947,8 @@ end
lambda1_m = negative_part(lambda1) # Same as (lambda_i - abs(lambda_i)) / 2, but faster :)
lambda2_m = negative_part(lambda2)
- rhoa_2gamma = 0.5 * rho * a / equations.gamma
- f1m = 0.5 * rho * (lambda1_m + lambda2_m)
+ rhoa_2gamma = 0.5f0 * rho * a / equations.gamma
+ f1m = 0.5f0 * rho * (lambda1_m + lambda2_m)
f2m = f1m * v1 + rhoa_2gamma * (lambda1_m - lambda2_m)
f3m = f1m * v2
f4m = f1m * H
@@ -948,8 +959,8 @@ end
lambda1_m = negative_part(lambda1) # Same as (lambda_i - abs(lambda_i)) / 2, but faster :)
lambda2_m = negative_part(lambda2)
- rhoa_2gamma = 0.5 * rho * a / equations.gamma
- f1m = 0.5 * rho * (lambda1_m + lambda2_m)
+ rhoa_2gamma = 0.5f0 * rho * a / equations.gamma
+ f1m = 0.5f0 * rho * (lambda1_m + lambda2_m)
f2m = f1m * v1
f3m = f1m * v2 + rhoa_2gamma * (lambda1_m - lambda2_m)
f4m = f1m * H
@@ -963,7 +974,7 @@ end
rho, rho_v1, rho_v2, rho_e = u
v1 = rho_v1 / rho
v2 = rho_v2 / rho
- p = (equations.gamma - 1) * (rho_e - 0.5 * (rho_v1 * v1 + rho_v2 * v2))
+ p = (equations.gamma - 1) * (rho_e - 0.5f0 * (rho_v1 * v1 + rho_v2 * v2))
a = sqrt(equations.gamma * p / rho)
H = (rho_e + p) / rho
@@ -975,8 +986,8 @@ end
lambda1_p = positive_part(lambda1) # Same as (lambda_i + abs(lambda_i)) / 2, but faster :)
lambda2_p = positive_part(lambda2)
- rhoa_2gamma = 0.5 * rho * a / equations.gamma
- f1p = 0.5 * rho * (lambda1_p + lambda2_p)
+ rhoa_2gamma = 0.5f0 * rho * a / equations.gamma
+ f1p = 0.5f0 * rho * (lambda1_p + lambda2_p)
f2p = f1p * v1 + rhoa_2gamma * normal_direction[1] * (lambda1_p - lambda2_p)
f3p = f1p * v2 + rhoa_2gamma * normal_direction[2] * (lambda1_p - lambda2_p)
f4p = f1p * H
@@ -990,7 +1001,7 @@ end
rho, rho_v1, rho_v2, rho_e = u
v1 = rho_v1 / rho
v2 = rho_v2 / rho
- p = (equations.gamma - 1) * (rho_e - 0.5 * (rho_v1 * v1 + rho_v2 * v2))
+ p = (equations.gamma - 1) * (rho_e - 0.5f0 * (rho_v1 * v1 + rho_v2 * v2))
a = sqrt(equations.gamma * p / rho)
H = (rho_e + p) / rho
@@ -1002,8 +1013,8 @@ end
lambda1_m = negative_part(lambda1) # Same as (lambda_i - abs(lambda_i)) / 2, but faster :)
lambda2_m = negative_part(lambda2)
- rhoa_2gamma = 0.5 * rho * a / equations.gamma
- f1m = 0.5 * rho * (lambda1_m + lambda2_m)
+ rhoa_2gamma = 0.5f0 * rho * a / equations.gamma
+ f1m = 0.5f0 * rho * (lambda1_m + lambda2_m)
f2m = f1m * v1 + rhoa_2gamma * normal_direction[1] * (lambda1_m - lambda2_m)
f3m = f1m * v2 + rhoa_2gamma * normal_direction[2] * (lambda1_m - lambda2_m)
f4m = f1m * H
@@ -1045,9 +1056,9 @@ end
v_rr = v2_rr
end
- rho = 0.5 * (rho_ll + rho_rr)
- p = 0.5 * (p_ll + p_rr) - 0.5 * c * rho * (v_rr - v_ll)
- v = 0.5 * (v_ll + v_rr) - 1 / (2 * c * rho) * (p_rr - p_ll)
+ rho = 0.5f0 * (rho_ll + rho_rr)
+ p = 0.5f0 * (p_ll + p_rr) - 0.5f0 * c * rho * (v_rr - v_ll)
+ v = 0.5f0 * (v_ll + v_rr) - 1 / (2 * c * rho) * (p_rr - p_ll)
# We treat the energy term analogous to the potential temperature term in the paper by
# Chen et al., i.e. we use p_ll and p_rr, and not p
@@ -1083,9 +1094,9 @@ end
# and then multiplying v by `norm_` again, but this version is slightly faster.
norm_ = norm(normal_direction)
- rho = 0.5 * (rho_ll + rho_rr)
- p = 0.5 * (p_ll + p_rr) - 0.5 * c * rho * (v_rr - v_ll) / norm_
- v = 0.5 * (v_ll + v_rr) - 1 / (2 * c * rho) * (p_rr - p_ll) * norm_
+ rho = 0.5f0 * (rho_ll + rho_rr)
+ p = 0.5f0 * (p_ll + p_rr) - 0.5f0 * c * rho * (v_rr - v_ll) / norm_
+ v = 0.5f0 * (v_ll + v_rr) - 1 / (2 * c * rho) * (p_rr - p_ll) * norm_
# We treat the energy term analogous to the potential temperature term in the paper by
# Chen et al., i.e. we use p_ll and p_rr, and not p
@@ -1155,24 +1166,24 @@ end
rho, rho_v1, rho_v2, rho_e = u
v1 = rho_v1 / rho
v2 = rho_v2 / rho
- p = (equations.gamma - 1) * (rho_e - 0.5 * (rho_v1 * v1 + rho_v2 * v2))
+ p = (equations.gamma - 1) * (rho_e - 0.5f0 * (rho_v1 * v1 + rho_v2 * v2))
a = sqrt(equations.gamma * p / rho)
H = (rho_e + p) / rho
if orientation == 1
M = v1 / a
- p_plus = 0.5 * (1 + equations.gamma * M) * p
+ p_plus = 0.5f0 * (1 + equations.gamma * M) * p
- f1p = 0.25 * rho * a * (M + 1)^2
+ f1p = 0.25f0 * rho * a * (M + 1)^2
f2p = f1p * v1 + p_plus
f3p = f1p * v2
f4p = f1p * H
else # orientation == 2
M = v2 / a
- p_plus = 0.5 * (1 + equations.gamma * M) * p
+ p_plus = 0.5f0 * (1 + equations.gamma * M) * p
- f1p = 0.25 * rho * a * (M + 1)^2
+ f1p = 0.25f0 * rho * a * (M + 1)^2
f2p = f1p * v1
f3p = f1p * v2 + p_plus
f4p = f1p * H
@@ -1185,24 +1196,24 @@ end
rho, rho_v1, rho_v2, rho_e = u
v1 = rho_v1 / rho
v2 = rho_v2 / rho
- p = (equations.gamma - 1) * (rho_e - 0.5 * (rho_v1 * v1 + rho_v2 * v2))
+ p = (equations.gamma - 1) * (rho_e - 0.5f0 * (rho_v1 * v1 + rho_v2 * v2))
a = sqrt(equations.gamma * p / rho)
H = (rho_e + p) / rho
if orientation == 1
M = v1 / a
- p_minus = 0.5 * (1 - equations.gamma * M) * p
+ p_minus = 0.5f0 * (1 - equations.gamma * M) * p
- f1m = -0.25 * rho * a * (M - 1)^2
+ f1m = -0.25f0 * rho * a * (M - 1)^2
f2m = f1m * v1 + p_minus
f3m = f1m * v2
f4m = f1m * H
else # orientation == 2
M = v2 / a
- p_minus = 0.5 * (1 - equations.gamma * M) * p
+ p_minus = 0.5f0 * (1 - equations.gamma * M) * p
- f1m = -0.25 * rho * a * (M - 1)^2
+ f1m = -0.25f0 * rho * a * (M - 1)^2
f2m = f1m * v1
f3m = f1m * v2 + p_minus
f4m = f1m * H
@@ -1216,16 +1227,16 @@ end
rho, rho_v1, rho_v2, rho_e = u
v1 = rho_v1 / rho
v2 = rho_v2 / rho
- p = (equations.gamma - 1) * (rho_e - 0.5 * (rho_v1 * v1 + rho_v2 * v2))
+ p = (equations.gamma - 1) * (rho_e - 0.5f0 * (rho_v1 * v1 + rho_v2 * v2))
a = sqrt(equations.gamma * p / rho)
H = (rho_e + p) / rho
v_n = normal_direction[1] * v1 + normal_direction[2] * v2
M = v_n / a
- p_plus = 0.5 * (1 + equations.gamma * M) * p
+ p_plus = 0.5f0 * (1 + equations.gamma * M) * p
- f1p = 0.25 * rho * a * (M + 1)^2
+ f1p = 0.25f0 * rho * a * (M + 1)^2
f2p = f1p * v1 + normal_direction[1] * p_plus
f3p = f1p * v2 + normal_direction[2] * p_plus
f4p = f1p * H
@@ -1239,16 +1250,16 @@ end
rho, rho_v1, rho_v2, rho_e = u
v1 = rho_v1 / rho
v2 = rho_v2 / rho
- p = (equations.gamma - 1) * (rho_e - 0.5 * (rho_v1 * v1 + rho_v2 * v2))
+ p = (equations.gamma - 1) * (rho_e - 0.5f0 * (rho_v1 * v1 + rho_v2 * v2))
a = sqrt(equations.gamma * p / rho)
H = (rho_e + p) / rho
v_n = normal_direction[1] * v1 + normal_direction[2] * v2
M = v_n / a
- p_minus = 0.5 * (1 - equations.gamma * M) * p
+ p_minus = 0.5f0 * (1 - equations.gamma * M) * p
- f1m = -0.25 * rho * a * (M - 1)^2
+ f1m = -0.25f0 * rho * a * (M - 1)^2
f2m = f1m * v1 + normal_direction[1] * p_minus
f3m = f1m * v2 + normal_direction[2] * p_minus
f4m = f1m * H
@@ -1289,24 +1300,24 @@ end
rho, rho_v1, rho_v2, rho_e = u
v1 = rho_v1 / rho
v2 = rho_v2 / rho
- p = (equations.gamma - 1) * (rho_e - 0.5 * (rho_v1 * v1 + rho_v2 * v2))
+ p = (equations.gamma - 1) * (rho_e - 0.5f0 * (rho_v1 * v1 + rho_v2 * v2))
a = sqrt(equations.gamma * p / rho)
H = (rho_e + p) / rho
- lambda = 0.5 * (sqrt(v1^2 + v2^2) + a)
+ lambda = 0.5f0 * (sqrt(v1^2 + v2^2) + a)
if orientation == 1
#lambda = 0.5 * (abs(v1) + a)
- f1p = 0.5 * rho * v1 + lambda * u[1]
- f2p = 0.5 * rho * v1 * v1 + 0.5 * p + lambda * u[2]
- f3p = 0.5 * rho * v1 * v2 + lambda * u[3]
- f4p = 0.5 * rho * v1 * H + lambda * u[4]
+ f1p = 0.5f0 * rho * v1 + lambda * u[1]
+ f2p = 0.5f0 * rho * v1 * v1 + 0.5f0 * p + lambda * u[2]
+ f3p = 0.5f0 * rho * v1 * v2 + lambda * u[3]
+ f4p = 0.5f0 * rho * v1 * H + lambda * u[4]
else # orientation == 2
#lambda = 0.5 * (abs(v2) + a)
- f1p = 0.5 * rho * v2 + lambda * u[1]
- f2p = 0.5 * rho * v2 * v1 + lambda * u[2]
- f3p = 0.5 * rho * v2 * v2 + 0.5 * p + lambda * u[3]
- f4p = 0.5 * rho * v2 * H + lambda * u[4]
+ f1p = 0.5f0 * rho * v2 + lambda * u[1]
+ f2p = 0.5f0 * rho * v2 * v1 + lambda * u[2]
+ f3p = 0.5f0 * rho * v2 * v2 + 0.5f0 * p + lambda * u[3]
+ f4p = 0.5f0 * rho * v2 * H + lambda * u[4]
end
return SVector(f1p, f2p, f3p, f4p)
end
@@ -1316,24 +1327,24 @@ end
rho, rho_v1, rho_v2, rho_e = u
v1 = rho_v1 / rho
v2 = rho_v2 / rho
- p = (equations.gamma - 1) * (rho_e - 0.5 * (rho_v1 * v1 + rho_v2 * v2))
+ p = (equations.gamma - 1) * (rho_e - 0.5f0 * (rho_v1 * v1 + rho_v2 * v2))
a = sqrt(equations.gamma * p / rho)
H = (rho_e + p) / rho
- lambda = 0.5 * (sqrt(v1^2 + v2^2) + a)
+ lambda = 0.5f0 * (sqrt(v1^2 + v2^2) + a)
if orientation == 1
#lambda = 0.5 * (abs(v1) + a)
- f1m = 0.5 * rho * v1 - lambda * u[1]
- f2m = 0.5 * rho * v1 * v1 + 0.5 * p - lambda * u[2]
- f3m = 0.5 * rho * v1 * v2 - lambda * u[3]
- f4m = 0.5 * rho * v1 * H - lambda * u[4]
+ f1m = 0.5f0 * rho * v1 - lambda * u[1]
+ f2m = 0.5f0 * rho * v1 * v1 + 0.5f0 * p - lambda * u[2]
+ f3m = 0.5f0 * rho * v1 * v2 - lambda * u[3]
+ f4m = 0.5f0 * rho * v1 * H - lambda * u[4]
else # orientation == 2
#lambda = 0.5 * (abs(v2) + a)
- f1m = 0.5 * rho * v2 - lambda * u[1]
- f2m = 0.5 * rho * v2 * v1 - lambda * u[2]
- f3m = 0.5 * rho * v2 * v2 + 0.5 * p - lambda * u[3]
- f4m = 0.5 * rho * v2 * H - lambda * u[4]
+ f1m = 0.5f0 * rho * v2 - lambda * u[1]
+ f2m = 0.5f0 * rho * v2 * v1 - lambda * u[2]
+ f3m = 0.5f0 * rho * v2 * v2 + 0.5f0 * p - lambda * u[3]
+ f4m = 0.5f0 * rho * v2 * H - lambda * u[4]
end
return SVector(f1m, f2m, f3m, f4m)
end
@@ -1346,15 +1357,15 @@ end
a = sqrt(equations.gamma * p / rho)
H = (rho_e + p) / rho
- lambda = 0.5 * (sqrt(v1^2 + v2^2) + a)
+ lambda = 0.5f0 * (sqrt(v1^2 + v2^2) + a)
v_normal = v1 * normal_direction[1] + v2 * normal_direction[2]
rho_v_normal = rho * v_normal
- f1p = 0.5 * rho_v_normal + lambda * u[1]
- f2p = 0.5 * rho_v_normal * v1 + 0.5 * p * normal_direction[1] + lambda * u[2]
- f3p = 0.5 * rho_v_normal * v2 + 0.5 * p * normal_direction[2] + lambda * u[3]
- f4p = 0.5 * rho_v_normal * H + lambda * u[4]
+ f1p = 0.5f0 * rho_v_normal + lambda * u[1]
+ f2p = 0.5f0 * rho_v_normal * v1 + 0.5f0 * p * normal_direction[1] + lambda * u[2]
+ f3p = 0.5f0 * rho_v_normal * v2 + 0.5f0 * p * normal_direction[2] + lambda * u[3]
+ f4p = 0.5f0 * rho_v_normal * H + lambda * u[4]
return SVector(f1p, f2p, f3p, f4p)
end
@@ -1367,15 +1378,15 @@ end
a = sqrt(equations.gamma * p / rho)
H = (rho_e + p) / rho
- lambda = 0.5 * (sqrt(v1^2 + v2^2) + a)
+ lambda = 0.5f0 * (sqrt(v1^2 + v2^2) + a)
v_normal = v1 * normal_direction[1] + v2 * normal_direction[2]
rho_v_normal = rho * v_normal
- f1m = 0.5 * rho_v_normal - lambda * u[1]
- f2m = 0.5 * rho_v_normal * v1 + 0.5 * p * normal_direction[1] - lambda * u[2]
- f3m = 0.5 * rho_v_normal * v2 + 0.5 * p * normal_direction[2] - lambda * u[3]
- f4m = 0.5 * rho_v_normal * H - lambda * u[4]
+ f1m = 0.5f0 * rho_v_normal - lambda * u[1]
+ f2m = 0.5f0 * rho_v_normal * v1 + 0.5f0 * p * normal_direction[1] - lambda * u[2]
+ f3m = 0.5f0 * rho_v_normal * v2 + 0.5f0 * p * normal_direction[2] - lambda * u[3]
+ f4m = 0.5f0 * rho_v_normal * H - lambda * u[4]
return SVector(f1m, f2m, f3m, f4m)
end
@@ -1555,13 +1566,13 @@ function flux_hllc(u_ll, u_rr, orientation::Integer,
v1_ll = rho_v1_ll / rho_ll
v2_ll = rho_v2_ll / rho_ll
e_ll = rho_e_ll / rho_ll
- p_ll = (equations.gamma - 1) * (rho_e_ll - 1 / 2 * rho_ll * (v1_ll^2 + v2_ll^2))
+ p_ll = (equations.gamma - 1) * (rho_e_ll - 0.5f0 * rho_ll * (v1_ll^2 + v2_ll^2))
c_ll = sqrt(equations.gamma * p_ll / rho_ll)
v1_rr = rho_v1_rr / rho_rr
v2_rr = rho_v2_rr / rho_rr
e_rr = rho_e_rr / rho_rr
- p_rr = (equations.gamma - 1) * (rho_e_rr - 1 / 2 * rho_rr * (v1_rr^2 + v2_rr^2))
+ p_rr = (equations.gamma - 1) * (rho_e_rr - 0.5f0 * rho_rr * (v1_rr^2 + v2_rr^2))
c_rr = sqrt(equations.gamma * p_rr / rho_rr)
# Obtain left and right fluxes
@@ -1586,18 +1597,18 @@ function flux_hllc(u_ll, u_rr, orientation::Integer,
H_ll = (rho_e_ll + p_ll) / rho_ll
H_rr = (rho_e_rr + p_rr) / rho_rr
H_roe = (sqrt_rho_ll * H_ll + sqrt_rho_rr * H_rr) / sum_sqrt_rho
- c_roe = sqrt((equations.gamma - 1) * (H_roe - 0.5 * vel_roe_mag))
+ c_roe = sqrt((equations.gamma - 1) * (H_roe - 0.5f0 * vel_roe_mag))
Ssl = min(vel_L - c_ll, vel_roe - c_roe)
Ssr = max(vel_R + c_rr, vel_roe + c_roe)
sMu_L = Ssl - vel_L
sMu_R = Ssr - vel_R
- if Ssl >= 0.0
+ if Ssl >= 0
f1 = f_ll[1]
f2 = f_ll[2]
f3 = f_ll[3]
f4 = f_ll[4]
- elseif Ssr <= 0.0
+ elseif Ssr <= 0
f1 = f_rr[1]
f2 = f_rr[2]
f3 = f_rr[3]
@@ -1605,7 +1616,7 @@ function flux_hllc(u_ll, u_rr, orientation::Integer,
else
SStar = (p_rr - p_ll + rho_ll * vel_L * sMu_L - rho_rr * vel_R * sMu_R) /
(rho_ll * sMu_L - rho_rr * sMu_R)
- if Ssl <= 0.0 <= SStar
+ if Ssl <= 0 <= SStar
densStar = rho_ll * sMu_L / (Ssl - SStar)
enerStar = e_ll + (SStar - vel_L) * (SStar + p_ll / (rho_ll * sMu_L))
UStar1 = densStar
@@ -1679,19 +1690,19 @@ function flux_hllc(u_ll, u_rr, normal_direction::AbstractVector,
H_rr = (u_rr[4] + p_rr) / rho_rr
H_roe = (sqrt_rho_ll * H_ll + sqrt_rho_rr * H_rr) / sum_sqrt_rho
- c_roe = sqrt((equations.gamma - 1) * (H_roe - 0.5 * vel_roe_mag)) * norm_
+ c_roe = sqrt((equations.gamma - 1) * (H_roe - 0.5f0 * vel_roe_mag)) * norm_
Ssl = min(v_dot_n_ll - c_ll, vel_roe - c_roe)
Ssr = max(v_dot_n_rr + c_rr, vel_roe + c_roe)
sMu_L = Ssl - v_dot_n_ll
sMu_R = Ssr - v_dot_n_rr
- if Ssl >= 0.0
+ if Ssl >= 0
f1 = f_ll[1]
f2 = f_ll[2]
f3 = f_ll[3]
f4 = f_ll[4]
- elseif Ssr <= 0.0
+ elseif Ssr <= 0
f1 = f_rr[1]
f2 = f_rr[2]
f3 = f_rr[3]
@@ -1699,7 +1710,7 @@ function flux_hllc(u_ll, u_rr, normal_direction::AbstractVector,
else
SStar = (rho_ll * v_dot_n_ll * sMu_L - rho_rr * v_dot_n_rr * sMu_R +
(p_rr - p_ll) * norm_sq) / (rho_ll * sMu_L - rho_rr * sMu_R)
- if Ssl <= 0.0 <= SStar
+ if Ssl <= 0 <= SStar
densStar = rho_ll * sMu_L / (Ssl - SStar)
enerStar = e_ll +
(SStar - v_dot_n_ll) *
@@ -1773,19 +1784,19 @@ of the numerical flux.
v_roe_mag = v1_roe^2 + v2_roe^2
H_roe = (sqrt_rho_ll * H_ll + sqrt_rho_rr * H_rr) * inv_sum_sqrt_rho
- c_roe = sqrt((equations.gamma - 1) * (H_roe - 0.5 * v_roe_mag))
+ c_roe = sqrt((equations.gamma - 1) * (H_roe - 0.5f0 * v_roe_mag))
# Compute convenience constant for positivity preservation, see
# https://doi.org/10.1016/0021-9991(91)90211-3
- beta = sqrt(0.5 * (equations.gamma - 1) / equations.gamma)
+ beta = sqrt(0.5f0 * (equations.gamma - 1) / equations.gamma)
# Estimate the edges of the Riemann fan (with positivity conservation)
if orientation == 1 # x-direction
- SsL = min(v1_roe - c_roe, v1_ll - beta * c_ll, zero(v1_roe))
- SsR = max(v1_roe + c_roe, v1_rr + beta * c_rr, zero(v1_roe))
+ SsL = min(v1_roe - c_roe, v1_ll - beta * c_ll, 0)
+ SsR = max(v1_roe + c_roe, v1_rr + beta * c_rr, 0)
elseif orientation == 2 # y-direction
- SsL = min(v2_roe - c_roe, v2_ll - beta * c_ll, zero(v2_roe))
- SsR = max(v2_roe + c_roe, v2_rr + beta * c_rr, zero(v2_roe))
+ SsL = min(v2_roe - c_roe, v2_ll - beta * c_ll, 0)
+ SsR = max(v2_roe + c_roe, v2_rr + beta * c_rr, 0)
end
return SsL, SsR
@@ -1836,15 +1847,15 @@ of the numerical flux.
v_roe_mag = v1_roe^2 + v2_roe^2
H_roe = (sqrt_rho_ll * H_ll + sqrt_rho_rr * H_rr) * inv_sum_sqrt_rho
- c_roe = sqrt((equations.gamma - 1) * (H_roe - 0.5 * v_roe_mag)) * norm_
+ c_roe = sqrt((equations.gamma - 1) * (H_roe - 0.5f0 * v_roe_mag)) * norm_
# Compute convenience constant for positivity preservation, see
# https://doi.org/10.1016/0021-9991(91)90211-3
- beta = sqrt(0.5 * (equations.gamma - 1) / equations.gamma)
+ beta = sqrt(0.5f0 * (equations.gamma - 1) / equations.gamma)
# Estimate the edges of the Riemann fan (with positivity conservation)
- SsL = min(v_roe - c_roe, v_dot_n_ll - beta * c_ll, zero(v_roe))
- SsR = max(v_roe + c_roe, v_dot_n_rr + beta * c_rr, zero(v_roe))
+ SsL = min(v_roe - c_roe, v_dot_n_ll - beta * c_ll, 0)
+ SsR = max(v_roe + c_roe, v_dot_n_rr + beta * c_rr, 0)
return SsL, SsR
end
@@ -1862,7 +1873,7 @@ end
v1 = rho_v1 / rho
v2 = rho_v2 / rho
- p = (equations.gamma - 1) * (rho_e - 0.5 * (rho_v1 * v1 + rho_v2 * v2))
+ p = (equations.gamma - 1) * (rho_e - 0.5f0 * (rho_v1 * v1 + rho_v2 * v2))
return SVector(rho, v1, v2, p)
end
@@ -1874,11 +1885,12 @@ end
v1 = rho_v1 / rho
v2 = rho_v2 / rho
v_square = v1^2 + v2^2
- p = (equations.gamma - 1) * (rho_e - 0.5 * rho * v_square)
+ p = (equations.gamma - 1) * (rho_e - 0.5f0 * rho * v_square)
s = log(p) - equations.gamma * log(rho)
rho_p = rho / p
- w1 = (equations.gamma - s) * equations.inv_gamma_minus_one - 0.5 * rho_p * v_square
+ w1 = (equations.gamma - s) * equations.inv_gamma_minus_one -
+ 0.5f0 * rho_p * v_square
w2 = rho_p * v1
w3 = rho_p * v2
w4 = -rho_p
@@ -1895,11 +1907,11 @@ end
v1 = rho_v1 / rho
v2 = rho_v2 / rho
v_square = v1^2 + v2^2
- inv_rho_gammap1 = (1 / rho)^(equations.gamma + 1.0)
+ inv_rho_gammap1 = (1 / rho)^(equations.gamma + 1)
# The derivative vector for the modified specific entropy of Guermond et al.
w1 = inv_rho_gammap1 *
- (0.5 * rho * (equations.gamma + 1.0) * v_square - equations.gamma * rho_e)
+ (0.5f0 * rho * (equations.gamma + 1) * v_square - equations.gamma * rho_e)
w2 = -rho_v1 * inv_rho_gammap1
w3 = -rho_v2 * inv_rho_gammap1
w4 = (1 / rho)^equations.gamma
@@ -1936,7 +1948,7 @@ end
rho, v1, v2, p = prim
rho_v1 = rho * v1
rho_v2 = rho * v2
- rho_e = p * equations.inv_gamma_minus_one + 0.5 * (rho_v1 * v1 + rho_v2 * v2)
+ rho_e = p * equations.inv_gamma_minus_one + 0.5f0 * (rho_v1 * v1 + rho_v2 * v2)
return SVector(rho, rho_v1, rho_v2, rho_e)
end
@@ -1947,7 +1959,7 @@ end
@inline function pressure(u, equations::CompressibleEulerEquations2D)
rho, rho_v1, rho_v2, rho_e = u
- p = (equations.gamma - 1) * (rho_e - 0.5 * (rho_v1^2 + rho_v2^2) / rho)
+ p = (equations.gamma - 1) * (rho_e - 0.5f0 * (rho_v1^2 + rho_v2^2) / rho)
return p
end
@@ -1960,12 +1972,12 @@ end
v2 = rho_v2 / rho
v_square = v1^2 + v2^2
- return (equations.gamma - 1.0) * SVector(0.5 * v_square, -v1, -v2, 1.0)
+ return (equations.gamma - 1) * SVector(0.5f0 * v_square, -v1, -v2, 1)
end
@inline function density_pressure(u, equations::CompressibleEulerEquations2D)
rho, rho_v1, rho_v2, rho_e = u
- rho_times_p = (equations.gamma - 1) * (rho * rho_e - 0.5 * (rho_v1^2 + rho_v2^2))
+ rho_times_p = (equations.gamma - 1) * (rho * rho_e - 0.5f0 * (rho_v1^2 + rho_v2^2))
return rho_times_p
end
@@ -1995,7 +2007,7 @@ end
# Calculate thermodynamic entropy for a conservative state `cons`
@inline function entropy_thermodynamic(cons, equations::CompressibleEulerEquations2D)
# Pressure
- p = (equations.gamma - 1) * (cons[4] - 1 / 2 * (cons[2]^2 + cons[3]^2) / cons[1])
+ p = (equations.gamma - 1) * (cons[4] - 0.5f0 * (cons[2]^2 + cons[3]^2) / cons[1])
# Thermodynamic entropy
s = log(p) - equations.gamma * log(cons[1])
@@ -2034,7 +2046,7 @@ Note: This is *not* the "conventional" specific entropy ``s = ln(p / \rho^\gamma
rho, rho_v1, rho_v2, rho_e = u
# Modified specific entropy from Guermond et al. (2019)
- s = (rho_e - 0.5 * (rho_v1^2 + rho_v2^2) / rho) * (1 / rho)^equations.gamma
+ s = (rho_e - 0.5f0 * (rho_v1^2 + rho_v2^2) / rho) * (1 / rho)^equations.gamma
return s
end
@@ -2067,7 +2079,7 @@ end
# State validation for Newton-bisection method of subcell IDP limiting
@inline function Base.isvalid(u, equations::CompressibleEulerEquations2D)
p = pressure(u, equations)
- if u[1] <= 0.0 || p <= 0.0
+ if u[1] <= 0 || p <= 0
return false
end
return true
diff --git a/src/equations/compressible_euler_3d.jl b/src/equations/compressible_euler_3d.jl
index f156aa29689..4f4d4553a8f 100644
--- a/src/equations/compressible_euler_3d.jl
+++ b/src/equations/compressible_euler_3d.jl
@@ -67,11 +67,12 @@ end
A constant initial condition to test free-stream preservation.
"""
function initial_condition_constant(x, t, equations::CompressibleEulerEquations3D)
- rho = 1.0
- rho_v1 = 0.1
- rho_v2 = -0.2
- rho_v3 = 0.7
- rho_e = 10.0
+ RealT = eltype(x)
+ rho = 1
+ rho_v1 = convert(RealT, 0.1)
+ rho_v2 = convert(RealT, -0.2)
+ rho_v3 = convert(RealT, 0.7)
+ rho_e = 10
return SVector(rho, rho_v1, rho_v2, rho_v3, rho_e)
end
@@ -83,11 +84,12 @@ A smooth initial condition used for convergence tests in combination with
"""
function initial_condition_convergence_test(x, t,
equations::CompressibleEulerEquations3D)
+ RealT = eltype(x)
c = 2
- A = 0.1
+ A = convert(RealT, 0.1)
L = 2
- f = 1 / L
- ω = 2 * pi * f
+ f = 1.0f0 / L
+ ω = 2 * convert(RealT, pi) * f
ini = c + A * sin(ω * (x[1] + x[2] + x[3] - t))
rho = ini
@@ -108,11 +110,12 @@ Source terms used for convergence tests in combination with
@inline function source_terms_convergence_test(u, x, t,
equations::CompressibleEulerEquations3D)
# Same settings as in `initial_condition`
+ RealT = eltype(u)
c = 2
- A = 0.1
+ A = convert(RealT, 0.1)
L = 2
- f = 1 / L
- ω = 2 * pi * f
+ f = 1.0f0 / L
+ ω = 2 * convert(RealT, pi) * f
γ = equations.gamma
x1, x2, x3 = x
@@ -121,7 +124,7 @@ Source terms used for convergence tests in combination with
rho_x = ω * A * co
# Note that d/dt rho = -d/dx rho = -d/dy rho = - d/dz rho.
- tmp = (2 * rho - 1.5) * (γ - 1)
+ tmp = (2 * rho - 1.5f0) * (γ - 1)
du1 = 2 * rho_x
du2 = rho_x * (2 + tmp)
@@ -144,20 +147,21 @@ function initial_condition_weak_blast_wave(x, t,
equations::CompressibleEulerEquations3D)
# From Hennemann & Gassner JCP paper 2020 (Sec. 6.3)
# Set up spherical coordinates
+ RealT = eltype(x)
inicenter = (0, 0, 0)
x_norm = x[1] - inicenter[1]
y_norm = x[2] - inicenter[2]
z_norm = x[3] - inicenter[3]
r = sqrt(x_norm^2 + y_norm^2 + z_norm^2)
phi = atan(y_norm, x_norm)
- theta = iszero(r) ? 0.0 : acos(z_norm / r)
+ theta = iszero(r) ? zero(RealT) : acos(z_norm / r)
# Calculate primitive variables
- rho = r > 0.5 ? 1.0 : 1.1691
- v1 = r > 0.5 ? 0.0 : 0.1882 * cos(phi) * sin(theta)
- v2 = r > 0.5 ? 0.0 : 0.1882 * sin(phi) * sin(theta)
- v3 = r > 0.5 ? 0.0 : 0.1882 * cos(theta)
- p = r > 0.5 ? 1.0 : 1.245
+ rho = r > 0.5f0 ? one(RealT) : convert(RealT, 1.1691)
+ v1 = r > 0.5f0 ? zero(RealT) : convert(RealT, 0.1882) * cos(phi) * sin(theta)
+ v2 = r > 0.5f0 ? zero(RealT) : convert(RealT, 0.1882) * sin(phi) * sin(theta)
+ v3 = r > 0.5f0 ? zero(RealT) : convert(RealT, 0.1882) * cos(theta)
+ p = r > 0.5f0 ? one(RealT) : convert(RealT, 1.245)
return prim2cons(SVector(rho, v1, v2, v3, p), equations)
end
@@ -175,19 +179,20 @@ or [`source_terms_eoc_test_euler`](@ref).
function initial_condition_eoc_test_coupled_euler_gravity(x, t,
equations::CompressibleEulerEquations3D)
# OBS! this assumes that γ = 2 other manufactured source terms are incorrect
- if equations.gamma != 2.0
+ if equations.gamma != 2
error("adiabatic constant must be 2 for the coupling convergence test")
end
- c = 2.0
- A = 0.1
- ini = c + A * sin(pi * (x[1] + x[2] + x[3] - t))
- G = 1.0 # gravitational constant
+ RealT = eltype(x)
+ c = 2
+ A = convert(RealT, 0.1)
+ ini = c + A * sin(convert(RealT, pi) * (x[1] + x[2] + x[3] - t))
+ G = 1 # gravitational constant
rho = ini
- v1 = 1.0
- v2 = 1.0
- v3 = 1.0
- p = ini^2 * G * 2 / (3 * pi) # "3" is the number of spatial dimensions
+ v1 = 1
+ v2 = 1
+ v3 = 1
+ p = ini^2 * G * 2 / (3 * convert(RealT, pi)) # "3" is the number of spatial dimensions
return prim2cons(SVector(rho, v1, v2, v3, p), equations)
end
@@ -204,15 +209,16 @@ in combination with [`initial_condition_eoc_test_coupled_euler_gravity`](@ref).
@inline function source_terms_eoc_test_coupled_euler_gravity(u, x, t,
equations::CompressibleEulerEquations3D)
# Same settings as in `initial_condition_eoc_test_coupled_euler_gravity`
- c = 2.0
- A = 0.1
- G = 1.0 # gravitational constant, must match coupling solver
- C_grav = -4 * G / (3 * pi) # "3" is the number of spatial dimensions # 2D: -2.0*G/pi
+ RealT = eltype(u)
+ c = 2
+ A = convert(RealT, 0.1)
+ G = 1 # gravitational constant, must match coupling solver
+ C_grav = -4 * G / (3 * convert(RealT, pi)) # "3" is the number of spatial dimensions # 2D: -2.0*G/pi
x1, x2, x3 = x
# TODO: sincospi
- si, co = sincos(pi * (x1 + x2 + x3 - t))
- rhox = A * pi * co
+ si, co = sincos(convert(RealT, pi) * (x1 + x2 + x3 - t))
+ rhox = A * convert(RealT, pi) * co
rho = c + A * si
# In "2 * rhox", the "2" is "number of spatial dimensions minus one"
@@ -220,7 +226,7 @@ in combination with [`initial_condition_eoc_test_coupled_euler_gravity`](@ref).
du2 = 2 * rhox
du3 = 2 * rhox
du4 = 2 * rhox
- du5 = 2 * rhox * (3 / 2 - C_grav * rho) # "3" in "3/2" is the number of spatial dimensions
+ du5 = 2 * rhox * (1.5f0 - C_grav * rho) # "3" in "3/2" is the number of spatial dimensions
return SVector(du1, du2, du3, du4, du5)
end
@@ -241,15 +247,16 @@ in combination with [`initial_condition_eoc_test_coupled_euler_gravity`](@ref).
"""
function source_terms_eoc_test_euler(u, x, t, equations::CompressibleEulerEquations3D)
# Same settings as in `initial_condition_eoc_test_coupled_euler_gravity`
- c = 2.0
- A = 0.1
- G = 1.0
- C_grav = -4 * G / (3 * pi) # "3" is the number of spatial dimensions
+ RealT = eltype(u)
+ c = 2
+ A = convert(RealT, 0.1)
+ G = 1
+ C_grav = -4 * G / (3 * convert(RealT, pi)) # "3" is the number of spatial dimensions
x1, x2, x3 = x
# TODO: sincospi
- si, co = sincos(pi * (x1 + x2 + x3 - t))
- rhox = A * pi * co
+ si, co = sincos(convert(RealT, pi) * (x1 + x2 + x3 - t))
+ rhox = A * convert(RealT, pi) * co
rho = c + A * si
du1 = rhox * 2
@@ -309,26 +316,26 @@ Details about the 1D pressure Riemann solution can be found in Section 6.3.3 of
# Eleuterio F. Toro (2009)
# Riemann Solvers and Numerical Methods for Fluid Dynamics: A Practical Introduction
# [DOI: 10.1007/b79761](https://doi.org/10.1007/b79761)
- if v_normal <= 0.0
+ if v_normal <= 0
sound_speed = sqrt(equations.gamma * p_local / rho_local) # local sound speed
p_star = p_local *
- (1 + 0.5 * (equations.gamma - 1) * v_normal / sound_speed)^(2 *
- equations.gamma *
- equations.inv_gamma_minus_one)
- else # v_normal > 0.0
+ (1 + 0.5f0 * (equations.gamma - 1) * v_normal / sound_speed)^(2 *
+ equations.gamma *
+ equations.inv_gamma_minus_one)
+ else # v_normal > 0
A = 2 / ((equations.gamma + 1) * rho_local)
B = p_local * (equations.gamma - 1) / (equations.gamma + 1)
p_star = p_local +
- 0.5 * v_normal / A *
+ 0.5f0 * v_normal / A *
(v_normal + sqrt(v_normal^2 + 4 * A * (p_local + B)))
end
# For the slip wall we directly set the flux as the normal velocity is zero
- return SVector(zero(eltype(u_inner)),
+ return SVector(0,
p_star * normal[1],
p_star * normal[2],
p_star * normal[3],
- zero(eltype(u_inner))) * norm_
+ 0) * norm_
end
"""
@@ -342,12 +349,13 @@ Should be used together with [`TreeMesh`](@ref).
surface_flux_function,
equations::CompressibleEulerEquations3D)
# get the appropriate normal vector from the orientation
+ RealT = eltype(u_inner)
if orientation == 1
- normal_direction = SVector(1.0, 0.0, 0.0)
+ normal_direction = SVector(one(RealT), zero(RealT), zero(RealT))
elseif orientation == 2
- normal_direction = SVector(0.0, 1.0, 0.0)
+ normal_direction = SVector(zero(RealT), one(RealT), zero(RealT))
else # orientation == 3
- normal_direction = SVector(0.0, 0.0, 1.0)
+ normal_direction = SVector(zero(RealT), zero(RealT), one(RealT))
end
# compute and return the flux using `boundary_condition_slip_wall` routine above
@@ -387,7 +395,7 @@ end
v2 = rho_v2 / rho
v3 = rho_v3 / rho
p = (equations.gamma - 1) *
- (rho_e - 0.5 * (rho_v1 * v1 + rho_v2 * v2 + rho_v3 * v3))
+ (rho_e - 0.5f0 * (rho_v1 * v1 + rho_v2 * v2 + rho_v3 * v3))
if orientation == 1
f1 = rho_v1
f2 = rho_v1 * v1 + p
@@ -410,17 +418,18 @@ end
return SVector(f1, f2, f3, f4, f5)
end
-@inline function flux(u, normal::AbstractVector,
+@inline function flux(u, normal_direction::AbstractVector,
equations::CompressibleEulerEquations3D)
rho_e = last(u)
rho, v1, v2, v3, p = cons2prim(u, equations)
- v_normal = v1 * normal[1] + v2 * normal[2] + v3 * normal[3]
+ v_normal = v1 * normal_direction[1] + v2 * normal_direction[2] +
+ v3 * normal_direction[3]
rho_v_normal = rho * v_normal
f1 = rho_v_normal
- f2 = rho_v_normal * v1 + p * normal[1]
- f3 = rho_v_normal * v2 + p * normal[2]
- f4 = rho_v_normal * v3 + p * normal[3]
+ f2 = rho_v_normal * v1 + p * normal_direction[1]
+ f3 = rho_v_normal * v2 + p * normal_direction[2]
+ f4 = rho_v_normal * v3 + p * normal_direction[3]
f5 = (rho_e + p) * v_normal
return SVector(f1, f2, f3, f4, f5)
end
@@ -448,30 +457,30 @@ The modification is in the energy flux to guarantee pressure equilibrium and was
rho_rr, v1_rr, v2_rr, v3_rr, p_rr = cons2prim(u_rr, equations)
# Average each factor of products in flux
- rho_avg = 1 / 2 * (rho_ll + rho_rr)
- v1_avg = 1 / 2 * (v1_ll + v1_rr)
- v2_avg = 1 / 2 * (v2_ll + v2_rr)
- v3_avg = 1 / 2 * (v3_ll + v3_rr)
- p_avg = 1 / 2 * (p_ll + p_rr)
- kin_avg = 1 / 2 * (v1_ll * v1_rr + v2_ll * v2_rr + v3_ll * v3_rr)
+ rho_avg = 0.5f0 * (rho_ll + rho_rr)
+ v1_avg = 0.5f0 * (v1_ll + v1_rr)
+ v2_avg = 0.5f0 * (v2_ll + v2_rr)
+ v3_avg = 0.5f0 * (v3_ll + v3_rr)
+ p_avg = 0.5f0 * (p_ll + p_rr)
+ kin_avg = 0.5f0 * (v1_ll * v1_rr + v2_ll * v2_rr + v3_ll * v3_rr)
# Calculate fluxes depending on orientation
if orientation == 1
- pv1_avg = 1 / 2 * (p_ll * v1_rr + p_rr * v1_ll)
+ pv1_avg = 0.5f0 * (p_ll * v1_rr + p_rr * v1_ll)
f1 = rho_avg * v1_avg
f2 = f1 * v1_avg + p_avg
f3 = f1 * v2_avg
f4 = f1 * v3_avg
f5 = p_avg * v1_avg * equations.inv_gamma_minus_one + f1 * kin_avg + pv1_avg
elseif orientation == 2
- pv2_avg = 1 / 2 * (p_ll * v2_rr + p_rr * v2_ll)
+ pv2_avg = 0.5f0 * (p_ll * v2_rr + p_rr * v2_ll)
f1 = rho_avg * v2_avg
f2 = f1 * v1_avg
f3 = f1 * v2_avg + p_avg
f4 = f1 * v3_avg
f5 = p_avg * v2_avg * equations.inv_gamma_minus_one + f1 * kin_avg + pv2_avg
else
- pv3_avg = 1 / 2 * (p_ll * v3_rr + p_rr * v3_ll)
+ pv3_avg = 0.5f0 * (p_ll * v3_rr + p_rr * v3_ll)
f1 = rho_avg * v3_avg
f2 = f1 * v1_avg
f3 = f1 * v2_avg
@@ -493,13 +502,13 @@ end
v3_rr * normal_direction[3]
# Average each factor of products in flux
- rho_avg = 1 / 2 * (rho_ll + rho_rr)
- v1_avg = 1 / 2 * (v1_ll + v1_rr)
- v2_avg = 1 / 2 * (v2_ll + v2_rr)
- v3_avg = 1 / 2 * (v3_ll + v3_rr)
- v_dot_n_avg = 1 / 2 * (v_dot_n_ll + v_dot_n_rr)
- p_avg = 1 / 2 * (p_ll + p_rr)
- velocity_square_avg = 0.5 * (v1_ll * v1_rr + v2_ll * v2_rr + v3_ll * v3_rr)
+ rho_avg = 0.5f0 * (rho_ll + rho_rr)
+ v1_avg = 0.5f0 * (v1_ll + v1_rr)
+ v2_avg = 0.5f0 * (v2_ll + v2_rr)
+ v3_avg = 0.5f0 * (v3_ll + v3_rr)
+ v_dot_n_avg = 0.5f0 * (v_dot_n_ll + v_dot_n_rr)
+ p_avg = 0.5f0 * (p_ll + p_rr)
+ velocity_square_avg = 0.5f0 * (v1_ll * v1_rr + v2_ll * v2_rr + v3_ll * v3_rr)
# Calculate fluxes depending on normal_direction
f1 = rho_avg * v_dot_n_avg
@@ -508,7 +517,7 @@ end
f4 = f1 * v3_avg + p_avg * normal_direction[3]
f5 = (f1 * velocity_square_avg +
p_avg * v_dot_n_avg * equations.inv_gamma_minus_one
- + 0.5 * (p_ll * v_dot_n_rr + p_rr * v_dot_n_ll))
+ + 0.5f0 * (p_ll * v_dot_n_rr + p_rr * v_dot_n_ll))
return SVector(f1, f2, f3, f4, f5)
end
@@ -532,12 +541,12 @@ Kinetic energy preserving two-point flux by
rho_rr, v1_rr, v2_rr, v3_rr, p_rr = cons2prim(u_rr, equations)
# Average each factor of products in flux
- rho_avg = 0.5 * (rho_ll + rho_rr)
- v1_avg = 0.5 * (v1_ll + v1_rr)
- v2_avg = 0.5 * (v2_ll + v2_rr)
- v3_avg = 0.5 * (v3_ll + v3_rr)
- p_avg = 0.5 * (p_ll + p_rr)
- e_avg = 0.5 * (rho_e_ll / rho_ll + rho_e_rr / rho_rr)
+ rho_avg = 0.5f0 * (rho_ll + rho_rr)
+ v1_avg = 0.5f0 * (v1_ll + v1_rr)
+ v2_avg = 0.5f0 * (v2_ll + v2_rr)
+ v3_avg = 0.5f0 * (v3_ll + v3_rr)
+ p_avg = 0.5f0 * (p_ll + p_rr)
+ e_avg = 0.5f0 * (rho_e_ll / rho_ll + rho_e_rr / rho_rr)
# Calculate fluxes depending on orientation
if orientation == 1
@@ -579,17 +588,17 @@ end
v3_rr = rho_v3_rr / rho_rr
# Average each factor of products in flux
- rho_avg = 0.5 * (rho_ll + rho_rr)
- v1_avg = 0.5 * (v1_ll + v1_rr)
- v2_avg = 0.5 * (v2_ll + v2_rr)
- v3_avg = 0.5 * (v3_ll + v3_rr)
+ rho_avg = 0.5f0 * (rho_ll + rho_rr)
+ v1_avg = 0.5f0 * (v1_ll + v1_rr)
+ v2_avg = 0.5f0 * (v2_ll + v2_rr)
+ v3_avg = 0.5f0 * (v3_ll + v3_rr)
v_dot_n_avg = v1_avg * normal_direction[1] + v2_avg * normal_direction[2] +
v3_avg * normal_direction[3]
- p_avg = 0.5 * ((equations.gamma - 1) *
- (rho_e_ll - 0.5 * rho_ll * (v1_ll^2 + v2_ll^2 + v3_ll^2)) +
+ p_avg = 0.5f0 * ((equations.gamma - 1) *
+ (rho_e_ll - 0.5f0 * rho_ll * (v1_ll^2 + v2_ll^2 + v3_ll^2)) +
(equations.gamma - 1) *
- (rho_e_rr - 0.5 * rho_rr * (v1_rr^2 + v2_rr^2 + v3_rr^2)))
- e_avg = 0.5 * (rho_e_ll / rho_ll + rho_e_rr / rho_rr)
+ (rho_e_rr - 0.5f0 * rho_rr * (v1_rr^2 + v2_rr^2 + v3_rr^2)))
+ e_avg = 0.5f0 * (rho_e_ll / rho_ll + rho_e_rr / rho_rr)
# Calculate fluxes depending on normal_direction
f1 = rho_avg * v_dot_n_avg
@@ -616,20 +625,20 @@ Entropy conserving two-point flux by
rho_ll, v1_ll, v2_ll, v3_ll, p_ll = cons2prim(u_ll, equations)
rho_rr, v1_rr, v2_rr, v3_rr, p_rr = cons2prim(u_rr, equations)
- beta_ll = 0.5 * rho_ll / p_ll
- beta_rr = 0.5 * rho_rr / p_rr
- specific_kin_ll = 0.5 * (v1_ll^2 + v2_ll^2 + v3_ll^2)
- specific_kin_rr = 0.5 * (v1_rr^2 + v2_rr^2 + v3_rr^2)
+ beta_ll = 0.5f0 * rho_ll / p_ll
+ beta_rr = 0.5f0 * rho_rr / p_rr
+ specific_kin_ll = 0.5f0 * (v1_ll^2 + v2_ll^2 + v3_ll^2)
+ specific_kin_rr = 0.5f0 * (v1_rr^2 + v2_rr^2 + v3_rr^2)
# Compute the necessary mean values
- rho_avg = 0.5 * (rho_ll + rho_rr)
+ rho_avg = 0.5f0 * (rho_ll + rho_rr)
rho_mean = ln_mean(rho_ll, rho_rr)
beta_mean = ln_mean(beta_ll, beta_rr)
- beta_avg = 0.5 * (beta_ll + beta_rr)
- v1_avg = 0.5 * (v1_ll + v1_rr)
- v2_avg = 0.5 * (v2_ll + v2_rr)
- v3_avg = 0.5 * (v3_ll + v3_rr)
- p_mean = 0.5 * rho_avg / beta_avg
+ beta_avg = 0.5f0 * (beta_ll + beta_rr)
+ v1_avg = 0.5f0 * (v1_ll + v1_rr)
+ v2_avg = 0.5f0 * (v2_ll + v2_rr)
+ v3_avg = 0.5f0 * (v3_ll + v3_rr)
+ p_mean = 0.5f0 * rho_avg / beta_avg
velocity_square_avg = specific_kin_ll + specific_kin_rr
# Calculate fluxes depending on orientation
@@ -638,21 +647,24 @@ Entropy conserving two-point flux by
f2 = f1 * v1_avg + p_mean
f3 = f1 * v2_avg
f4 = f1 * v3_avg
- f5 = f1 * 0.5 * (1 / (equations.gamma - 1) / beta_mean - velocity_square_avg) +
+ f5 = f1 * 0.5f0 *
+ (1 / (equations.gamma - 1) / beta_mean - velocity_square_avg) +
f2 * v1_avg + f3 * v2_avg + f4 * v3_avg
elseif orientation == 2
f1 = rho_mean * v2_avg
f2 = f1 * v1_avg
f3 = f1 * v2_avg + p_mean
f4 = f1 * v3_avg
- f5 = f1 * 0.5 * (1 / (equations.gamma - 1) / beta_mean - velocity_square_avg) +
+ f5 = f1 * 0.5f0 *
+ (1 / (equations.gamma - 1) / beta_mean - velocity_square_avg) +
f2 * v1_avg + f3 * v2_avg + f4 * v3_avg
else
f1 = rho_mean * v3_avg
f2 = f1 * v1_avg
f3 = f1 * v2_avg
f4 = f1 * v3_avg + p_mean
- f5 = f1 * 0.5 * (1 / (equations.gamma - 1) / beta_mean - velocity_square_avg) +
+ f5 = f1 * 0.5f0 *
+ (1 / (equations.gamma - 1) / beta_mean - velocity_square_avg) +
f2 * v1_avg + f3 * v2_avg + f4 * v3_avg
end
@@ -670,28 +682,28 @@ end
v_dot_n_rr = v1_rr * normal_direction[1] + v2_rr * normal_direction[2] +
v3_rr * normal_direction[3]
- beta_ll = 0.5 * rho_ll / p_ll
- beta_rr = 0.5 * rho_rr / p_rr
- specific_kin_ll = 0.5 * (v1_ll^2 + v2_ll^2 + v3_ll^2)
- specific_kin_rr = 0.5 * (v1_rr^2 + v2_rr^2 + v3_rr^2)
+ beta_ll = 0.5f0 * rho_ll / p_ll
+ beta_rr = 0.5f0 * rho_rr / p_rr
+ specific_kin_ll = 0.5f0 * (v1_ll^2 + v2_ll^2 + v3_ll^2)
+ specific_kin_rr = 0.5f0 * (v1_rr^2 + v2_rr^2 + v3_rr^2)
# Compute the necessary mean values
- rho_avg = 0.5 * (rho_ll + rho_rr)
+ rho_avg = 0.5f0 * (rho_ll + rho_rr)
rho_mean = ln_mean(rho_ll, rho_rr)
beta_mean = ln_mean(beta_ll, beta_rr)
- beta_avg = 0.5 * (beta_ll + beta_rr)
- v1_avg = 0.5 * (v1_ll + v1_rr)
- v2_avg = 0.5 * (v2_ll + v2_rr)
- v3_avg = 0.5 * (v3_ll + v3_rr)
- p_mean = 0.5 * rho_avg / beta_avg
+ beta_avg = 0.5f0 * (beta_ll + beta_rr)
+ v1_avg = 0.5f0 * (v1_ll + v1_rr)
+ v2_avg = 0.5f0 * (v2_ll + v2_rr)
+ v3_avg = 0.5f0 * (v3_ll + v3_rr)
+ p_mean = 0.5f0 * rho_avg / beta_avg
velocity_square_avg = specific_kin_ll + specific_kin_rr
# Multiply with average of normal velocities
- f1 = rho_mean * 0.5 * (v_dot_n_ll + v_dot_n_rr)
+ f1 = rho_mean * 0.5f0 * (v_dot_n_ll + v_dot_n_rr)
f2 = f1 * v1_avg + p_mean * normal_direction[1]
f3 = f1 * v2_avg + p_mean * normal_direction[2]
f4 = f1 * v3_avg + p_mean * normal_direction[3]
- f5 = f1 * 0.5 * (1 / (equations.gamma - 1) / beta_mean - velocity_square_avg) +
+ f5 = f1 * 0.5f0 * (1 / (equations.gamma - 1) / beta_mean - velocity_square_avg) +
f2 * v1_avg + f3 * v2_avg + f4 * v3_avg
return SVector(f1, f2, f3, f4, f5)
@@ -725,11 +737,11 @@ See also
# log((ϱₗ/pₗ) / (ϱᵣ/pᵣ)) / (ϱₗ/pₗ - ϱᵣ/pᵣ)
# = pₗ pᵣ log((ϱₗ pᵣ) / (ϱᵣ pₗ)) / (ϱₗ pᵣ - ϱᵣ pₗ)
inv_rho_p_mean = p_ll * p_rr * inv_ln_mean(rho_ll * p_rr, rho_rr * p_ll)
- v1_avg = 0.5 * (v1_ll + v1_rr)
- v2_avg = 0.5 * (v2_ll + v2_rr)
- v3_avg = 0.5 * (v3_ll + v3_rr)
- p_avg = 0.5 * (p_ll + p_rr)
- velocity_square_avg = 0.5 * (v1_ll * v1_rr + v2_ll * v2_rr + v3_ll * v3_rr)
+ v1_avg = 0.5f0 * (v1_ll + v1_rr)
+ v2_avg = 0.5f0 * (v2_ll + v2_rr)
+ v3_avg = 0.5f0 * (v3_ll + v3_rr)
+ p_avg = 0.5f0 * (p_ll + p_rr)
+ velocity_square_avg = 0.5f0 * (v1_ll * v1_rr + v2_ll * v2_rr + v3_ll * v3_rr)
# Calculate fluxes depending on orientation
if orientation == 1
@@ -739,7 +751,7 @@ See also
f4 = f1 * v3_avg
f5 = f1 *
(velocity_square_avg + inv_rho_p_mean * equations.inv_gamma_minus_one) +
- 0.5 * (p_ll * v1_rr + p_rr * v1_ll)
+ 0.5f0 * (p_ll * v1_rr + p_rr * v1_ll)
elseif orientation == 2
f1 = rho_mean * v2_avg
f2 = f1 * v1_avg
@@ -747,7 +759,7 @@ See also
f4 = f1 * v3_avg
f5 = f1 *
(velocity_square_avg + inv_rho_p_mean * equations.inv_gamma_minus_one) +
- 0.5 * (p_ll * v2_rr + p_rr * v2_ll)
+ 0.5f0 * (p_ll * v2_rr + p_rr * v2_ll)
else # orientation == 3
f1 = rho_mean * v3_avg
f2 = f1 * v1_avg
@@ -755,7 +767,7 @@ See also
f4 = f1 * v3_avg + p_avg
f5 = f1 *
(velocity_square_avg + inv_rho_p_mean * equations.inv_gamma_minus_one) +
- 0.5 * (p_ll * v3_rr + p_rr * v3_ll)
+ 0.5f0 * (p_ll * v3_rr + p_rr * v3_ll)
end
return SVector(f1, f2, f3, f4, f5)
@@ -778,20 +790,20 @@ end
# log((ϱₗ/pₗ) / (ϱᵣ/pᵣ)) / (ϱₗ/pₗ - ϱᵣ/pᵣ)
# = pₗ pᵣ log((ϱₗ pᵣ) / (ϱᵣ pₗ)) / (ϱₗ pᵣ - ϱᵣ pₗ)
inv_rho_p_mean = p_ll * p_rr * inv_ln_mean(rho_ll * p_rr, rho_rr * p_ll)
- v1_avg = 0.5 * (v1_ll + v1_rr)
- v2_avg = 0.5 * (v2_ll + v2_rr)
- v3_avg = 0.5 * (v3_ll + v3_rr)
- p_avg = 0.5 * (p_ll + p_rr)
- velocity_square_avg = 0.5 * (v1_ll * v1_rr + v2_ll * v2_rr + v3_ll * v3_rr)
+ v1_avg = 0.5f0 * (v1_ll + v1_rr)
+ v2_avg = 0.5f0 * (v2_ll + v2_rr)
+ v3_avg = 0.5f0 * (v3_ll + v3_rr)
+ p_avg = 0.5f0 * (p_ll + p_rr)
+ velocity_square_avg = 0.5f0 * (v1_ll * v1_rr + v2_ll * v2_rr + v3_ll * v3_rr)
# Calculate fluxes depending on normal_direction
- f1 = rho_mean * 0.5 * (v_dot_n_ll + v_dot_n_rr)
+ f1 = rho_mean * 0.5f0 * (v_dot_n_ll + v_dot_n_rr)
f2 = f1 * v1_avg + p_avg * normal_direction[1]
f3 = f1 * v2_avg + p_avg * normal_direction[2]
f4 = f1 * v3_avg + p_avg * normal_direction[3]
f5 = (f1 * (velocity_square_avg + inv_rho_p_mean * equations.inv_gamma_minus_one)
+
- 0.5 * (p_ll * v_dot_n_rr + p_rr * v_dot_n_ll))
+ 0.5f0 * (p_ll * v_dot_n_rr + p_rr * v_dot_n_ll))
return SVector(f1, f2, f3, f4, f5)
end
@@ -834,7 +846,7 @@ end
v2 = rho_v2 / rho
v3 = rho_v3 / rho
p = (equations.gamma - 1) *
- (rho_e - 0.5 * (rho_v1 * v1 + rho_v2 * v2 + rho_v3 * v3))
+ (rho_e - 0.5f0 * (rho_v1 * v1 + rho_v2 * v2 + rho_v3 * v3))
a = sqrt(equations.gamma * p / rho)
if orientation == 1
@@ -848,13 +860,15 @@ end
alpha_p = 2 * (equations.gamma - 1) * lambda1_p + lambda2_p + lambda3_p
- rho_2gamma = 0.5 * rho / equations.gamma
+ rho_2gamma = 0.5f0 * rho / equations.gamma
f1p = rho_2gamma * alpha_p
f2p = rho_2gamma * (alpha_p * v1 + a * (lambda2_p - lambda3_p))
f3p = rho_2gamma * alpha_p * v2
f4p = rho_2gamma * alpha_p * v3
f5p = rho_2gamma *
- (alpha_p * 0.5 * (v1^2 + v2^2 + v3^2) + a * v1 * (lambda2_p - lambda3_p)
+ (alpha_p * 0.5f0 * (v1^2 + v2^2 + v3^2) +
+ a * v1 *
+ (lambda2_p - lambda3_p)
+ a^2 * (lambda2_p + lambda3_p) * equations.inv_gamma_minus_one)
elseif orientation == 2
lambda1 = v2
@@ -867,13 +881,15 @@ end
alpha_p = 2 * (equations.gamma - 1) * lambda1_p + lambda2_p + lambda3_p
- rho_2gamma = 0.5 * rho / equations.gamma
+ rho_2gamma = 0.5f0 * rho / equations.gamma
f1p = rho_2gamma * alpha_p
f2p = rho_2gamma * alpha_p * v1
f3p = rho_2gamma * (alpha_p * v2 + a * (lambda2_p - lambda3_p))
f4p = rho_2gamma * alpha_p * v3
f5p = rho_2gamma *
- (alpha_p * 0.5 * (v1^2 + v2^2 + v3^2) + a * v2 * (lambda2_p - lambda3_p)
+ (alpha_p * 0.5f0 * (v1^2 + v2^2 + v3^2) +
+ a * v2 *
+ (lambda2_p - lambda3_p)
+ a^2 * (lambda2_p + lambda3_p) * equations.inv_gamma_minus_one)
else # orientation == 3
lambda1 = v3
@@ -886,13 +902,15 @@ end
alpha_p = 2 * (equations.gamma - 1) * lambda1_p + lambda2_p + lambda3_p
- rho_2gamma = 0.5 * rho / equations.gamma
+ rho_2gamma = 0.5f0 * rho / equations.gamma
f1p = rho_2gamma * alpha_p
f2p = rho_2gamma * alpha_p * v1
f3p = rho_2gamma * alpha_p * v2
f4p = rho_2gamma * (alpha_p * v3 + a * (lambda2_p - lambda3_p))
f5p = rho_2gamma *
- (alpha_p * 0.5 * (v1^2 + v2^2 + v3^2) + a * v3 * (lambda2_p - lambda3_p)
+ (alpha_p * 0.5f0 * (v1^2 + v2^2 + v3^2) +
+ a * v3 *
+ (lambda2_p - lambda3_p)
+ a^2 * (lambda2_p + lambda3_p) * equations.inv_gamma_minus_one)
end
return SVector(f1p, f2p, f3p, f4p, f5p)
@@ -905,7 +923,7 @@ end
v2 = rho_v2 / rho
v3 = rho_v3 / rho
p = (equations.gamma - 1) *
- (rho_e - 0.5 * (rho_v1 * v1 + rho_v2 * v2 + rho_v3 * v3))
+ (rho_e - 0.5f0 * (rho_v1 * v1 + rho_v2 * v2 + rho_v3 * v3))
a = sqrt(equations.gamma * p / rho)
if orientation == 1
@@ -919,13 +937,15 @@ end
alpha_m = 2 * (equations.gamma - 1) * lambda1_m + lambda2_m + lambda3_m
- rho_2gamma = 0.5 * rho / equations.gamma
+ rho_2gamma = 0.5f0 * rho / equations.gamma
f1m = rho_2gamma * alpha_m
f2m = rho_2gamma * (alpha_m * v1 + a * (lambda2_m - lambda3_m))
f3m = rho_2gamma * alpha_m * v2
f4m = rho_2gamma * alpha_m * v3
f5m = rho_2gamma *
- (alpha_m * 0.5 * (v1^2 + v2^2 + v3^2) + a * v1 * (lambda2_m - lambda3_m)
+ (alpha_m * 0.5f0 * (v1^2 + v2^2 + v3^2) +
+ a * v1 *
+ (lambda2_m - lambda3_m)
+ a^2 * (lambda2_m + lambda3_m) * equations.inv_gamma_minus_one)
elseif orientation == 2
lambda1 = v2
@@ -938,13 +958,15 @@ end
alpha_m = 2 * (equations.gamma - 1) * lambda1_m + lambda2_m + lambda3_m
- rho_2gamma = 0.5 * rho / equations.gamma
+ rho_2gamma = 0.5f0 * rho / equations.gamma
f1m = rho_2gamma * alpha_m
f2m = rho_2gamma * alpha_m * v1
f3m = rho_2gamma * (alpha_m * v2 + a * (lambda2_m - lambda3_m))
f4m = rho_2gamma * alpha_m * v3
f5m = rho_2gamma *
- (alpha_m * 0.5 * (v1^2 + v2^2 + v3^2) + a * v2 * (lambda2_m - lambda3_m)
+ (alpha_m * 0.5f0 * (v1^2 + v2^2 + v3^2) +
+ a * v2 *
+ (lambda2_m - lambda3_m)
+ a^2 * (lambda2_m + lambda3_m) * equations.inv_gamma_minus_one)
else # orientation == 3
lambda1 = v3
@@ -957,13 +979,15 @@ end
alpha_m = 2 * (equations.gamma - 1) * lambda1_m + lambda2_m + lambda3_m
- rho_2gamma = 0.5 * rho / equations.gamma
+ rho_2gamma = 0.5f0 * rho / equations.gamma
f1m = rho_2gamma * alpha_m
f2m = rho_2gamma * alpha_m * v1
f3m = rho_2gamma * alpha_m * v2
f4m = rho_2gamma * (alpha_m * v3 + a * (lambda2_m - lambda3_m))
f5m = rho_2gamma *
- (alpha_m * 0.5 * (v1^2 + v2^2 + v3^2) + a * v3 * (lambda2_m - lambda3_m)
+ (alpha_m * 0.5f0 * (v1^2 + v2^2 + v3^2) +
+ a * v3 *
+ (lambda2_m - lambda3_m)
+ a^2 * (lambda2_m + lambda3_m) * equations.inv_gamma_minus_one)
end
return SVector(f1m, f2m, f3m, f4m, f5m)
@@ -1001,9 +1025,9 @@ References:
v_rr = v3_rr
end
- rho = 0.5 * (rho_ll + rho_rr)
- p = 0.5 * (p_ll + p_rr) - 0.5 * c * rho * (v_rr - v_ll)
- v = 0.5 * (v_ll + v_rr) - 1 / (2 * c * rho) * (p_rr - p_ll)
+ rho = 0.5f0 * (rho_ll + rho_rr)
+ p = 0.5f0 * (p_ll + p_rr) - 0.5f0 * c * rho * (v_rr - v_ll)
+ v = 0.5f0 * (v_ll + v_rr) - 1 / (2 * c * rho) * (p_rr - p_ll)
# We treat the energy term analogous to the potential temperature term in the paper by
# Chen et al., i.e. we use p_ll and p_rr, and not p
@@ -1043,9 +1067,9 @@ end
# and then multiplying v by `norm_` again, but this version is slightly faster.
norm_ = norm(normal_direction)
- rho = 0.5 * (rho_ll + rho_rr)
- p = 0.5 * (p_ll + p_rr) - 0.5 * c * rho * (v_rr - v_ll) / norm_
- v = 0.5 * (v_ll + v_rr) - 1 / (2 * c * rho) * (p_rr - p_ll) * norm_
+ rho = 0.5f0 * (rho_ll + rho_rr)
+ p = 0.5f0 * (p_ll + p_rr) - 0.5f0 * c * rho * (v_rr - v_ll) / norm_
+ v = 0.5f0 * (v_ll + v_rr) - 1 / (2 * c * rho) * (p_rr - p_ll) * norm_
# We treat the energy term analogous to the potential temperature term in the paper by
# Chen et al., i.e. we use p_ll and p_rr, and not p
@@ -1248,7 +1272,7 @@ function flux_hllc(u_ll, u_rr, orientation::Integer,
v3_ll = rho_v3_ll / rho_ll
e_ll = rho_e_ll / rho_ll
p_ll = (equations.gamma - 1) *
- (rho_e_ll - 1 / 2 * rho_ll * (v1_ll^2 + v2_ll^2 + v3_ll^2))
+ (rho_e_ll - 0.5f0 * rho_ll * (v1_ll^2 + v2_ll^2 + v3_ll^2))
c_ll = sqrt(equations.gamma * p_ll / rho_ll)
v1_rr = rho_v1_rr / rho_rr
@@ -1256,7 +1280,7 @@ function flux_hllc(u_ll, u_rr, orientation::Integer,
v3_rr = rho_v3_rr / rho_rr
e_rr = rho_e_rr / rho_rr
p_rr = (equations.gamma - 1) *
- (rho_e_rr - 1 / 2 * rho_rr * (v1_rr^2 + v2_rr^2 + v3_rr^2))
+ (rho_e_rr - 0.5f0 * rho_rr * (v1_rr^2 + v2_rr^2 + v3_rr^2))
c_rr = sqrt(equations.gamma * p_rr / rho_rr)
# Obtain left and right fluxes
@@ -1285,19 +1309,19 @@ function flux_hllc(u_ll, u_rr, orientation::Integer,
H_ll = (rho_e_ll + p_ll) / rho_ll
H_rr = (rho_e_rr + p_rr) / rho_rr
H_roe = (sqrt_rho_ll * H_ll + sqrt_rho_rr * H_rr) / sum_sqrt_rho
- c_roe = sqrt((equations.gamma - 1) * (H_roe - 0.5 * vel_roe_mag))
+ c_roe = sqrt((equations.gamma - 1) * (H_roe - 0.5f0 * vel_roe_mag))
Ssl = min(vel_L - c_ll, vel_roe - c_roe)
Ssr = max(vel_R + c_rr, vel_roe + c_roe)
sMu_L = Ssl - vel_L
sMu_R = Ssr - vel_R
- if Ssl >= 0.0
+ if Ssl >= 0
f1 = f_ll[1]
f2 = f_ll[2]
f3 = f_ll[3]
f4 = f_ll[4]
f5 = f_ll[5]
- elseif Ssr <= 0.0
+ elseif Ssr <= 0
f1 = f_rr[1]
f2 = f_rr[2]
f3 = f_rr[3]
@@ -1306,7 +1330,7 @@ function flux_hllc(u_ll, u_rr, orientation::Integer,
else
SStar = (p_rr - p_ll + rho_ll * vel_L * sMu_L - rho_rr * vel_R * sMu_R) /
(rho_ll * sMu_L - rho_rr * sMu_R)
- if Ssl <= 0.0 <= SStar
+ if Ssl <= 0 <= SStar
densStar = rho_ll * sMu_L / (Ssl - SStar)
enerStar = e_ll + (SStar - vel_L) * (SStar + p_ll / (rho_ll * sMu_L))
UStar1 = densStar
@@ -1398,20 +1422,20 @@ function flux_hllc(u_ll, u_rr, normal_direction::AbstractVector,
H_rr = (u_rr[5] + p_rr) / rho_rr
H_roe = (sqrt_rho_ll * H_ll + sqrt_rho_rr * H_rr) / sum_sqrt_rho
- c_roe = sqrt((equations.gamma - 1) * (H_roe - 0.5 * vel_roe_mag)) * norm_
+ c_roe = sqrt((equations.gamma - 1) * (H_roe - 0.5f0 * vel_roe_mag)) * norm_
Ssl = min(v_dot_n_ll - c_ll, vel_roe - c_roe)
Ssr = max(v_dot_n_rr + c_rr, vel_roe + c_roe)
sMu_L = Ssl - v_dot_n_ll
sMu_R = Ssr - v_dot_n_rr
- if Ssl >= 0.0
+ if Ssl >= 0
f1 = f_ll[1]
f2 = f_ll[2]
f3 = f_ll[3]
f4 = f_ll[4]
f5 = f_ll[5]
- elseif Ssr <= 0.0
+ elseif Ssr <= 0
f1 = f_rr[1]
f2 = f_rr[2]
f3 = f_rr[3]
@@ -1420,7 +1444,7 @@ function flux_hllc(u_ll, u_rr, normal_direction::AbstractVector,
else
SStar = (rho_ll * v_dot_n_ll * sMu_L - rho_rr * v_dot_n_rr * sMu_R +
(p_rr - p_ll) * norm_sq) / (rho_ll * sMu_L - rho_rr * sMu_R)
- if Ssl <= 0.0 <= SStar
+ if Ssl <= 0 <= SStar
densStar = rho_ll * sMu_L / (Ssl - SStar)
enerStar = e_ll +
(SStar - v_dot_n_ll) *
@@ -1501,22 +1525,22 @@ of the numerical flux.
v_roe_mag = v1_roe^2 + v2_roe^2 + v3_roe^2
H_roe = (sqrt_rho_ll * H_ll + sqrt_rho_rr * H_rr) * inv_sum_sqrt_rho
- c_roe = sqrt((equations.gamma - 1) * (H_roe - 0.5 * v_roe_mag))
+ c_roe = sqrt((equations.gamma - 1) * (H_roe - 0.5f0 * v_roe_mag))
# Compute convenience constant for positivity preservation, see
# https://doi.org/10.1016/0021-9991(91)90211-3
- beta = sqrt(0.5 * (equations.gamma - 1) / equations.gamma)
+ beta = sqrt(0.5f0 * (equations.gamma - 1) / equations.gamma)
# Estimate the edges of the Riemann fan (with positivity conservation)
if orientation == 1 # x-direction
- SsL = min(v1_roe - c_roe, v1_ll - beta * c_ll, zero(v1_roe))
- SsR = max(v1_roe + c_roe, v1_rr + beta * c_rr, zero(v1_roe))
+ SsL = min(v1_roe - c_roe, v1_ll - beta * c_ll, 0)
+ SsR = max(v1_roe + c_roe, v1_rr + beta * c_rr, 0)
elseif orientation == 2 # y-direction
- SsL = min(v2_roe - c_roe, v2_ll - beta * c_ll, zero(v2_roe))
- SsR = max(v2_roe + c_roe, v2_rr + beta * c_rr, zero(v2_roe))
+ SsL = min(v2_roe - c_roe, v2_ll - beta * c_ll, 0)
+ SsR = max(v2_roe + c_roe, v2_rr + beta * c_rr, 0)
else # z-direction
- SsL = min(v3_roe - c_roe, v3_ll - beta * c_ll, zero(v3_roe))
- SsR = max(v3_roe + c_roe, v3_rr + beta * c_rr, zero(v3_roe))
+ SsL = min(v3_roe - c_roe, v3_ll - beta * c_ll, 0)
+ SsR = max(v3_roe + c_roe, v3_rr + beta * c_rr, 0)
end
return SsL, SsR
@@ -1571,15 +1595,15 @@ of the numerical flux.
v_roe_mag = v1_roe^2 + v2_roe^2 + v3_roe^2
H_roe = (sqrt_rho_ll * H_ll + sqrt_rho_rr * H_rr) * inv_sum_sqrt_rho
- c_roe = sqrt((equations.gamma - 1) * (H_roe - 0.5 * v_roe_mag)) * norm_
+ c_roe = sqrt((equations.gamma - 1) * (H_roe - 0.5f0 * v_roe_mag)) * norm_
# Compute convenience constant for positivity preservation, see
# https://doi.org/10.1016/0021-9991(91)90211-3
- beta = sqrt(0.5 * (equations.gamma - 1) / equations.gamma)
+ beta = sqrt(0.5f0 * (equations.gamma - 1) / equations.gamma)
# Estimate the edges of the Riemann fan (with positivity conservation)
- SsL = min(v_roe - c_roe, v_dot_n_ll - beta * c_ll, zero(v_roe))
- SsR = max(v_roe + c_roe, v_dot_n_rr + beta * c_rr, zero(v_roe))
+ SsL = min(v_roe - c_roe, v_dot_n_ll - beta * c_ll, 0)
+ SsR = max(v_roe + c_roe, v_dot_n_rr + beta * c_rr, 0)
return SsL, SsR
end
@@ -1599,7 +1623,7 @@ end
v2 = rho_v2 / rho
v3 = rho_v3 / rho
p = (equations.gamma - 1) *
- (rho_e - 0.5 * (rho_v1 * v1 + rho_v2 * v2 + rho_v3 * v3))
+ (rho_e - 0.5f0 * (rho_v1 * v1 + rho_v2 * v2 + rho_v3 * v3))
return SVector(rho, v1, v2, v3, p)
end
@@ -1612,11 +1636,12 @@ end
v2 = rho_v2 / rho
v3 = rho_v3 / rho
v_square = v1^2 + v2^2 + v3^2
- p = (equations.gamma - 1) * (rho_e - 0.5 * rho * v_square)
+ p = (equations.gamma - 1) * (rho_e - 0.5f0 * rho * v_square)
s = log(p) - equations.gamma * log(rho)
rho_p = rho / p
- w1 = (equations.gamma - s) * equations.inv_gamma_minus_one - 0.5 * rho_p * v_square
+ w1 = (equations.gamma - s) * equations.inv_gamma_minus_one -
+ 0.5f0 * rho_p * v_square
w2 = rho_p * v1
w3 = rho_p * v2
w4 = rho_p * v3
@@ -1658,7 +1683,7 @@ end
rho_v2 = rho * v2
rho_v3 = rho * v3
rho_e = p * equations.inv_gamma_minus_one +
- 0.5 * (rho_v1 * v1 + rho_v2 * v2 + rho_v3 * v3)
+ 0.5f0 * (rho_v1 * v1 + rho_v2 * v2 + rho_v3 * v3)
return SVector(rho, rho_v1, rho_v2, rho_v3, rho_e)
end
@@ -1669,14 +1694,14 @@ end
@inline function pressure(u, equations::CompressibleEulerEquations3D)
rho, rho_v1, rho_v2, rho_v3, rho_e = u
- p = (equations.gamma - 1) * (rho_e - 0.5 * (rho_v1^2 + rho_v2^2 + rho_v3^2) / rho)
+ p = (equations.gamma - 1) * (rho_e - 0.5f0 * (rho_v1^2 + rho_v2^2 + rho_v3^2) / rho)
return p
end
@inline function density_pressure(u, equations::CompressibleEulerEquations3D)
rho, rho_v1, rho_v2, rho_v3, rho_e = u
rho_times_p = (equations.gamma - 1) *
- (rho * rho_e - 0.5 * (rho_v1^2 + rho_v2^2 + rho_v3^2))
+ (rho * rho_e - 0.5f0 * (rho_v1^2 + rho_v2^2 + rho_v3^2))
return rho_times_p
end
@@ -1711,7 +1736,7 @@ end
# Calculate kinetic energy for a conservative state `cons`
@inline function energy_kinetic(u, equations::CompressibleEulerEquations3D)
rho, rho_v1, rho_v2, rho_v3, _ = u
- return 0.5 * (rho_v1^2 + rho_v2^2 + rho_v3^2) / rho
+ return 0.5f0 * (rho_v1^2 + rho_v2^2 + rho_v3^2) / rho
end
# Calculate internal energy for a conservative state `cons`
diff --git a/test/runtests.jl b/test/runtests.jl
index 49f0977bb70..836488d0d8e 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -108,6 +108,7 @@ const TRIXI_NTHREADS = clamp(Sys.CPU_THREADS, 2, 3)
@time if TRIXI_TEST == "all" || TRIXI_TEST == "misc_part1"
include("test_unit.jl")
+ include("test_type.jl")
include("test_visualization.jl")
end
diff --git a/test/test_type.jl b/test/test_type.jl
new file mode 100644
index 00000000000..de02ec47110
--- /dev/null
+++ b/test/test_type.jl
@@ -0,0 +1,502 @@
+module TestType
+
+using Test
+using Trixi
+
+include("test_trixi.jl")
+
+# Start with a clean environment: remove Trixi.jl output directory if it exists
+outdir = "out"
+isdir(outdir) && rm(outdir, recursive = true)
+
+# Run unit tests for various equations
+@testset "Test Type Stability" begin
+ @timed_testset "Acoustic Perturbation 2D" begin
+ for RealT in (Float32, Float64)
+ v_mean_global = (zero(RealT), zero(RealT))
+ c_mean_global = one(RealT)
+ rho_mean_global = one(RealT)
+ equations = @inferred AcousticPerturbationEquations2D(v_mean_global,
+ c_mean_global,
+ rho_mean_global)
+
+ x = SVector(zero(RealT), zero(RealT))
+ t = zero(RealT)
+ u = u_ll = u_rr = u_inner = SVector(one(RealT), one(RealT), one(RealT),
+ one(RealT),
+ one(RealT),
+ one(RealT), one(RealT))
+ orientations = [1, 2]
+ directions = [1, 2, 3, 4]
+ normal_direction = SVector(one(RealT), zero(RealT))
+
+ surface_flux_function = flux_lax_friedrichs
+ dissipation = DissipationLocalLaxFriedrichs()
+
+ @test eltype(@inferred initial_condition_constant(x, t, equations)) == RealT
+ @test eltype(@inferred initial_condition_convergence_test(x, t, equations)) ==
+ RealT
+ @test eltype(@inferred initial_condition_gauss(x, t, equations)) == RealT
+
+ @test eltype(@inferred source_terms_convergence_test(u, x, t, equations)) ==
+ RealT
+ @test eltype(@inferred source_terms_convergence_test(u, x, t, equations)) ==
+ RealT
+
+ for orientation in orientations
+ for direction in directions
+ if RealT == Float32
+ # check `surface_flux_function` (test broken)
+ @test_broken eltype(boundary_condition_wall(u_inner, orientation,
+ direction, x, t,
+ surface_flux_function,
+ equations)) == RealT
+ else
+ @test eltype(@inferred boundary_condition_wall(u_inner, orientation,
+ direction, x, t,
+ surface_flux_function,
+ equations)) == RealT
+ end
+ end
+ end
+
+ if RealT == Float32
+ # check `surface_flux_function` (test broken)
+ @test_broken eltype(boundary_condition_slip_wall(u_inner, normal_direction,
+ x, t,
+ surface_flux_function,
+ equations)) == RealT
+ else
+ @test eltype(@inferred boundary_condition_slip_wall(u_inner,
+ normal_direction, x, t,
+ surface_flux_function,
+ equations)) ==
+ RealT
+ end
+
+ @test eltype(@inferred flux(u, normal_direction, equations)) == RealT
+ @test eltype(@inferred max_abs_speed_naive(u_ll, u_rr, normal_direction,
+ equations)) ==
+ RealT
+ @test eltype(@inferred dissipation(u_ll, u_rr, normal_direction, equations)) ==
+ RealT
+
+ for orientation in orientations
+ @test eltype(@inferred flux(u, orientation, equations)) == RealT
+ @test eltype(@inferred max_abs_speed_naive(u_ll, u_rr, orientation,
+ equations)) ==
+ RealT
+ @test eltype(@inferred dissipation(u_ll, u_rr, orientation, equations)) ==
+ RealT
+ end
+
+ @test eltype(@inferred Trixi.max_abs_speeds(u, equations)) == RealT
+ @test eltype(@inferred cons2prim(u, equations)) == RealT
+ @test eltype(@inferred prim2cons(u, equations)) == RealT
+ @test eltype(@inferred cons2entropy(u, equations)) == RealT
+ end
+ end
+
+ @timed_testset "Compressible Euler 1D" begin
+ for RealT in (Float32, Float64)
+ # set gamma = 2 for the coupling convergence test
+ equations = @inferred CompressibleEulerEquations1D(RealT(2))
+
+ x = SVector(zero(RealT))
+ t = zero(RealT)
+ u = u_ll = u_rr = u_inner = SVector(one(RealT), one(RealT), one(RealT))
+ orientation = 1
+ directions = [1, 2]
+ cons = SVector(one(RealT), one(RealT), one(RealT))
+
+ surface_flux_function = flux_lax_friedrichs
+
+ @test eltype(@inferred initial_condition_constant(x, t, equations)) == RealT
+ @test eltype(@inferred initial_condition_convergence_test(x, t, equations)) ==
+ RealT
+ @test eltype(@inferred initial_condition_density_wave(x, t, equations)) == RealT
+ @test eltype(@inferred initial_condition_weak_blast_wave(x, t, equations)) ==
+ RealT
+ @test eltype(@inferred initial_condition_eoc_test_coupled_euler_gravity(x, t,
+ equations)) ==
+ RealT
+
+ @test eltype(@inferred source_terms_convergence_test(u, x, t, equations)) ==
+ RealT
+
+ for direction in directions
+ @test eltype(@inferred boundary_condition_slip_wall(u_inner, orientation,
+ direction,
+ x, t,
+ surface_flux_function,
+ equations)) ==
+ RealT
+ end
+
+ @test eltype(@inferred flux(u, orientation, equations)) == RealT
+ @test eltype(@inferred flux_shima_etal(u_ll, u_rr, orientation, equations)) ==
+ RealT
+ @test eltype(@inferred flux_kennedy_gruber(u_ll, u_rr, orientation, equations)) ==
+ RealT
+ @test eltype(@inferred flux_hllc(u_ll, u_rr, orientation, equations)) == RealT
+ if RealT == Float32
+ # check `ln_mean` (test broken)
+ @test_broken eltype(flux_chandrashekar(u_ll, u_rr, orientation, equations)) ==
+ RealT
+ else
+ @test eltype(@inferred flux_chandrashekar(u_ll, u_rr, orientation,
+ equations)) ==
+ RealT
+ end
+ if RealT == Float32
+ # check `ln_mean` and `inv_ln_mean` (test broken)
+ @test_broken eltype(flux_ranocha(u_ll, u_rr, orientation, equations)) ==
+ RealT
+ else
+ @test eltype(@inferred flux_ranocha(u_ll, u_rr, orientation, equations)) ==
+ RealT
+ end
+
+ @test eltype(eltype(@inferred splitting_steger_warming(u, orientation,
+ equations))) ==
+ RealT
+ @test eltype(eltype(@inferred splitting_vanleer_haenel(u, orientation,
+ equations))) ==
+ RealT
+ @test eltype(eltype(@inferred splitting_coirier_vanleer(u, orientation,
+ equations))) ==
+ RealT
+
+ @test eltype(@inferred max_abs_speed_naive(u_ll, u_rr, orientation, equations)) ==
+ RealT
+ @test eltype(@inferred min_max_speed_naive(u_ll, u_rr, orientation, equations)) ==
+ RealT
+ @test eltype(@inferred min_max_speed_davis(u_ll, u_rr, orientation, equations)) ==
+ RealT
+ @test eltype(@inferred min_max_speed_einfeldt(u_ll, u_rr, orientation,
+ equations)) ==
+ RealT
+
+ @test eltype(@inferred Trixi.max_abs_speeds(u, equations)) == RealT
+ @test eltype(@inferred cons2prim(u, equations)) == RealT
+ @test eltype(@inferred prim2cons(u, equations)) == RealT
+ @test eltype(@inferred cons2entropy(u, equations)) == RealT
+ @test eltype(@inferred entropy2cons(u, equations)) == RealT
+ @test eltype(@inferred density(u, equations)) == RealT
+ @test eltype(@inferred pressure(u, equations)) == RealT
+ @test eltype(@inferred density_pressure(u, equations)) == RealT
+ @test eltype(@inferred entropy(cons, equations)) == RealT
+ @test eltype(@inferred energy_internal(cons, equations)) == RealT
+ end
+ end
+
+ @timed_testset "Compressible Euler 2D" begin
+ for RealT in (Float32, Float64)
+ # set gamma = 2 for the coupling convergence test
+ equations = @inferred CompressibleEulerEquations2D(RealT(2))
+
+ x = SVector(zero(RealT), zero(RealT))
+ t = zero(RealT)
+ u = u_ll = u_rr = u_inner = SVector(one(RealT), one(RealT), one(RealT),
+ one(RealT))
+ orientations = [1, 2]
+ directions = [1, 2, 3, 4]
+ normal_direction = SVector(one(RealT), zero(RealT))
+ cons = SVector(one(RealT), one(RealT), one(RealT), one(RealT))
+
+ surface_flux_function = flux_lax_friedrichs
+ flux_lmars = FluxLMARS(340)
+
+ @test eltype(@inferred initial_condition_constant(x, t, equations)) == RealT
+ @test eltype(@inferred initial_condition_convergence_test(x, t, equations)) ==
+ RealT
+ @test eltype(@inferred initial_condition_density_wave(x, t, equations)) == RealT
+ @test eltype(@inferred initial_condition_weak_blast_wave(x, t, equations)) ==
+ RealT
+ @test eltype(@inferred initial_condition_eoc_test_coupled_euler_gravity(x, t,
+ equations)) ==
+ RealT
+
+ @test eltype(@inferred source_terms_convergence_test(u, x, t, equations)) ==
+ RealT
+ @test eltype(@inferred source_terms_eoc_test_coupled_euler_gravity(u, x, t,
+ equations)) ==
+ RealT
+ @test eltype(@inferred source_terms_eoc_test_euler(u, x, t, equations)) ==
+ RealT
+
+ for orientation in orientations
+ for direction in directions
+ @test eltype(@inferred boundary_condition_slip_wall(u_inner,
+ orientation,
+ direction, x, t,
+ surface_flux_function,
+ equations)) == RealT
+ end
+ end
+
+ @test eltype(@inferred flux(u, normal_direction, equations)) == RealT
+ @test eltype(@inferred flux_shima_etal(u_ll, u_rr, normal_direction, equations)) ==
+ RealT
+ @test eltype(@inferred flux_kennedy_gruber(u_ll, u_rr, normal_direction,
+ equations)) ==
+ RealT
+ @test eltype(@inferred flux_lmars(u_ll, u_rr, normal_direction, equations)) ==
+ RealT
+ @test eltype(@inferred flux_hllc(u_ll, u_rr, normal_direction, equations)) ==
+ RealT
+ if RealT == Float32
+ # check `ln_mean` (test broken)
+ @test_broken eltype(flux_chandrashekar(u_ll, u_rr, normal_direction,
+ equations)) ==
+ RealT
+ else
+ @test eltype(@inferred flux_chandrashekar(u_ll, u_rr, normal_direction,
+ equations)) ==
+ RealT
+ end
+ if RealT == Float32
+ # check `ln_mean` and `inv_ln_mean` (test broken)
+ @test_broken eltype(flux_ranocha(u_ll, u_rr, normal_direction, equations)) ==
+ RealT
+ else
+ @test eltype(@inferred flux_ranocha(u_ll, u_rr, normal_direction,
+ equations)) == RealT
+ end
+
+ @test eltype(eltype(@inferred splitting_drikakis_tsangaris(u, normal_direction,
+ equations))) == RealT
+ @test eltype(eltype(@inferred splitting_vanleer_haenel(u, normal_direction,
+ equations))) ==
+ RealT
+ @test eltype(eltype(@inferred splitting_lax_friedrichs(u, normal_direction,
+ equations))) ==
+ RealT
+
+ @test eltype(@inferred max_abs_speed_naive(u_ll, u_rr, normal_direction,
+ equations)) ==
+ RealT
+ @test eltype(@inferred min_max_speed_naive(u_ll, u_rr, normal_direction,
+ equations)) ==
+ RealT
+ @test eltype(@inferred min_max_speed_davis(u_ll, u_rr, normal_direction,
+ equations)) ==
+ RealT
+ @test eltype(@inferred min_max_speed_einfeldt(u_ll, u_rr, normal_direction,
+ equations)) ==
+ RealT
+
+ for orientation in orientations
+ @test eltype(@inferred flux(u, orientation, equations)) == RealT
+ @test eltype(@inferred flux_shima_etal(u_ll, u_rr, orientation, equations)) ==
+ RealT
+ @test eltype(@inferred flux_kennedy_gruber(u_ll, u_rr, orientation,
+ equations)) ==
+ RealT
+ @test eltype(@inferred flux_lmars(u_ll, u_rr, orientation, equations)) ==
+ RealT
+ @test eltype(@inferred flux_hllc(u_ll, u_rr, orientation, equations)) ==
+ RealT
+ if RealT == Float32
+ # check `ln_mean` (test broken)
+ @test_broken eltype(flux_chandrashekar(u_ll, u_rr, orientation,
+ equations)) ==
+ RealT
+ else
+ @test eltype(@inferred flux_chandrashekar(u_ll, u_rr, orientation,
+ equations)) ==
+ RealT
+ end
+ if RealT == Float32
+ # check `ln_mean` and `inv_ln_mean` (test broken)
+ @test_broken eltype(flux_ranocha(u_ll, u_rr, orientation, equations)) ==
+ RealT
+ else
+ @test eltype(@inferred flux_ranocha(u_ll, u_rr, orientation, equations)) ==
+ RealT
+ end
+
+ @test eltype(eltype(@inferred splitting_steger_warming(u, orientation,
+ equations))) ==
+ RealT
+ @test eltype(eltype(@inferred splitting_drikakis_tsangaris(u, orientation,
+ equations))) ==
+ RealT
+ @test eltype(eltype(@inferred splitting_vanleer_haenel(u, orientation,
+ equations))) ==
+ RealT
+ @test eltype(eltype(@inferred splitting_lax_friedrichs(u, orientation,
+ equations))) ==
+ RealT
+
+ @test eltype(@inferred max_abs_speed_naive(u_ll, u_rr, orientation,
+ equations)) ==
+ RealT
+ @test eltype(@inferred min_max_speed_naive(u_ll, u_rr, orientation,
+ equations)) ==
+ RealT
+ @test eltype(@inferred min_max_speed_davis(u_ll, u_rr, orientation,
+ equations)) ==
+ RealT
+ @test eltype(@inferred min_max_speed_einfeldt(u_ll, u_rr, orientation,
+ equations)) ==
+ RealT
+ end
+
+ @test eltype(@inferred Trixi.max_abs_speeds(u, equations)) == RealT
+ @test eltype(@inferred cons2prim(u, equations)) == RealT
+ @test eltype(@inferred prim2cons(u, equations)) == RealT
+ @test eltype(@inferred cons2entropy(u, equations)) == RealT
+ @test eltype(@inferred entropy2cons(u, equations)) == RealT
+ @test eltype(@inferred Trixi.entropy_guermond_etal(u, equations)) == RealT
+ @test eltype(@inferred Trixi.cons2entropy_guermond_etal(u, equations)) == RealT
+ @test eltype(@inferred density(u, equations)) == RealT
+ @test eltype(@inferred pressure(u, equations)) == RealT
+ @test eltype(@inferred density_pressure(u, equations)) == RealT
+ @test eltype(@inferred entropy(cons, equations)) == RealT
+ @test eltype(@inferred Trixi.entropy_math(cons, equations)) == RealT
+ @test eltype(@inferred Trixi.entropy_thermodynamic(cons, equations)) == RealT
+ @test eltype(@inferred energy_internal(cons, equations)) == RealT
+ # TODO: test `gradient_conservative`, not necessary but good to have
+ end
+ end
+
+ @timed_testset "Compressible Euler 3D" begin
+ for RealT in (Float32, Float64)
+ # set gamma = 2 for the coupling convergence test
+ equations = @inferred CompressibleEulerEquations3D(RealT(2))
+
+ x = SVector(zero(RealT), zero(RealT), zero(RealT))
+ t = zero(RealT)
+ u = u_ll = u_rr = u_inner = SVector(one(RealT), one(RealT), one(RealT),
+ one(RealT), one(RealT))
+ orientations = [1, 2, 3]
+ directions = [1, 2, 3, 4, 5, 6]
+ normal_direction = SVector(one(RealT), zero(RealT), zero(RealT))
+ cons = SVector(one(RealT), one(RealT), one(RealT),
+ one(RealT), one(RealT))
+
+ surface_flux_function = flux_lax_friedrichs
+ flux_lmars = FluxLMARS(340)
+
+ @test eltype(@inferred initial_condition_constant(x, t, equations)) == RealT
+ @test eltype(@inferred initial_condition_convergence_test(x, t, equations)) ==
+ RealT
+ @test eltype(@inferred initial_condition_weak_blast_wave(x, t, equations)) ==
+ RealT
+ @test eltype(@inferred initial_condition_eoc_test_coupled_euler_gravity(x, t,
+ equations)) ==
+ RealT
+
+ @test eltype(@inferred source_terms_convergence_test(u, x, t, equations)) ==
+ RealT
+ @test eltype(@inferred source_terms_eoc_test_coupled_euler_gravity(u, x, t,
+ equations)) ==
+ RealT
+ @test eltype(@inferred source_terms_eoc_test_euler(u, x, t, equations)) == RealT
+
+ for orientation in orientations
+ for direction in directions
+ @test eltype(@inferred boundary_condition_slip_wall(u_inner,
+ orientation,
+ direction, x, t,
+ surface_flux_function,
+ equations)) == RealT
+ end
+ end
+
+ @test eltype(@inferred flux(u, normal_direction, equations)) == RealT
+ @test eltype(@inferred flux_shima_etal(u_ll, u_rr, normal_direction, equations)) ==
+ RealT
+ @test eltype(@inferred flux_kennedy_gruber(u_ll, u_rr, normal_direction,
+ equations)) == RealT
+ @test eltype(@inferred flux_lmars(u_ll, u_rr, normal_direction, equations)) ==
+ RealT
+ @test eltype(@inferred flux_hllc(u_ll, u_rr, normal_direction, equations)) ==
+ RealT
+ if RealT == Float32
+ # check `ln_mean` (test broken)
+ @test_broken eltype(flux_chandrashekar(u_ll, u_rr, normal_direction,
+ equations)) == RealT
+ else
+ @test eltype(@inferred flux_chandrashekar(u_ll, u_rr, normal_direction,
+ equations)) == RealT
+ end
+ if RealT == Float32
+ # check `ln_mean` and `inv_ln_mean` (test broken)
+ @test_broken eltype(flux_ranocha(u_ll, u_rr, normal_direction, equations)) ==
+ RealT
+ else
+ @test eltype(@inferred flux_ranocha(u_ll, u_rr, normal_direction,
+ equations)) == RealT
+ end
+
+ @test eltype(@inferred max_abs_speed_naive(u_ll, u_rr, normal_direction,
+ equations)) ==
+ RealT
+ @test eltype(@inferred min_max_speed_naive(u_ll, u_rr, normal_direction,
+ equations)) == RealT
+ @test eltype(@inferred min_max_speed_davis(u_ll, u_rr, normal_direction,
+ equations)) == RealT
+ @test eltype(@inferred min_max_speed_einfeldt(u_ll, u_rr, normal_direction,
+ equations)) == RealT
+
+ for orientation in orientations
+ @test eltype(@inferred flux(u, orientation, equations)) == RealT
+ @test eltype(@inferred flux_shima_etal(u_ll, u_rr, orientation, equations)) ==
+ RealT
+ @test eltype(@inferred flux_kennedy_gruber(u_ll, u_rr, orientation,
+ equations)) ==
+ RealT
+ @test eltype(@inferred flux_lmars(u_ll, u_rr, orientation, equations)) ==
+ RealT
+ @test eltype(@inferred flux_hllc(u_ll, u_rr, orientation, equations)) ==
+ RealT
+ if RealT == Float32
+ # check `ln_mean` (test broken)
+ @test_broken eltype(flux_chandrashekar(u_ll, u_rr, orientation,
+ equations)) == RealT
+ else
+ @test eltype(@inferred flux_chandrashekar(u_ll, u_rr, orientation,
+ equations)) == RealT
+ end
+ if RealT == Float32
+ # check `ln_mean` and `inv_ln_mean` (test broken)
+ @test_broken eltype(flux_ranocha(u_ll, u_rr, orientation, equations)) ==
+ RealT
+ else
+ @test eltype(@inferred flux_ranocha(u_ll, u_rr, orientation, equations)) ==
+ RealT
+ end
+
+ @test eltype(eltype(@inferred splitting_steger_warming(u, orientation,
+ equations))) ==
+ RealT
+
+ @test eltype(@inferred max_abs_speed_naive(u_ll, u_rr, orientation,
+ equations)) == RealT
+ @test eltype(@inferred min_max_speed_naive(u_ll, u_rr, orientation,
+ equations)) == RealT
+ @test eltype(@inferred min_max_speed_davis(u_ll, u_rr, orientation,
+ equations)) == RealT
+ @test eltype(@inferred min_max_speed_einfeldt(u_ll, u_rr, orientation,
+ equations)) == RealT
+ end
+
+ @test eltype(@inferred Trixi.max_abs_speeds(u, equations)) == RealT
+ @test eltype(@inferred cons2prim(u, equations)) == RealT
+ @test eltype(@inferred prim2cons(u, equations)) == RealT
+ @test eltype(@inferred cons2entropy(u, equations)) == RealT
+ @test eltype(@inferred entropy2cons(u, equations)) == RealT
+ @test eltype(@inferred density(u, equations)) == RealT
+ @test eltype(@inferred pressure(u, equations)) == RealT
+ @test eltype(@inferred density_pressure(u, equations)) == RealT
+ @test eltype(@inferred entropy(cons, equations)) == RealT
+ @test eltype(@inferred Trixi.entropy_math(cons, equations)) == RealT
+ @test eltype(@inferred Trixi.entropy_thermodynamic(cons, equations)) == RealT
+ @test eltype(@inferred energy_internal(cons, equations)) == RealT
+ end
+ end
+end
+
+end # module
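Each testset above follows the same pattern: construct inputs of a given floating-point type `RealT`, call the function under test through `@inferred`, and check that the element type of the result equals `RealT`. A minimal standalone sketch of that pattern (values purely illustrative, assuming Trixi.jl and Test are available; not part of the patch itself):

using Test
using Trixi

RealT = Float32
equations = CompressibleEulerEquations2D(RealT(2))
u = SVector(one(RealT), one(RealT), one(RealT), one(RealT))
# `@inferred` errors if the return type cannot be inferred; the `eltype` check
# verifies that no accidental promotion to Float64 happens inside `flux`.
@test eltype(@inferred flux(u, 1, equations)) == RealT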
From 1f4da14c0ac19654f12e3ded7045ea24976ca916 Mon Sep 17 00:00:00 2001
From: Benedict <135045760+benegee@users.noreply.github.com>
Date: Thu, 6 Jun 2024 19:05:30 +0200
Subject: [PATCH 30/44] Save and load user_data for p4est (#1915)
* save and load user_data for p4est
this fixes restarting when using AMR (see the short sketch after the file list below)
* use p4est_reset_data instead
* add test for p4est, restart, and AMR
* adapt errors in test
* 3D = p8!
* return something
* remove coverage_override
* remove coverage_override
* use ode_default_options()
---
.../elixir_advection_restart_amr.jl | 54 +++++++++++++++++++
.../elixir_advection_extended.jl | 3 +-
.../tree_2d_dgsem/elixir_advection_restart.jl | 3 +-
.../elixir_advection_restart_amr.jl | 4 +-
src/auxiliary/p4est.jl | 20 ++++++-
test/test_mpi_tree.jl | 5 +-
test/test_p4est_2d.jl | 19 ++++++-
7 files changed, 98 insertions(+), 10 deletions(-)
create mode 100644 examples/p4est_2d_dgsem/elixir_advection_restart_amr.jl
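In short, the 2D and 3D `load_p4est` routines below now load the forest without any quadrant user data and then allocate that storage explicitly via `p4est_reset_data`. A condensed sketch of the 2D code path (wrapper names taken from the diff; `file` and `comm` are assumed to be given as in `load_p4est`):

conn_vec = Vector{Ptr{p4est_connectivity_t}}(undef, 1)
# Load the forest itself; data size 0 and load flag 0 mean no user data is read.
p4est = p4est_load_ext(file, comm, 0, 0, 1, 0, C_NULL, pointer(conn_vec))
# p4est_load_ext allocates user data only if it is read from file, so allocate
# uninitialized storage explicitly: two Ints per quadrant, as Trixi.jl expects.
p4est_reset_data(p4est, 2 * sizeof(Int), C_NULL, C_NULL)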
diff --git a/examples/p4est_2d_dgsem/elixir_advection_restart_amr.jl b/examples/p4est_2d_dgsem/elixir_advection_restart_amr.jl
new file mode 100644
index 00000000000..fd3623dd88b
--- /dev/null
+++ b/examples/p4est_2d_dgsem/elixir_advection_restart_amr.jl
@@ -0,0 +1,54 @@
+
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# create a restart file
+
+elixir_file = "elixir_advection_extended.jl"
+restart_file = "restart_000021.h5"
+
+trixi_include(@__MODULE__, joinpath(@__DIR__, elixir_file))
+
+###############################################################################
+# adapt the parameters that have changed compared to "elixir_advection_extended.jl"
+
+# Note: If you get a restart file from somewhere else, you need to provide
+# appropriate setups in the elixir loading a restart file
+
+restart_filename = joinpath("out", restart_file)
+mesh = load_mesh(restart_filename)
+
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver,
+ boundary_conditions = boundary_conditions)
+
+tspan = (load_time(restart_filename), 2.0)
+dt = load_dt(restart_filename)
+ode = semidiscretize(semi, tspan, restart_filename);
+
+# Do not overwrite the initial snapshot written by elixir_advection_extended.jl.
+save_solution.condition.save_initial_solution = false
+
+# Add AMR callback
+amr_controller = ControllerThreeLevel(semi, IndicatorMax(semi, variable = first),
+ base_level = 0,
+ med_level = 0, med_threshold = 0.8,
+ max_level = 1, max_threshold = 1.2)
+amr_callback = AMRCallback(semi, amr_controller,
+ interval = 5,
+ adapt_initial_condition = true,
+ adapt_initial_condition_only_refine = true)
+callbacks_ext = CallbackSet(amr_callback, callbacks.discrete_callbacks...)
+
+integrator = init(ode, CarpenterKennedy2N54(williamson_condition = false),
+ dt = dt, # solve needs some value here but it will be overwritten by the stepsize_callback
+ save_everystep = false, callback = callbacks_ext, maxiters = 100_000);
+
+# Get the last time index and work with that.
+load_timestep!(integrator, restart_filename)
+
+###############################################################################
+# run the simulation
+
+sol = solve!(integrator)
+summary_callback() # print the timer summary
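For reference, the new elixir above can be run like the other restart examples, e.g. via `trixi_include` (assuming Trixi.jl and OrdinaryDiffEq.jl are installed; the path is the one created by this patch):

using Trixi
trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", "elixir_advection_restart_amr.jl"))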
diff --git a/examples/tree_2d_dgsem/elixir_advection_extended.jl b/examples/tree_2d_dgsem/elixir_advection_extended.jl
index 4d3da47b04a..50a509c0724 100644
--- a/examples/tree_2d_dgsem/elixir_advection_extended.jl
+++ b/examples/tree_2d_dgsem/elixir_advection_extended.jl
@@ -78,7 +78,8 @@ callbacks = CallbackSet(summary_callback,
alg = CarpenterKennedy2N54(williamson_condition = false)
sol = solve(ode, alg,
dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
- save_everystep = false, callback = callbacks; ode_default_options()...);
+ callback = callbacks;
+ ode_default_options()...); # default options because an adaptive time stepping method is used in test_mpi_tree.jl
# Print the timer summary
summary_callback()
diff --git a/examples/tree_2d_dgsem/elixir_advection_restart.jl b/examples/tree_2d_dgsem/elixir_advection_restart.jl
index e0d1003f524..6052632ecad 100644
--- a/examples/tree_2d_dgsem/elixir_advection_restart.jl
+++ b/examples/tree_2d_dgsem/elixir_advection_restart.jl
@@ -29,7 +29,8 @@ save_solution.condition.save_initial_solution = false
integrator = init(ode, alg,
dt = dt, # solve needs some value here but it will be overwritten by the stepsize_callback
- callback = callbacks, maxiters = 100_000; ode_default_options()...)
+ callback = callbacks;
+ ode_default_options()...); # default options because an adaptive time stepping method is used in test_mpi_tree.jl
# Load saved context for adaptive time integrator
if integrator.opts.adaptive
diff --git a/examples/tree_2d_dgsem/elixir_advection_restart_amr.jl b/examples/tree_2d_dgsem/elixir_advection_restart_amr.jl
index 2e4ca38a3fa..f366640ef51 100644
--- a/examples/tree_2d_dgsem/elixir_advection_restart_amr.jl
+++ b/examples/tree_2d_dgsem/elixir_advection_restart_amr.jl
@@ -34,13 +34,13 @@ amr_controller = ControllerThreeLevel(semi, IndicatorMax(semi, variable = first)
max_level = 5, max_threshold = 1.2)
amr_callback = AMRCallback(semi, amr_controller,
interval = 5,
- adapt_initial_condition = false,
+ adapt_initial_condition = true,
adapt_initial_condition_only_refine = true)
callbacks_ext = CallbackSet(amr_callback, callbacks.discrete_callbacks...)
integrator = init(ode, alg,
dt = dt, # solve needs some value here but it will be overwritten by the stepsize_callback
- callback = callbacks_ext, maxiters = 100_000; ode_default_options()...)
+ save_everystep = false, callback = callbacks_ext, maxiters = 100_000)
# Load saved context for adaptive time integrator
if integrator.opts.adaptive
diff --git a/src/auxiliary/p4est.jl b/src/auxiliary/p4est.jl
index 0b826254129..2478dd37b66 100644
--- a/src/auxiliary/p4est.jl
+++ b/src/auxiliary/p4est.jl
@@ -99,14 +99,30 @@ end
function load_p4est(file, ::Val{2})
conn_vec = Vector{Ptr{p4est_connectivity_t}}(undef, 1)
comm = P4est.uses_mpi() ? mpi_comm() : C_NULL # Use Trixi.jl's MPI communicator if p4est supports MPI
- p4est_load_ext(file, comm, 0, 0, 1, 0, C_NULL, pointer(conn_vec))
+ p4est = p4est_load_ext(file,
+ comm,
+ 0, # Size of user data
+ 0, # Flag to load user data
+ 1, # Autopartition: ignore saved partition
+ 0, # Have only rank 0 read headers and bcast them
+ C_NULL, # No pointer to user data
+ pointer(conn_vec))
+ # p4est_load_ext only allocates memory when data is also read
+ # use p4est_reset_data to allocate uninitialized memory
+ p4est_reset_data(p4est,
+ 2 * sizeof(Int), # Use Int-Vector of size 2 as quadrant user data
+ C_NULL, # No init function
+ C_NULL) # No pointer to user data
+ return p4est
end
# 3D
function load_p4est(file, ::Val{3})
conn_vec = Vector{Ptr{p8est_connectivity_t}}(undef, 1)
comm = P4est.uses_mpi() ? mpi_comm() : C_NULL # Use Trixi.jl's MPI communicator if p4est supports MPI
- p8est_load_ext(file, comm, 0, 0, 1, 0, C_NULL, pointer(conn_vec))
+ p4est = p8est_load_ext(file, comm, 0, 0, 1, 0, C_NULL, pointer(conn_vec))
+ p8est_reset_data(p4est, 2 * sizeof(Int), C_NULL, C_NULL)
+ return p4est
end
# Read `p4est` connectivity from Abaqus mesh file (.inp)
diff --git a/test/test_mpi_tree.jl b/test/test_mpi_tree.jl
index e6e00b2e6b6..6114e453e56 100644
--- a/test/test_mpi_tree.jl
+++ b/test/test_mpi_tree.jl
@@ -73,9 +73,8 @@ CI_ON_WINDOWS = (get(ENV, "GITHUB_ACTIONS", false) == "true") && Sys.iswindows()
@trixi_testset "elixir_advection_restart_amr.jl" begin
@test_trixi_include(joinpath(EXAMPLES_DIR,
"elixir_advection_restart_amr.jl"),
- l2=[7.870371848717432e-5],
- linf=[0.0007374081713964475],
- coverage_override=(maxiters = 50,))
+ l2=[8.018498574373939e-5],
+ linf=[0.0007307237754662355])
end
# Linear scalar advection with AMR
diff --git a/test/test_p4est_2d.jl b/test/test_p4est_2d.jl
index fbc94fdfd6d..28b7057090b 100644
--- a/test/test_p4est_2d.jl
+++ b/test/test_p4est_2d.jl
@@ -97,7 +97,24 @@ end
linf=[6.21489667023134e-5],
# With the default `maxiters = 1` in coverage tests,
# there would be no time steps after the restart.
- coverage_override=(maxiters = 100_000,))
+ coverage_override=(maxiters = 25,))
+ # Ensure that we do not have excessive memory allocations
+ # (e.g., from type instabilities)
+ let
+ t = sol.t[end]
+ u_ode = sol.u[end]
+ du_ode = similar(u_ode)
+ @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000
+ end
+end
+
+@trixi_testset "elixir_advection_restart_amr.jl" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_restart_amr.jl"),
+ l2=[2.869137983727866e-6],
+ linf=[3.8353423270964804e-5],
+ # With the default `maxiters = 1` in coverage tests,
+ # there would be no time steps after the restart.
+ coverage_override=(maxiters = 25,))
# Ensure that we do not have excessive memory allocations
# (e.g., from type instabilities)
let
From 2043cfe3f79de2b77124c13d57de96a8ddf0ea52 Mon Sep 17 00:00:00 2001
From: Benedict <135045760+benegee@users.noreply.github.com>
Date: Fri, 7 Jun 2024 15:37:13 +0200
Subject: [PATCH 31/44] Print global number of cells and dofs (#1865)
* switch to global count of cells
* introduce ndofsglobal for generic types as fallback
* switch to ndofsglobal for console output
* add ncellsglobal
ncells was used elsewhere and has to be the local number
* Update src/semidiscretization/semidiscretization.jl
Co-authored-by: Hendrik Ranocha
* remove docstring
* ndofsglobal in analysis callback
* remove unnecessary fallback
* Update src/semidiscretization/semidiscretization.jl
Co-authored-by: Michael Schlottke-Lakemper
* Update src/semidiscretization/semidiscretization_coupled.jl
Co-authored-by: Michael Schlottke-Lakemper
* missing calls to *global functions
* sum up and print global element count per level
* formatter
* simplify
* revert change in relative runtime
* add nelementsglobal in analogy to ndofsglobal
* fix signature
* add mesh parameter to nelementsglobal
* :/
* Update src/callbacks_step/analysis.jl
Co-authored-by: Hendrik Ranocha
---------
Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com>
Co-authored-by: Hendrik Ranocha
Co-authored-by: Michael Schlottke-Lakemper
---
src/callbacks_step/amr.jl | 4 +-
src/callbacks_step/analysis.jl | 91 ++++++-------------
src/callbacks_step/analysis_dgmulti.jl | 7 ++
src/callbacks_step/save_restart_dg.jl | 4 +-
src/callbacks_step/save_solution_dg.jl | 6 +-
src/meshes/p4est_mesh.jl | 3 +-
src/meshes/t8code_mesh.jl | 3 +-
src/semidiscretization/semidiscretization.jl | 14 +++
.../semidiscretization_coupled.jl | 14 ++-
.../semidiscretization_hyperbolic.jl | 2 +-
...semidiscretization_hyperbolic_parabolic.jl | 2 +-
src/solvers/dg.jl | 4 +-
12 files changed, 77 insertions(+), 77 deletions(-)
diff --git a/src/callbacks_step/amr.jl b/src/callbacks_step/amr.jl
index 45f03fba8fe..b0afd02aff8 100644
--- a/src/callbacks_step/amr.jl
+++ b/src/callbacks_step/amr.jl
@@ -228,7 +228,7 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::TreeMesh,
if mpi_isparallel()
# Collect lambda for all elements
- lambda_global = Vector{eltype(lambda)}(undef, nelementsglobal(dg, cache))
+ lambda_global = Vector{eltype(lambda)}(undef, nelementsglobal(mesh, dg, cache))
# Use parent because n_elements_by_rank is an OffsetArray
recvbuf = MPI.VBuffer(lambda_global, parent(cache.mpi_cache.n_elements_by_rank))
MPI.Allgatherv!(lambda, recvbuf, mpi_comm())
@@ -380,7 +380,7 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::TreeMesh,
error("MPI has not been verified yet for parabolic AMR")
# Collect lambda for all elements
- lambda_global = Vector{eltype(lambda)}(undef, nelementsglobal(dg, cache))
+ lambda_global = Vector{eltype(lambda)}(undef, nelementsglobal(mesh, dg, cache))
# Use parent because n_elements_by_rank is an OffsetArray
recvbuf = MPI.VBuffer(lambda_global, parent(cache.mpi_cache.n_elements_by_rank))
MPI.Allgatherv!(lambda, recvbuf, mpi_comm())
diff --git a/src/callbacks_step/analysis.jl b/src/callbacks_step/analysis.jl
index 7b4a97c2a79..2c8497dc28d 100644
--- a/src/callbacks_step/analysis.jl
+++ b/src/callbacks_step/analysis.jl
@@ -308,11 +308,11 @@ function (analysis_callback::AnalysisCallback)(u_ode, du_ode, integrator, semi)
mpi_println(" " * " " *
" " *
" PID: " * @sprintf("%10.8e s", performance_index))
- mpi_println(" #DOFs per field:" * @sprintf("% 14d", ndofs(semi)) *
+ mpi_println(" #DOFs per field:" * @sprintf("% 14d", ndofsglobal(semi)) *
" " *
" alloc'd memory: " * @sprintf("%14.3f MiB", memory_use))
mpi_println(" #elements: " *
- @sprintf("% 14d", nelements(mesh, solver, cache)))
+ @sprintf("% 14d", nelementsglobal(mesh, solver, cache)))
# Level information (only show for AMR)
print_amr_information(integrator.opts.callback, mesh, solver, cache)
@@ -494,88 +494,53 @@ function print_amr_information(callbacks, mesh, solver, cache)
# Return early if there is nothing to print
uses_amr(callbacks) || return nothing
- levels = Vector{Int}(undef, nelements(solver, cache))
- min_level = typemax(Int)
- max_level = typemin(Int)
- for element in eachelement(solver, cache)
- current_level = mesh.tree.levels[cache.elements.cell_ids[element]]
- levels[element] = current_level
- min_level = min(min_level, current_level)
- max_level = max(max_level, current_level)
+ # Get global minimum and maximum level from the AMRController
+ min_level = max_level = 0
+ for cb in callbacks.discrete_callbacks
+ if cb.affect! isa AMRCallback
+ min_level = cb.affect!.controller.base_level
+ max_level = cb.affect!.controller.max_level
+ end
end
+ # Get local element count per level
+ elements_per_level = get_elements_per_level(min_level, max_level, mesh, solver,
+ cache)
+
+ # Sum up across all ranks
+ MPI.Reduce!(elements_per_level, +, mpi_root(), mpi_comm())
+
+ # Print
for level in max_level:-1:(min_level + 1)
mpi_println(" ├── level $level: " *
- @sprintf("% 14d", count(==(level), levels)))
+ @sprintf("% 14d", elements_per_level[level + 1 - min_level]))
end
mpi_println(" └── level $min_level: " *
- @sprintf("% 14d", count(==(min_level), levels)))
+ @sprintf("% 14d", elements_per_level[1]))
return nothing
end
-# Print level information only if AMR is enabled
-function print_amr_information(callbacks, mesh::P4estMesh, solver, cache)
-
- # Return early if there is nothing to print
- uses_amr(callbacks) || return nothing
-
+function get_elements_per_level(min_level, max_level, mesh::P4estMesh, solver, cache)
elements_per_level = zeros(P4EST_MAXLEVEL + 1)
for tree in unsafe_wrap_sc(p4est_tree_t, mesh.p4est.trees)
elements_per_level .+= tree.quadrants_per_level
end
- # levels start at zero but Julia's standard indexing starts at 1
- min_level_1 = findfirst(i -> i > 0, elements_per_level)
- max_level_1 = findlast(i -> i > 0, elements_per_level)
-
- # Check if there is at least one level with an element
- if isnothing(min_level_1) || isnothing(max_level_1)
- return nothing
- end
-
- min_level = min_level_1 - 1
- max_level = max_level_1 - 1
-
- for level in max_level:-1:(min_level + 1)
- mpi_println(" ├── level $level: " *
- @sprintf("% 14d", elements_per_level[level + 1]))
- end
- mpi_println(" └── level $min_level: " *
- @sprintf("% 14d", elements_per_level[min_level + 1]))
-
- return nothing
+ return @view(elements_per_level[(min_level + 1):(max_level + 1)])
end
-# Print level information only if AMR is enabled
-function print_amr_information(callbacks, mesh::T8codeMesh, solver, cache)
-
- # Return early if there is nothing to print
- uses_amr(callbacks) || return nothing
-
- # TODO: Switch to global element levels array when MPI supported or find
- # another solution.
+function get_elements_per_level(min_level, max_level, mesh::T8codeMesh, solver, cache)
levels = trixi_t8_get_local_element_levels(mesh.forest)
- min_level = minimum(levels)
- max_level = maximum(levels)
-
- mpi_println(" minlevel = $min_level")
- mpi_println(" maxlevel = $max_level")
-
- if min_level > 0
- elements_per_level = [count(==(l), levels) for l in 1:max_level]
-
- for level in max_level:-1:(min_level + 1)
- mpi_println(" ├── level $level: " *
- @sprintf("% 14d", elements_per_level[level]))
- end
- mpi_println(" └── level $min_level: " *
- @sprintf("% 14d", elements_per_level[min_level]))
- end
+ return [count(==(l), levels) for l in min_level:max_level]
+end
- return nothing
+function get_elements_per_level(min_level, max_level, mesh::TreeMesh, solver, cache)
+ levels = [mesh.tree.levels[cache.elements.cell_ids[element]]
+ for element in eachelement(solver, cache)]
+ return [count(==(l), levels) for l in min_level:max_level]
end
# Iterate over tuples of analysis integrals in a type-stable way using "lispy tuple programming".
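As a standalone illustration (not Trixi code) of the counting pattern introduced above, with made-up level data:

    # Count local elements per refinement level, then (in an MPI run) sum the
    # local counts on the root rank before printing. Serial sketch:
    levels = [2, 3, 3, 4, 2, 3]    # hypothetical local element refinement levels
    min_level, max_level = 2, 4    # bounds taken from the AMR controller
    elements_per_level = [count(==(l), levels) for l in min_level:max_level]
    # -> [2, 3, 1]; in parallel, MPI.Reduce!(elements_per_level, +, mpi_root(), mpi_comm())
    # accumulates the local counts across all ranks.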
diff --git a/src/callbacks_step/analysis_dgmulti.jl b/src/callbacks_step/analysis_dgmulti.jl
index dc294de9e7b..1f0eec2de34 100644
--- a/src/callbacks_step/analysis_dgmulti.jl
+++ b/src/callbacks_step/analysis_dgmulti.jl
@@ -185,6 +185,13 @@ end
SolutionAnalyzer(rd::RefElemData) = rd
nelements(mesh::DGMultiMesh, ::DGMulti, other_args...) = mesh.md.num_elements
+function nelementsglobal(mesh::DGMultiMesh, solver::DGMulti, cache)
+ if mpi_isparallel()
+ error("`nelementsglobal` is not implemented for `DGMultiMesh` when used in parallel with MPI")
+ else
+ return nelements(mesh, solver, cache)
+ end
+end
function ndofsglobal(mesh::DGMultiMesh, solver::DGMulti, cache)
if mpi_isparallel()
error("`ndofsglobal` is not implemented for `DGMultiMesh` when used in parallel with MPI")
diff --git a/src/callbacks_step/save_restart_dg.jl b/src/callbacks_step/save_restart_dg.jl
index cddeef77bb2..b83402c5f86 100644
--- a/src/callbacks_step/save_restart_dg.jl
+++ b/src/callbacks_step/save_restart_dg.jl
@@ -126,7 +126,7 @@ function save_restart_file_parallel(u, time, dt, timestep,
attributes(file)["equations"] = get_name(equations)
attributes(file)["polydeg"] = polydeg(dg)
attributes(file)["n_vars"] = nvariables(equations)
- attributes(file)["n_elements"] = nelementsglobal(dg, cache)
+ attributes(file)["n_elements"] = nelementsglobal(mesh, dg, cache)
attributes(file)["mesh_type"] = get_name(mesh)
attributes(file)["mesh_file"] = splitdir(mesh.current_filename)[2]
attributes(file)["time"] = convert(Float64, time) # Ensure that `time` is written as a double precision scalar
@@ -239,7 +239,7 @@ function load_restart_file_parallel(mesh::Union{ParallelTreeMesh, ParallelP4estM
if read(attributes(file)["polydeg"]) != polydeg(dg)
error("restart mismatch: polynomial degree in solver differs from value in restart file")
end
- if read(attributes(file)["n_elements"]) != nelementsglobal(dg, cache)
+ if read(attributes(file)["n_elements"]) != nelementsglobal(mesh, dg, cache)
error("restart mismatch: number of elements in solver differs from value in restart file")
end
diff --git a/src/callbacks_step/save_solution_dg.jl b/src/callbacks_step/save_solution_dg.jl
index 7367886ca94..deae8f7c930 100644
--- a/src/callbacks_step/save_solution_dg.jl
+++ b/src/callbacks_step/save_solution_dg.jl
@@ -158,7 +158,7 @@ function save_solution_file_parallel(data, time, dt, timestep, n_vars,
attributes(file)["equations"] = get_name(equations)
attributes(file)["polydeg"] = polydeg(dg)
attributes(file)["n_vars"] = n_vars
- attributes(file)["n_elements"] = nelementsglobal(dg, cache)
+ attributes(file)["n_elements"] = nelementsglobal(mesh, dg, cache)
attributes(file)["mesh_type"] = get_name(mesh)
attributes(file)["mesh_file"] = splitdir(mesh.current_filename)[2]
attributes(file)["time"] = convert(Float64, time) # Ensure that `time` is written as a double precision scalar
@@ -183,7 +183,7 @@ function save_solution_file_parallel(data, time, dt, timestep, n_vars,
# Need to create dataset explicitly in parallel case
var = create_dataset(file, "/element_variables_$v",
datatype(eltype(element_variable)),
- dataspace((nelementsglobal(dg, cache),)))
+ dataspace((nelementsglobal(mesh, dg, cache),)))
# Write data of each process in slices (ranks start with 0)
slice = (cum_element_counts[mpi_rank() + 1] + 1):cum_element_counts[mpi_rank() + 2]
@@ -230,7 +230,7 @@ function save_solution_file_on_root(data, time, dt, timestep, n_vars,
attributes(file)["equations"] = get_name(equations)
attributes(file)["polydeg"] = polydeg(dg)
attributes(file)["n_vars"] = n_vars
- attributes(file)["n_elements"] = nelementsglobal(dg, cache)
+ attributes(file)["n_elements"] = nelementsglobal(mesh, dg, cache)
attributes(file)["mesh_type"] = get_name(mesh)
attributes(file)["mesh_file"] = splitdir(mesh.current_filename)[2]
attributes(file)["time"] = convert(Float64, time) # Ensure that `time` is written as a double precision scalar
diff --git a/src/meshes/p4est_mesh.jl b/src/meshes/p4est_mesh.jl
index 6bb98196231..2046220aeca 100644
--- a/src/meshes/p4est_mesh.jl
+++ b/src/meshes/p4est_mesh.jl
@@ -94,6 +94,7 @@ end
end
# returns Int32 by default which causes a weird method error when creating the cache
@inline ncells(mesh::P4estMesh) = Int(mesh.p4est.local_num_quadrants[])
+@inline ncellsglobal(mesh::P4estMesh) = Int(mesh.p4est.global_num_quadrants[])
function Base.show(io::IO, mesh::P4estMesh)
print(io, "P4estMesh{", ndims(mesh), ", ", real(mesh), "}")
@@ -105,7 +106,7 @@ function Base.show(io::IO, ::MIME"text/plain", mesh::P4estMesh)
else
setup = [
"#trees" => ntrees(mesh),
- "current #cells" => ncells(mesh),
+ "current #cells" => ncellsglobal(mesh),
"polydeg" => length(mesh.nodes) - 1,
]
summary_box(io,
diff --git a/src/meshes/t8code_mesh.jl b/src/meshes/t8code_mesh.jl
index 0af4c6ae023..10a042be3ba 100644
--- a/src/meshes/t8code_mesh.jl
+++ b/src/meshes/t8code_mesh.jl
@@ -80,6 +80,7 @@ const ParallelT8codeMesh{NDIMS} = T8codeMesh{NDIMS, <:Real, <:True}
@inline ntrees(mesh::T8codeMesh) = size(mesh.tree_node_coordinates)[end]
@inline ncells(mesh::T8codeMesh) = Int(t8_forest_get_local_num_elements(mesh.forest))
+@inline ncellsglobal(mesh::T8codeMesh) = Int(t8_forest_get_global_num_elements(mesh.forest))
function Base.show(io::IO, mesh::T8codeMesh)
print(io, "T8codeMesh{", ndims(mesh), ", ", real(mesh), "}")
@@ -91,7 +92,7 @@ function Base.show(io::IO, ::MIME"text/plain", mesh::T8codeMesh)
else
setup = [
"#trees" => ntrees(mesh),
- "current #cells" => ncells(mesh),
+ "current #cells" => ncellsglobal(mesh),
"polydeg" => length(mesh.nodes) - 1,
]
summary_box(io,
diff --git a/src/semidiscretization/semidiscretization.jl b/src/semidiscretization/semidiscretization.jl
index 8518cf27fd3..c6b82d5f37b 100644
--- a/src/semidiscretization/semidiscretization.jl
+++ b/src/semidiscretization/semidiscretization.jl
@@ -15,6 +15,19 @@ Return the number of degrees of freedom associated with each scalar variable.
ndofs(mesh, solver, cache)
end
+"""
+ ndofsglobal(semi::AbstractSemidiscretization)
+
+Return the global number of degrees of freedom associated with each scalar variable across all MPI ranks.
+This is the same as [`ndofs`](@ref) for simulations running in serial or
+parallelized via threads. It will in general be different for simulations
+running in parallel with MPI.
+"""
+@inline function ndofsglobal(semi::AbstractSemidiscretization)
+ mesh, _, solver, cache = mesh_equations_solver_cache(semi)
+ ndofsglobal(mesh, solver, cache)
+end
+
"""
integrate_via_indices(func, u_ode, semi::AbstractSemidiscretization, args...; normalize=true)
@@ -397,6 +410,7 @@ end
# TODO: Taal, document interface?
# New mesh/solver combinations have to implement
# - ndofs(mesh, solver, cache)
+# - ndofsglobal(mesh, solver, cache)
# - ndims(mesh)
# - nnodes(solver)
# - real(solver)
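A minimal usage sketch (not part of the patch) of the new `ndofsglobal(semi)`, assuming a small serial setup in which it coincides with `ndofs(semi)`:

    using Trixi

    equations = LinearScalarAdvectionEquation2D(1.0, 1.0)
    mesh = TreeMesh((-1.0, -1.0), (1.0, 1.0),
                    initial_refinement_level = 3, n_cells_max = 10_000)
    solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs)
    semi = SemidiscretizationHyperbolic(mesh, equations,
                                        initial_condition_convergence_test, solver)

    # In serial (or with threads) both counts agree; with MPI, `ndofsglobal`
    # sums the degrees of freedom over all ranks while `ndofs` is rank-local.
    Trixi.ndofsglobal(semi) == Trixi.ndofs(semi)   # true in this serial run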
diff --git a/src/semidiscretization/semidiscretization_coupled.jl b/src/semidiscretization/semidiscretization_coupled.jl
index cc629d1a674..c5c21584dca 100644
--- a/src/semidiscretization/semidiscretization_coupled.jl
+++ b/src/semidiscretization/semidiscretization_coupled.jl
@@ -81,7 +81,7 @@ function Base.show(io::IO, ::MIME"text/plain", semi::SemidiscretizationCoupled)
semi.semis[i].source_terms)
summary_line(increment_indent(io), "solver", solver |> typeof |> nameof)
end
- summary_line(io, "total #DOFs per field", ndofs(semi))
+ summary_line(io, "total #DOFs per field", ndofsglobal(semi))
summary_footer(io)
end
end
@@ -123,6 +123,18 @@ end
sum(ndofs, semi.semis)
end
+"""
+ ndofsglobal(semi::SemidiscretizationCoupled)
+
+Return the global number of degrees of freedom associated with each scalar variable across all MPI ranks, and summed up over all coupled systems.
+This is the same as [`ndofs`](@ref) for simulations running in serial or
+parallelized via threads. It will in general be different for simulations
+running in parallel with MPI.
+"""
+@inline function ndofsglobal(semi::SemidiscretizationCoupled)
+ sum(ndofsglobal, semi.semis)
+end
+
function compute_coefficients(t, semi::SemidiscretizationCoupled)
@unpack u_indices = semi
diff --git a/src/semidiscretization/semidiscretization_hyperbolic.jl b/src/semidiscretization/semidiscretization_hyperbolic.jl
index dcd211671c8..7c82a132a0b 100644
--- a/src/semidiscretization/semidiscretization_hyperbolic.jl
+++ b/src/semidiscretization/semidiscretization_hyperbolic.jl
@@ -315,7 +315,7 @@ function Base.show(io::IO, ::MIME"text/plain", semi::SemidiscretizationHyperboli
summary_line(io, "source terms", semi.source_terms)
summary_line(io, "solver", semi.solver |> typeof |> nameof)
- summary_line(io, "total #DOFs per field", ndofs(semi))
+ summary_line(io, "total #DOFs per field", ndofsglobal(semi))
summary_footer(io)
end
end
diff --git a/src/semidiscretization/semidiscretization_hyperbolic_parabolic.jl b/src/semidiscretization/semidiscretization_hyperbolic_parabolic.jl
index 57724374acb..16f8da21c1e 100644
--- a/src/semidiscretization/semidiscretization_hyperbolic_parabolic.jl
+++ b/src/semidiscretization/semidiscretization_hyperbolic_parabolic.jl
@@ -229,7 +229,7 @@ function Base.show(io::IO, ::MIME"text/plain",
summary_line(io, "source terms", semi.source_terms)
summary_line(io, "solver", semi.solver |> typeof |> nameof)
summary_line(io, "parabolic solver", semi.solver_parabolic |> typeof |> nameof)
- summary_line(io, "total #DOFs per field", ndofs(semi))
+ summary_line(io, "total #DOFs per field", ndofsglobal(semi))
summary_footer(io)
end
end
diff --git a/src/solvers/dg.jl b/src/solvers/dg.jl
index 0ab947e697a..fb4c8f182e0 100644
--- a/src/solvers/dg.jl
+++ b/src/solvers/dg.jl
@@ -459,7 +459,7 @@ In particular, not the nodes themselves are returned.
# `mesh` for some combinations of mesh/solver.
@inline nelements(mesh, dg::DG, cache) = nelements(dg, cache)
@inline function ndofsglobal(mesh, dg::DG, cache)
- nelementsglobal(dg, cache) * nnodes(dg)^ndims(mesh)
+ nelementsglobal(mesh, dg, cache) * nnodes(dg)^ndims(mesh)
end
"""
@@ -517,7 +517,7 @@ In particular, not the mortars themselves are returned.
@inline eachmpimortar(dg::DG, cache) = Base.OneTo(nmpimortars(dg, cache))
@inline nelements(dg::DG, cache) = nelements(cache.elements)
-@inline function nelementsglobal(dg::DG, cache)
+@inline function nelementsglobal(mesh, dg::DG, cache)
mpi_isparallel() ? cache.mpi_cache.n_elements_global : nelements(dg, cache)
end
@inline ninterfaces(dg::DG, cache) = ninterfaces(cache.interfaces)
From fc15c3873d2c0174db1f0a38aab13ff5e6132379 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Mon, 10 Jun 2024 09:14:11 +0200
Subject: [PATCH 32/44] fix links to JuliaCon HPC minisymposium (#1972)
---
README.md | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/README.md b/README.md
index 87075c885f9..ee6e45f92a2 100644
--- a/README.md
+++ b/README.md
@@ -30,12 +30,12 @@ from the Trixi Framework ecosystem:
* [**Towards Aerodynamic Simulations in Julia with Trixi.jl**](https://pretalx.com/juliacon2024/talk/XH8KBG/),
[*Daniel Doehring*](https://github.com/danieldoehring/),
10th July 2024, 15:00pm–15:30pm, While Loop (4.2)
-* [**libtrixi: serving legacy codes in earth system modeling with fresh Julia CFD**](https://pretalx.com/juliacon2024/talk/JBKVGF/),
+* [**libtrixi: serving legacy codes in earth system modeling with fresh Julia CFD**](https://pretalx.com/juliacon2024/talk/SXC7LA/),
[*Benedict Geihe*](https://github.com/benegee/),
12th July 2024, 14:00pm–17:00pm, Function (4.1)
The last talk is part of the
-[Julia for High-Performance Computing](https://pretalx.com/juliacon2024/talk/JBKVGF/)
+[Julia for High-Performance Computing](https://juliacon.org/2024/minisymposia/hpc/)
minisymposium, which this year is hosted by our own [*Hendrik Ranocha*](https://github.com/ranocha/).
We are looking forward to seeing you there ♥️
From 23c70ba6ba19b1e69668d3b1b7b6ef0ebac08b5e Mon Sep 17 00:00:00 2001
From: Jesse Chan <1156048+jlchan@users.noreply.github.com>
Date: Mon, 10 Jun 2024 17:28:08 -0500
Subject: [PATCH 33/44] Fix `entropy` for quasi-1D compressible Euler (#1974)
* Fix `entropy` for quasi-1D compressible Euler
Fixes https://github.com/trixi-framework/Trixi.jl/pull/1947/files#r1628569760
* Update test_tree_1d_euler.jl
adding a test
* Update src/equations/compressible_euler_quasi_1d.jl
Co-authored-by: Hendrik Ranocha
* Update test/test_tree_1d_euler.jl
Co-authored-by: Hendrik Ranocha
* Update test/test_tree_1d_euler.jl
Co-authored-by: Hendrik Ranocha
* fix formatting
* fix formatting again
---------
Co-authored-by: Hendrik Ranocha
---
src/equations/compressible_euler_quasi_1d.jl | 6 ++----
test/test_tree_1d_euler.jl | 9 +++++++++
2 files changed, 11 insertions(+), 4 deletions(-)
diff --git a/src/equations/compressible_euler_quasi_1d.jl b/src/equations/compressible_euler_quasi_1d.jl
index 6844bf9bee5..9c7e3a7269b 100644
--- a/src/equations/compressible_euler_quasi_1d.jl
+++ b/src/equations/compressible_euler_quasi_1d.jl
@@ -314,10 +314,8 @@ end
# 1D compressible Euler equations scaled by the channel width `a`.
@inline function entropy(u, equations::CompressibleEulerEquationsQuasi1D)
a_rho, a_rho_v1, a_e, a = u
- q = a * entropy(SVector(a_rho, a_rho_v1, a_e) / a,
- CompressibleEulerEquations1D(equations.gamma))
-
- return SVector(q[1], q[2], q[3], a)
+ return a * entropy(SVector(a_rho, a_rho_v1, a_e) / a,
+ CompressibleEulerEquations1D(equations.gamma))
end
# Convert conservative variables to entropy. The entropy variables for the
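In formula form (consistent with the change above and the new test below), with channel width $a$ and 1D state $w = (\rho, \rho v_1, \rho e)$, the quasi-1D mathematical entropy is the scalar
$$
S_{\text{quasi-1D}}(a\rho,\, a\rho v_1,\, a\rho e,\, a) = a \, S_{\text{1D}}(\rho,\, \rho v_1,\, \rho e),
$$
rather than an SVector as returned previously.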
diff --git a/test/test_tree_1d_euler.jl b/test/test_tree_1d_euler.jl
index 784d123128e..dc523586f89 100644
--- a/test/test_tree_1d_euler.jl
+++ b/test/test_tree_1d_euler.jl
@@ -409,6 +409,14 @@ end
end
end
+@trixi_testset "test_quasi_1D_entropy" begin
+ a = 0.9
+ u_1D = SVector(1.1, 0.2, 2.1)
+ u_quasi_1D = SVector(a * 1.1, a * 0.2, a * 2.1, a)
+ @test entropy(u_quasi_1D, CompressibleEulerEquationsQuasi1D(1.4)) ≈
+ a * entropy(u_1D, CompressibleEulerEquations1D(1.4))
+end
+
@trixi_testset "elixir_euler_quasi_1d_source_terms.jl" begin
@test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_quasi_1d_source_terms.jl"),
l2=[
@@ -423,6 +431,7 @@ end
1.821888865105592e-6,
1.1166012159335992e-7,
])
+
# Ensure that we do not have excessive memory allocations
# (e.g., from type instabilities)
let
From d81849a9803afa438c8f2c1837b8c6af16b9999a Mon Sep 17 00:00:00 2001
From: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com>
Date: Tue, 11 Jun 2024 13:57:02 +0200
Subject: [PATCH 34/44] Move timers and `@trixi_timeit` to TrixiBase.jl (#1970)
* Move timers and `@trixi_timeit` to TrixiBase.jl
* Remove unused import
---
Project.toml | 2 +-
src/Trixi.jl | 4 ++--
src/auxiliary/auxiliary.jl | 40 --------------------------------------
3 files changed, 3 insertions(+), 43 deletions(-)
diff --git a/Project.toml b/Project.toml
index 9df96d6efa5..a6911d4e2c3 100644
--- a/Project.toml
+++ b/Project.toml
@@ -104,7 +104,7 @@ TimerOutputs = "0.5.7"
Triangulate = "2.2"
TriplotBase = "0.1"
TriplotRecipes = "0.1"
-TrixiBase = "0.1.1"
+TrixiBase = "0.1.3"
UUIDs = "1.6"
julia = "1.8"
diff --git a/src/Trixi.jl b/src/Trixi.jl
index 3a882d0962c..b8364eef445 100644
--- a/src/Trixi.jl
+++ b/src/Trixi.jl
@@ -64,13 +64,13 @@ using Static: Static, One, True, False
using StaticArrays: StaticArrays, MVector, MArray, SMatrix, @SMatrix
using StrideArrays: PtrArray, StrideArray, StaticInt
@reexport using StructArrays: StructArrays, StructArray
-using TimerOutputs: TimerOutputs, @notimeit, TimerOutput, print_timer, reset_timer!
+using TimerOutputs: TimerOutputs, @notimeit, print_timer, reset_timer!
using Triangulate: Triangulate, TriangulateIO
export TriangulateIO # for type parameter in DGMultiMesh
using TriplotBase: TriplotBase
using TriplotRecipes: DGTriPseudocolor
@reexport using TrixiBase: trixi_include
-using TrixiBase: TrixiBase
+using TrixiBase: TrixiBase, @trixi_timeit, timer
@reexport using SimpleUnPack: @unpack
using SimpleUnPack: @pack!
using DataStructures: BinaryHeap, FasterForward, extract_all!
diff --git a/src/auxiliary/auxiliary.jl b/src/auxiliary/auxiliary.jl
index 972a748c56b..6259e936737 100644
--- a/src/auxiliary/auxiliary.jl
+++ b/src/auxiliary/auxiliary.jl
@@ -4,19 +4,6 @@
include("containers.jl")
include("math.jl")
-# Enable debug timings `@trixi_timeit timer() "name" stuff...`.
-# This allows us to disable timings completely by executing
-# `TimerOutputs.disable_debug_timings(Trixi)`
-# and to enable them again by executing
-# `TimerOutputs.enable_debug_timings(Trixi)`
-timeit_debug_enabled() = true
-
-# Store main timer for global timing of functions
-const main_timer = TimerOutput()
-
-# Always call timer() to hide implementation details
-timer() = main_timer
-
# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
# Since these FMAs can increase the performance of many numerical algorithms,
# we need to opt-in explicitly.
@@ -249,33 +236,6 @@ macro threaded(expr)
end)
end
-# @trixi_timeit timer() "some label" expression
-#
-# Basically the same as a special case of `@timeit_debug` from
-# [TimerOutputs.jl](https://github.com/KristofferC/TimerOutputs.jl),
-# but without `try ... finally ... end` block. Thus, it's not exception-safe,
-# but it also avoids some related performance problems. Since we do not use
-# exception handling in Trixi.jl, that's not really an issue.
-macro trixi_timeit(timer_output, label, expr)
- timeit_block = quote
- if timeit_debug_enabled()
- local to = $(esc(timer_output))
- local enabled = to.enabled
- if enabled
- local accumulated_data = $(TimerOutputs.push!)(to, $(esc(label)))
- end
- local b₀ = $(TimerOutputs.gc_bytes)()
- local t₀ = $(TimerOutputs.time_ns)()
- end
- local val = $(esc(expr))
- if timeit_debug_enabled() && enabled
- $(TimerOutputs.do_accumulate!)(accumulated_data, t₀, b₀)
- $(TimerOutputs.pop!)(to)
- end
- val
- end
-end
-
"""
@autoinfiltrate
@autoinfiltrate condition::Bool
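For reference (not part of the patch), a minimal usage sketch of the macro after the move, assuming TrixiBase >= 0.1.3 provides `@trixi_timeit` and `timer` with the same semantics as the code removed here:

    using TrixiBase: @trixi_timeit, timer

    # Time an expression under a label; the measurement accumulates in the
    # global timer returned by `timer()` and shows up in the timer summary.
    @trixi_timeit timer() "example work" begin
        sum(abs2, rand(10^6))
    end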
From c090422740c78c0d096c76f016cad67c4540f26e Mon Sep 17 00:00:00 2001
From: Michael Schlottke-Lakemper
Date: Fri, 14 Jun 2024 08:39:01 +0200
Subject: [PATCH 35/44] Update Michael's affiliation to U of Augsburg (#1977)
* Update Michael's affiliation to U of Augsburg
* Update docs
* Update zenodo
* Update authors
* Fix email address
* Update URLs
---
.zenodo.json | 2 +-
AUTHORS.md | 4 ++--
CODE_OF_CONDUCT.md | 2 +-
README.md | 4 ++--
docs/src/index.md | 4 ++--
5 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/.zenodo.json b/.zenodo.json
index 905c0170ab9..863c586df4d 100644
--- a/.zenodo.json
+++ b/.zenodo.json
@@ -5,7 +5,7 @@
"upload_type": "software",
"creators": [
{
- "affiliation": "Applied and Computational Mathematics, RWTH Aachen University, Germany",
+ "affiliation": "High-Performance Scientific Computing, University of Augsburg, Germany",
"name": "Schlottke-Lakemper, Michael",
"orcid": "0000-0002-3195-2536"
},
diff --git a/AUTHORS.md b/AUTHORS.md
index f1debf8ba76..8e3afcf8671 100644
--- a/AUTHORS.md
+++ b/AUTHORS.md
@@ -7,8 +7,8 @@ provided substantial additions or modifications. Together, these two groups form
"The Trixi.jl Authors" as mentioned in the [LICENSE.md](LICENSE.md) file.
## Principal Developers
-* [Michael Schlottke-Lakemper](https://lakemper.eu),
- RWTH Aachen University/High-Performance Computing Center Stuttgart (HLRS), Germany
+* [Michael Schlottke-Lakemper](https://www.uni-augsburg.de/fakultaet/mntf/math/prof/hpsc),
+ University of Augsburg, Germany
* [Gregor Gassner](https://www.mi.uni-koeln.de/NumSim/gregor-gassner),
University of Cologne, Germany
* [Hendrik Ranocha](https://ranocha.de),
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
index 37e79e3175b..e0bdd968873 100644
--- a/CODE_OF_CONDUCT.md
+++ b/CODE_OF_CONDUCT.md
@@ -60,7 +60,7 @@ representative at an online or offline event.
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to
-[Michael Schlottke-Lakemper](mailto:m.schlottke-lakemper@acom.rwth-aachen.de),
+[Michael Schlottke-Lakemper](mailto:michael.schlottke-lakemper@uni-a.de),
[Hendrik Ranocha](mailto:mail@ranocha.de),
or any other of the principal developers responsible for enforcement listed in
[AUTHORS.md](AUTHORS.md).
diff --git a/README.md b/README.md
index ee6e45f92a2..e0706d4fc7b 100644
--- a/README.md
+++ b/README.md
@@ -259,8 +259,8 @@ In addition, you can also refer to Trixi.jl directly as
## Authors
Trixi.jl was initiated by [Michael
-Schlottke-Lakemper](https://lakemper.eu)
-(RWTH Aachen University/High-Performance Computing Center Stuttgart (HLRS), Germany) and
+Schlottke-Lakemper](https://www.uni-augsburg.de/fakultaet/mntf/math/prof/hpsc)
+(University of Augsburg, Germany) and
[Gregor Gassner](https://www.mi.uni-koeln.de/NumSim/gregor-gassner)
(University of Cologne, Germany). Together with [Hendrik Ranocha](https://ranocha.de)
(Johannes Gutenberg University Mainz, Germany), [Andrew Winters](https://liu.se/en/employee/andwi94)
diff --git a/docs/src/index.md b/docs/src/index.md
index fbb4b36b224..869caaed85f 100644
--- a/docs/src/index.md
+++ b/docs/src/index.md
@@ -323,8 +323,8 @@ In addition, you can also refer to Trixi.jl directly as
## [Authors](@id authors-index-md)
Trixi.jl was initiated by [Michael
-Schlottke-Lakemper](https://lakemper.eu)
-(RWTH Aachen University/High-Performance Computing Center Stuttgart (HLRS), Germany) and
+Schlottke-Lakemper](https://www.uni-augsburg.de/fakultaet/mntf/math/prof/hpsc)
+(University of Augsburg, Germany) and
[Gregor Gassner](https://www.mi.uni-koeln.de/NumSim/gregor-gassner)
(University of Cologne, Germany). Together with [Hendrik Ranocha](https://ranocha.de)
(Johannes Gutenberg University Mainz, Germany) and [Andrew Winters](https://liu.se/en/employee/andwi94)
From 19b10f4f271ca61e72837fc7ca47a1b4fc99542c Mon Sep 17 00:00:00 2001
From: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com>
Date: Fri, 14 Jun 2024 10:19:25 +0100
Subject: [PATCH 36/44] specify version of JuliaFormatter in docs (#1976)
Co-authored-by: Hendrik Ranocha
---
docs/src/styleguide.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/src/styleguide.md b/docs/src/styleguide.md
index 2f28dbfcb17..83d4dfee1bb 100644
--- a/docs/src/styleguide.md
+++ b/docs/src/styleguide.md
@@ -51,7 +51,7 @@ PRs that verify that running JuliaFormatter.jl again will not change the source
To format your contributions before creating a PR (or, at least, before requesting a review
of your PR), you need to install JuliaFormatter.jl first by running
```shell
-julia -e 'using Pkg; Pkg.add("JuliaFormatter")'
+julia -e 'using Pkg; Pkg.add(PackageSpec(name = "JuliaFormatter", version="1.0.45"))'
```
You can then recursively format the core Julia files in the Trixi.jl repo by executing
```shell
From 8b3a6135b45720b2d59983253f8a315727ac92c7 Mon Sep 17 00:00:00 2001
From: Daniel Doehring
Date: Fri, 14 Jun 2024 15:53:33 +0200
Subject: [PATCH 37/44] solve! instead of solve_steps! (#1982)
Co-authored-by: Hendrik Ranocha
---
.../paired_explicit_runge_kutta/methods_PERK2.jl | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl b/src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl
index b3b917dc18d..1d9680153e6 100644
--- a/src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl
+++ b/src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl
@@ -273,10 +273,10 @@ function solve(ode::ODEProblem, alg::PairedExplicitRK2;
integrator = init(ode, alg, dt = dt, callback = callback; kwargs...)
# Start actual solve
- solve_steps!(integrator)
+ solve!(integrator)
end
-function solve_steps!(integrator::PairedExplicitRK2Integrator)
+function solve!(integrator::PairedExplicitRK2Integrator)
@unpack prob = integrator.sol
integrator.finalstep = false
From 7b57dd52dbee224bdeba352cbda44178cab89e2f Mon Sep 17 00:00:00 2001
From: Huiyu Xie
Date: Fri, 14 Jun 2024 03:57:10 -1000
Subject: [PATCH 38/44] Add numerical support of other real types (continue)
(#1947)
* start main continue
* rerun CI tests
* test CI tests
* complete the first
* compressible euler quasi 1d
* compressible euler multicomponent 1d
* compressible euler multicomponent 2d
* complete unit tests
* fix comments
* complete
* apply strict type check
Co-authored-by: Hendrik Ranocha
* cover all
* cover before
* add me
---------
Co-authored-by: Hendrik Ranocha
---
AUTHORS.md | 1 +
.../compressible_euler_multicomponent_1d.jl | 160 +++++-----
.../compressible_euler_multicomponent_2d.jl | 204 +++++++------
src/equations/compressible_euler_quasi_1d.jl | 52 ++--
test/test_type.jl | 286 +++++++++++++++---
5 files changed, 458 insertions(+), 245 deletions(-)
diff --git a/AUTHORS.md b/AUTHORS.md
index 8e3afcf8671..54d63216335 100644
--- a/AUTHORS.md
+++ b/AUTHORS.md
@@ -43,3 +43,4 @@ are listed in alphabetical order:
* Michael Schlottke-Lakemper
* Toskan Theine
* Andrew Winters
+* Huiyu Xie
diff --git a/src/equations/compressible_euler_multicomponent_1d.jl b/src/equations/compressible_euler_multicomponent_1d.jl
index 6338e04c3ed..d4f6b421f7c 100644
--- a/src/equations/compressible_euler_multicomponent_1d.jl
+++ b/src/equations/compressible_euler_multicomponent_1d.jl
@@ -123,14 +123,15 @@ A smooth initial condition used for convergence tests in combination with
"""
function initial_condition_convergence_test(x, t,
equations::CompressibleEulerMulticomponentEquations1D)
+ RealT = eltype(x)
c = 2
- A = 0.1
+ A = convert(RealT, 0.1)
L = 2
- f = 1 / L
- omega = 2 * pi * f
+ f = 1.0f0 / L
+ omega = 2 * convert(RealT, pi) * f
ini = c + A * sin(omega * (x[1] - t))
- v1 = 1.0
+ v1 = 1
rho = ini
@@ -144,7 +145,7 @@ function initial_condition_convergence_test(x, t,
prim1 = rho * v1
prim2 = rho^2
- prim_other = SVector{2, real(equations)}(prim1, prim2)
+ prim_other = SVector(prim1, prim2)
return vcat(prim_other, prim_rho)
end
@@ -159,26 +160,27 @@ Source terms used for convergence tests in combination with
@inline function source_terms_convergence_test(u, x, t,
equations::CompressibleEulerMulticomponentEquations1D)
# Same settings as in `initial_condition`
+ RealT = eltype(u)
c = 2
- A = 0.1
+ A = convert(RealT, 0.1)
L = 2
- f = 1 / L
- omega = 2 * pi * f
+ f = 1.0f0 / L
+ omega = 2 * convert(RealT, pi) * f
gamma = totalgamma(u, equations)
x1, = x
si, co = sincos((t - x1) * omega)
- tmp = (-((4 * si * A - 4c) + 1) * (gamma - 1) * co * A * omega) / 2
+ tmp = (-((4 * si * A - 4 * c) + 1) * (gamma - 1) * co * A * omega) / 2
# Here we compute an arbitrary number of different rhos. (one rho is double the next rho while the sum of all rhos is 1
- du_rho = SVector{ncomponents(equations), real(equations)}(0.0
+ du_rho = SVector{ncomponents(equations), real(equations)}(0
for i in eachcomponent(equations))
du1 = tmp
du2 = tmp
- du_other = SVector{2, real(equations)}(du1, du2)
+ du_other = SVector(du1, du2)
return vcat(du_other, du_rho)
end
@@ -194,26 +196,27 @@ A for multicomponent adapted weak blast wave adapted to multicomponent and taken
function initial_condition_weak_blast_wave(x, t,
equations::CompressibleEulerMulticomponentEquations1D)
# From Hennemann & Gassner JCP paper 2020 (Sec. 6.3)
- inicenter = SVector(0.0)
+ RealT = eltype(x)
+ inicenter = SVector(0)
x_norm = x[1] - inicenter[1]
r = abs(x_norm)
- cos_phi = x_norm > 0 ? one(x_norm) : -one(x_norm)
+ cos_phi = x_norm > 0 ? 1 : -1
- prim_rho = SVector{ncomponents(equations), real(equations)}(r > 0.5 ?
+ prim_rho = SVector{ncomponents(equations), real(equations)}(r > 0.5f0 ?
2^(i - 1) * (1 - 2) /
(1 -
2^ncomponents(equations)) *
- 1.0 :
+ 1 :
2^(i - 1) * (1 - 2) /
(1 -
2^ncomponents(equations)) *
- 1.1691
+ convert(RealT, 1.1691)
for i in eachcomponent(equations))
- v1 = r > 0.5 ? 0.0 : 0.1882 * cos_phi
- p = r > 0.5 ? 1.0 : 1.245
+ v1 = r > 0.5f0 ? zero(RealT) : convert(RealT, 0.1882) * cos_phi
+ p = r > 0.5f0 ? one(RealT) : convert(RealT, 1.245)
- prim_other = SVector{2, real(equations)}(v1, p)
+ prim_other = SVector(v1, p)
return prim2cons(vcat(prim_other, prim_rho), equations)
end
@@ -227,13 +230,13 @@ end
v1 = rho_v1 / rho
gamma = totalgamma(u, equations)
- p = (gamma - 1) * (rho_e - 0.5 * rho * v1^2)
+ p = (gamma - 1) * (rho_e - 0.5f0 * rho * v1^2)
f_rho = densities(u, v1, equations)
f1 = rho_v1 * v1 + p
f2 = (rho_e + p) * v1
- f_other = SVector{2, real(equations)}(f1, f2)
+ f_other = SVector(f1, f2)
return vcat(f_other, f_rho)
end
@@ -255,7 +258,7 @@ Entropy conserving two-point flux by
rhok_mean = SVector{ncomponents(equations), real(equations)}(ln_mean(u_ll[i + 2],
u_rr[i + 2])
for i in eachcomponent(equations))
- rhok_avg = SVector{ncomponents(equations), real(equations)}(0.5 * (u_ll[i + 2] +
+ rhok_avg = SVector{ncomponents(equations), real(equations)}(0.5f0 * (u_ll[i + 2] +
u_rr[i + 2])
for i in eachcomponent(equations))
@@ -269,13 +272,14 @@ Entropy conserving two-point flux by
# extract velocities
v1_ll = rho_v1_ll / rho_ll
v1_rr = rho_v1_rr / rho_rr
- v1_avg = 0.5 * (v1_ll + v1_rr)
- v1_square = 0.5 * (v1_ll^2 + v1_rr^2)
+ v1_avg = 0.5f0 * (v1_ll + v1_rr)
+ v1_square = 0.5f0 * (v1_ll^2 + v1_rr^2)
v_sum = v1_avg
- enth = zero(v_sum)
- help1_ll = zero(v1_ll)
- help1_rr = zero(v1_rr)
+ RealT = eltype(u_ll)
+ enth = zero(RealT)
+ help1_ll = zero(RealT)
+ help1_rr = zero(RealT)
for i in eachcomponent(equations)
enth += rhok_avg[i] * gas_constants[i]
@@ -283,14 +287,14 @@ Entropy conserving two-point flux by
help1_rr += u_rr[i + 2] * cv[i]
end
- T_ll = (rho_e_ll - 0.5 * rho_ll * (v1_ll^2)) / help1_ll
- T_rr = (rho_e_rr - 0.5 * rho_rr * (v1_rr^2)) / help1_rr
- T = 0.5 * (1.0 / T_ll + 1.0 / T_rr)
- T_log = ln_mean(1.0 / T_ll, 1.0 / T_rr)
+ T_ll = (rho_e_ll - 0.5f0 * rho_ll * (v1_ll^2)) / help1_ll
+ T_rr = (rho_e_rr - 0.5f0 * rho_rr * (v1_rr^2)) / help1_rr
+ T = 0.5f0 * (1 / T_ll + 1 / T_rr)
+ T_log = ln_mean(1 / T_ll, 1 / T_rr)
# Calculate fluxes depending on orientation
- help1 = zero(T_ll)
- help2 = zero(T_rr)
+ help1 = zero(RealT)
+ help2 = zero(RealT)
f_rho = SVector{ncomponents(equations), real(equations)}(rhok_mean[i] * v1_avg
for i in eachcomponent(equations))
@@ -299,9 +303,9 @@ Entropy conserving two-point flux by
help2 += f_rho[i]
end
f1 = (help2) * v1_avg + enth / T
- f2 = (help1) / T_log - 0.5 * (v1_square) * (help2) + v1_avg * f1
+ f2 = (help1) / T_log - 0.5f0 * (v1_square) * (help2) + v1_avg * f1
- f_other = SVector{2, real(equations)}(f1, f2)
+ f_other = SVector(f1, f2)
return vcat(f_other, f_rho)
end
@@ -330,7 +334,7 @@ See also
rhok_mean = SVector{ncomponents(equations), real(equations)}(ln_mean(u_ll[i + 2],
u_rr[i + 2])
for i in eachcomponent(equations))
- rhok_avg = SVector{ncomponents(equations), real(equations)}(0.5 * (u_ll[i + 2] +
+ rhok_avg = SVector{ncomponents(equations), real(equations)}(0.5f0 * (u_ll[i + 2] +
u_rr[i + 2])
for i in eachcomponent(equations))
@@ -339,25 +343,26 @@ See also
rho_rr = density(u_rr, equations)
# Calculating gamma
- gamma = totalgamma(0.5 * (u_ll + u_rr), equations)
+ gamma = totalgamma(0.5f0 * (u_ll + u_rr), equations)
inv_gamma_minus_one = 1 / (gamma - 1)
# extract velocities
v1_ll = rho_v1_ll / rho_ll
v1_rr = rho_v1_rr / rho_rr
- v1_avg = 0.5 * (v1_ll + v1_rr)
- velocity_square_avg = 0.5 * (v1_ll * v1_rr)
+ v1_avg = 0.5f0 * (v1_ll + v1_rr)
+ velocity_square_avg = 0.5f0 * (v1_ll * v1_rr)
# density flux
f_rho = SVector{ncomponents(equations), real(equations)}(rhok_mean[i] * v1_avg
for i in eachcomponent(equations))
# helpful variables
- f_rho_sum = zero(v1_ll)
- help1_ll = zero(v1_ll)
- help1_rr = zero(v1_rr)
- enth_ll = zero(v1_ll)
- enth_rr = zero(v1_rr)
+ RealT = eltype(u_ll)
+ f_rho_sum = zero(RealT)
+ help1_ll = zero(RealT)
+ help1_rr = zero(RealT)
+ enth_ll = zero(RealT)
+ enth_rr = zero(RealT)
for i in eachcomponent(equations)
enth_ll += u_ll[i + 2] * gas_constants[i]
enth_rr += u_rr[i + 2] * gas_constants[i]
@@ -367,18 +372,18 @@ See also
end
# temperature and pressure
- T_ll = (rho_e_ll - 0.5 * rho_ll * (v1_ll^2)) / help1_ll
- T_rr = (rho_e_rr - 0.5 * rho_rr * (v1_rr^2)) / help1_rr
+ T_ll = (rho_e_ll - 0.5f0 * rho_ll * (v1_ll^2)) / help1_ll
+ T_rr = (rho_e_rr - 0.5f0 * rho_rr * (v1_rr^2)) / help1_rr
p_ll = T_ll * enth_ll
p_rr = T_rr * enth_rr
- p_avg = 0.5 * (p_ll + p_rr)
+ p_avg = 0.5f0 * (p_ll + p_rr)
inv_rho_p_mean = p_ll * p_rr * inv_ln_mean(rho_ll * p_rr, rho_rr * p_ll)
# momentum and energy flux
f1 = f_rho_sum * v1_avg + p_avg
f2 = f_rho_sum * (velocity_square_avg + inv_rho_p_mean * inv_gamma_minus_one) +
- 0.5 * (p_ll * v1_rr + p_rr * v1_ll)
- f_other = SVector{2, real(equations)}(f1, f2)
+ 0.5f0 * (p_ll * v1_rr + p_rr * v1_ll)
+ f_other = SVector(f1, f2)
return vcat(f_other, f_rho)
end
@@ -398,8 +403,8 @@ end
v_ll = rho_v1_ll / rho_ll
v_rr = rho_v1_rr / rho_rr
- p_ll = (gamma_ll - 1) * (rho_e_ll - 1 / 2 * rho_ll * v_ll^2)
- p_rr = (gamma_rr - 1) * (rho_e_rr - 1 / 2 * rho_rr * v_rr^2)
+ p_ll = (gamma_ll - 1) * (rho_e_ll - 0.5f0 * rho_ll * v_ll^2)
+ p_rr = (gamma_rr - 1) * (rho_e_rr - 0.5f0 * rho_rr * v_rr^2)
c_ll = sqrt(gamma_ll * p_ll / rho_ll)
c_rr = sqrt(gamma_rr * p_rr / rho_rr)
@@ -414,7 +419,7 @@ end
v1 = rho_v1 / rho
gamma = totalgamma(u, equations)
- p = (gamma - 1) * (rho_e - 1 / 2 * rho * (v1^2))
+ p = (gamma - 1) * (rho_e - 0.5f0 * rho * (v1^2))
c = sqrt(gamma * p / rho)
return (abs(v1) + c,)
@@ -431,8 +436,8 @@ end
v1 = rho_v1 / rho
gamma = totalgamma(u, equations)
- p = (gamma - 1) * (rho_e - 0.5 * rho * (v1^2))
- prim_other = SVector{2, real(equations)}(v1, p)
+ p = (gamma - 1) * (rho_e - 0.5f0 * rho * (v1^2))
+ prim_other = SVector(v1, p)
return vcat(prim_other, prim_rho)
end
@@ -451,7 +456,7 @@ end
rho_v1 = rho * v1
- rho_e = p / (gamma - 1) + 0.5 * (rho_v1 * v1)
+ rho_e = p / (gamma - 1) + 0.5f0 * (rho_v1 * v1)
cons_other = SVector{2, RealT}(rho_v1, rho_e)
@@ -466,8 +471,9 @@ end
rho = density(u, equations)
- help1 = zero(rho)
- gas_constant = zero(rho)
+ RealT = eltype(u)
+ help1 = zero(RealT)
+ gas_constant = zero(RealT)
for i in eachcomponent(equations)
help1 += u[i + 2] * cv[i]
gas_constant += gas_constants[i] * (u[i + 2] / rho)
@@ -477,10 +483,10 @@ end
v_square = v1^2
gamma = totalgamma(u, equations)
- p = (gamma - 1) * (rho_e - 0.5 * rho * v_square)
+ p = (gamma - 1) * (rho_e - 0.5f0 * rho * v_square)
s = log(p) - gamma * log(rho) - log(gas_constant)
rho_p = rho / p
- T = (rho_e - 0.5 * rho * v_square) / (help1)
+ T = (rho_e - 0.5f0 * rho * v_square) / (help1)
entrop_rho = SVector{ncomponents(equations), real(equations)}((cv[i] *
(1 - log(T)) +
@@ -492,7 +498,7 @@ end
w1 = gas_constant * v1 * rho_p
w2 = gas_constant * (-rho_p)
- entrop_other = SVector{2, real(equations)}(w1, w2)
+ entrop_other = SVector(w1, w2)
return vcat(entrop_other, entrop_rho)
end
@@ -507,14 +513,15 @@ end
(-cv[i] *
log(-w[2]) -
cp[i] + w[i + 2] -
- 0.5 * w[1]^2 /
+ 0.5f0 * w[1]^2 /
w[2]))
for i in eachcomponent(equations))
- rho = zero(cons_rho[1])
- help1 = zero(cons_rho[1])
- help2 = zero(cons_rho[1])
- p = zero(cons_rho[1])
+ RealT = eltype(w)
+ rho = zero(RealT)
+ help1 = zero(RealT)
+ help2 = zero(RealT)
+ p = zero(RealT)
for i in eachcomponent(equations)
rho += cons_rho[i]
help1 += cons_rho[i] * cv[i] * gammas[i]
@@ -523,8 +530,8 @@ end
end
u1 = rho * v1
gamma = help1 / help2
- u2 = p / (gamma - 1) + 0.5 * rho * v1^2
- cons_other = SVector{2, real(equations)}(u1, u2)
+ u2 = p / (gamma - 1) + 0.5f0 * rho * v1^2
+ cons_other = SVector(u1, u2)
return vcat(cons_other, cons_rho)
end
@@ -534,7 +541,8 @@ end
rho = density(u, equations)
T = temperature(u, equations)
- total_entropy = zero(u[1])
+ RealT = eltype(u)
+ total_entropy = zero(RealT)
for i in eachcomponent(equations)
total_entropy -= u[i + 2] * (cv[i] * log(T) - gas_constants[i] * log(u[i + 2]))
end
@@ -548,7 +556,9 @@ end
rho_v1, rho_e = u
rho = density(u, equations)
- help1 = zero(rho)
+
+ RealT = eltype(u)
+ help1 = zero(RealT)
for i in eachcomponent(equations)
help1 += u[i + 2] * cv[i]
@@ -556,7 +566,7 @@ end
v1 = rho_v1 / rho
v_square = v1^2
- T = (rho_e - 0.5 * rho * v_square) / help1
+ T = (rho_e - 0.5f0 * rho * v_square) / help1
return T
end
@@ -570,8 +580,9 @@ partial density fractions as well as the partial specific heats at constant volu
@inline function totalgamma(u, equations::CompressibleEulerMulticomponentEquations1D)
@unpack cv, gammas = equations
- help1 = zero(u[1])
- help2 = zero(u[1])
+ RealT = eltype(u)
+ help1 = zero(RealT)
+ help2 = zero(RealT)
for i in eachcomponent(equations)
help1 += u[i + 2] * cv[i] * gammas[i]
@@ -587,13 +598,14 @@ end
rho = density(u, equations)
gamma = totalgamma(u, equations)
- p = (gamma - 1) * (rho_e - 0.5 * (rho_v1^2) / rho)
+ p = (gamma - 1) * (rho_e - 0.5f0 * (rho_v1^2) / rho)
return p
end
@inline function density(u, equations::CompressibleEulerMulticomponentEquations1D)
- rho = zero(u[1])
+ RealT = eltype(u)
+ rho = zero(RealT)
for i in eachcomponent(equations)
rho += u[i + 2]
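As a standalone sketch (not Trixi code) of the type-genericity pattern applied throughout this patch: derive the working real type from the input and prefer Float32-friendly literals such as `0.5f0`, so Float32 data stays Float32 while Float64 data is unaffected.

    using StaticArrays: SVector

    # Hypothetical pressure-like helper following the same pattern
    function pressure_like(u, gamma)
        RealT = eltype(u)    # working real type; the patch uses convert(RealT, 0.1882)
                             # and similar for non-dyadic constants
        rho, rho_v1, rho_e = u
        v1 = rho_v1 / rho
        return (gamma - 1) * (rho_e - 0.5f0 * rho * v1^2)
    end

    pressure_like(SVector(1.0f0, 0.1f0, 2.5f0), 1.4f0) isa Float32  # true
    pressure_like(SVector(1.0, 0.1, 2.5), 1.4) isa Float64          # true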
diff --git a/src/equations/compressible_euler_multicomponent_2d.jl b/src/equations/compressible_euler_multicomponent_2d.jl
index 60fce222f21..3473f887336 100644
--- a/src/equations/compressible_euler_multicomponent_2d.jl
+++ b/src/equations/compressible_euler_multicomponent_2d.jl
@@ -127,15 +127,16 @@ A smooth initial condition used for convergence tests in combination with
"""
function initial_condition_convergence_test(x, t,
equations::CompressibleEulerMulticomponentEquations2D)
+ RealT = eltype(x)
c = 2
- A = 0.1
+ A = convert(RealT, 0.1)
L = 2
- f = 1 / L
- omega = 2 * pi * f
+ f = 1.0f0 / L
+ omega = 2 * convert(RealT, pi) * f
ini = c + A * sin(omega * (x[1] + x[2] - t))
- v1 = 1.0
- v2 = 1.0
+ v1 = 1
+ v2 = 1
rho = ini
@@ -150,7 +151,7 @@ function initial_condition_convergence_test(x, t,
prim2 = rho * v2
prim3 = rho^2
- prim_other = SVector{3, real(equations)}(prim1, prim2, prim3)
+ prim_other = SVector(prim1, prim2, prim3)
return vcat(prim_other, prim_rho)
end
@@ -165,11 +166,12 @@ Source terms used for convergence tests in combination with
@inline function source_terms_convergence_test(u, x, t,
equations::CompressibleEulerMulticomponentEquations2D)
# Same settings as in `initial_condition`
+ RealT = eltype(u)
c = 2
- A = 0.1
+ A = convert(RealT, 0.1)
L = 2
- f = 1 / L
- omega = 2 * pi * f
+ f = 1.0f0 / L
+ omega = 2 * convert(RealT, pi) * f
gamma = totalgamma(u, equations)
@@ -191,9 +193,9 @@ Source terms used for convergence tests in combination with
du1 = tmp5
du2 = tmp5
- du3 = 2 * ((tmp6 - 1.0) * tmp3 + tmp6 * gamma) * tmp1
+ du3 = 2 * ((tmp6 - 1) * tmp3 + tmp6 * gamma) * tmp1
- du_other = SVector{3, real(equations)}(du1, du2, du3)
+ du_other = SVector(du1, du2, du3)
return vcat(du_other, du_rho)
end
@@ -210,29 +212,30 @@ function initial_condition_weak_blast_wave(x, t,
equations::CompressibleEulerMulticomponentEquations2D)
# From Hennemann & Gassner JCP paper 2020 (Sec. 6.3)
# Set up polar coordinates
- inicenter = SVector(0.0, 0.0)
+ RealT = eltype(x)
+ inicenter = SVector(0, 0)
x_norm = x[1] - inicenter[1]
y_norm = x[2] - inicenter[2]
r = sqrt(x_norm^2 + y_norm^2)
phi = atan(y_norm, x_norm)
sin_phi, cos_phi = sincos(phi)
- prim_rho = SVector{ncomponents(equations), real(equations)}(r > 0.5 ?
+ prim_rho = SVector{ncomponents(equations), real(equations)}(r > 0.5f0 ?
2^(i - 1) * (1 - 2) /
(1 -
2^ncomponents(equations)) *
- 1.0 :
+ 1 :
2^(i - 1) * (1 - 2) /
(1 -
2^ncomponents(equations)) *
- 1.1691
+ convert(RealT, 1.1691)
for i in eachcomponent(equations))
- v1 = r > 0.5 ? 0.0 : 0.1882 * cos_phi
- v2 = r > 0.5 ? 0.0 : 0.1882 * sin_phi
- p = r > 0.5 ? 1.0 : 1.245
+ v1 = r > 0.5f0 ? zero(RealT) : convert(RealT, 0.1882) * cos_phi
+ v2 = r > 0.5f0 ? zero(RealT) : convert(RealT, 0.1882) * sin_phi
+ p = r > 0.5f0 ? one(RealT) : convert(RealT, 1.245)
- prim_other = SVector{3, real(equations)}(v1, v2, p)
+ prim_other = SVector(v1, v2, p)
return prim2cons(vcat(prim_other, prim_rho), equations)
end
@@ -247,7 +250,7 @@ end
v1 = rho_v1 / rho
v2 = rho_v2 / rho
gamma = totalgamma(u, equations)
- p = (gamma - 1) * (rho_e - 0.5 * rho * (v1^2 + v2^2))
+ p = (gamma - 1) * (rho_e - 0.5f0 * rho * (v1^2 + v2^2))
if orientation == 1
f_rho = densities(u, v1, equations)
@@ -261,7 +264,7 @@ end
f3 = (rho_e + p) * v2
end
- f_other = SVector{3, real(equations)}(f1, f2, f3)
+ f_other = SVector(f1, f2, f3)
return vcat(f_other, f_rho)
end
@@ -277,14 +280,14 @@ end
v2 = rho_v2 / rho
v_normal = v1 * normal_direction[1] + v2 * normal_direction[2]
gamma = totalgamma(u, equations)
- p = (gamma - 1) * (rho_e - 0.5 * rho * (v1^2 + v2^2))
+ p = (gamma - 1) * (rho_e - 0.5f0 * rho * (v1^2 + v2^2))
f_rho = densities(u, v_normal, equations)
f1 = rho_v1 * v_normal + p * normal_direction[1]
f2 = rho_v2 * v_normal + p * normal_direction[2]
f3 = (rho_e + p) * v_normal
- f_other = SVector{3, real(equations)}(f1, f2, f3)
+ f_other = SVector(f1, f2, f3)
return vcat(f_other, f_rho)
end
@@ -306,7 +309,7 @@ Adaption of the entropy conserving two-point flux by
rhok_mean = SVector{ncomponents(equations), real(equations)}(ln_mean(u_ll[i + 3],
u_rr[i + 3])
for i in eachcomponent(equations))
- rhok_avg = SVector{ncomponents(equations), real(equations)}(0.5 * (u_ll[i + 3] +
+ rhok_avg = SVector{ncomponents(equations), real(equations)}(0.5f0 * (u_ll[i + 3] +
u_rr[i + 3])
for i in eachcomponent(equations))
@@ -319,15 +322,16 @@ Adaption of the entropy conserving two-point flux by
v2_ll = rho_v2_ll / rho_ll
v1_rr = rho_v1_rr / rho_rr
v2_rr = rho_v2_rr / rho_rr
- v1_avg = 0.5 * (v1_ll + v1_rr)
- v2_avg = 0.5 * (v2_ll + v2_rr)
- v1_square = 0.5 * (v1_ll^2 + v1_rr^2)
- v2_square = 0.5 * (v2_ll^2 + v2_rr^2)
+ v1_avg = 0.5f0 * (v1_ll + v1_rr)
+ v2_avg = 0.5f0 * (v2_ll + v2_rr)
+ v1_square = 0.5f0 * (v1_ll^2 + v1_rr^2)
+ v2_square = 0.5f0 * (v2_ll^2 + v2_rr^2)
v_sum = v1_avg + v2_avg
- enth = zero(v_sum)
- help1_ll = zero(v1_ll)
- help1_rr = zero(v1_rr)
+ RealT = eltype(u_ll)
+ enth = zero(RealT)
+ help1_ll = zero(RealT)
+ help1_rr = zero(RealT)
for i in eachcomponent(equations)
enth += rhok_avg[i] * gas_constants[i]
@@ -335,14 +339,14 @@ Adaption of the entropy conserving two-point flux by
help1_rr += u_rr[i + 3] * cv[i]
end
- T_ll = (rho_e_ll - 0.5 * rho_ll * (v1_ll^2 + v2_ll^2)) / help1_ll
- T_rr = (rho_e_rr - 0.5 * rho_rr * (v1_rr^2 + v2_rr^2)) / help1_rr
- T = 0.5 * (1.0 / T_ll + 1.0 / T_rr)
- T_log = ln_mean(1.0 / T_ll, 1.0 / T_rr)
+ T_ll = (rho_e_ll - 0.5f0 * rho_ll * (v1_ll^2 + v2_ll^2)) / help1_ll
+ T_rr = (rho_e_rr - 0.5f0 * rho_rr * (v1_rr^2 + v2_rr^2)) / help1_rr
+ T = 0.5f0 * (1 / T_ll + 1 / T_rr)
+ T_log = ln_mean(1 / T_ll, 1 / T_rr)
# Calculate fluxes depending on orientation
- help1 = zero(T_ll)
- help2 = zero(T_rr)
+ help1 = zero(RealT)
+ help2 = zero(RealT)
if orientation == 1
f_rho = SVector{ncomponents(equations), real(equations)}(rhok_mean[i] * v1_avg
for i in eachcomponent(equations))
@@ -352,7 +356,7 @@ Adaption of the entropy conserving two-point flux by
end
f1 = (help2) * v1_avg + enth / T
f2 = (help2) * v2_avg
- f3 = (help1) / T_log - 0.5 * (v1_square + v2_square) * (help2) + v1_avg * f1 +
+ f3 = (help1) / T_log - 0.5f0 * (v1_square + v2_square) * (help2) + v1_avg * f1 +
v2_avg * f2
else
f_rho = SVector{ncomponents(equations), real(equations)}(rhok_mean[i] * v2_avg
@@ -363,10 +367,10 @@ Adaption of the entropy conserving two-point flux by
end
f1 = (help2) * v1_avg
f2 = (help2) * v2_avg + enth / T
- f3 = (help1) / T_log - 0.5 * (v1_square + v2_square) * (help2) + v1_avg * f1 +
+ f3 = (help1) / T_log - 0.5f0 * (v1_square + v2_square) * (help2) + v1_avg * f1 +
v2_avg * f2
end
- f_other = SVector{3, real(equations)}(f1, f2, f3)
+ f_other = SVector(f1, f2, f3)
return vcat(f_other, f_rho)
end
@@ -395,7 +399,7 @@ See also
rhok_mean = SVector{ncomponents(equations), real(equations)}(ln_mean(u_ll[i + 3],
u_rr[i + 3])
for i in eachcomponent(equations))
- rhok_avg = SVector{ncomponents(equations), real(equations)}(0.5 * (u_ll[i + 3] +
+ rhok_avg = SVector{ncomponents(equations), real(equations)}(0.5f0 * (u_ll[i + 3] +
u_rr[i + 3])
for i in eachcomponent(equations))
@@ -404,23 +408,24 @@ See also
rho_rr = density(u_rr, equations)
# Calculating gamma
- gamma = totalgamma(0.5 * (u_ll + u_rr), equations)
+ gamma = totalgamma(0.5f0 * (u_ll + u_rr), equations)
inv_gamma_minus_one = 1 / (gamma - 1)
# extract velocities
v1_ll = rho_v1_ll / rho_ll
v1_rr = rho_v1_rr / rho_rr
- v1_avg = 0.5 * (v1_ll + v1_rr)
+ v1_avg = 0.5f0 * (v1_ll + v1_rr)
v2_ll = rho_v2_ll / rho_ll
v2_rr = rho_v2_rr / rho_rr
- v2_avg = 0.5 * (v2_ll + v2_rr)
- velocity_square_avg = 0.5 * (v1_ll * v1_rr + v2_ll * v2_rr)
+ v2_avg = 0.5f0 * (v2_ll + v2_rr)
+ velocity_square_avg = 0.5f0 * (v1_ll * v1_rr + v2_ll * v2_rr)
# helpful variables
- help1_ll = zero(v1_ll)
- help1_rr = zero(v1_rr)
- enth_ll = zero(v1_ll)
- enth_rr = zero(v1_rr)
+ RealT = eltype(u_ll)
+ help1_ll = zero(RealT)
+ help1_rr = zero(RealT)
+ enth_ll = zero(RealT)
+ enth_rr = zero(RealT)
for i in eachcomponent(equations)
enth_ll += u_ll[i + 3] * gas_constants[i]
enth_rr += u_rr[i + 3] * gas_constants[i]
@@ -429,14 +434,14 @@ See also
end
# temperature and pressure
- T_ll = (rho_e_ll - 0.5 * rho_ll * (v1_ll^2 + v2_ll^2)) / help1_ll
- T_rr = (rho_e_rr - 0.5 * rho_rr * (v1_rr^2 + v2_rr^2)) / help1_rr
+ T_ll = (rho_e_ll - 0.5f0 * rho_ll * (v1_ll^2 + v2_ll^2)) / help1_ll
+ T_rr = (rho_e_rr - 0.5f0 * rho_rr * (v1_rr^2 + v2_rr^2)) / help1_rr
p_ll = T_ll * enth_ll
p_rr = T_rr * enth_rr
- p_avg = 0.5 * (p_ll + p_rr)
+ p_avg = 0.5f0 * (p_ll + p_rr)
inv_rho_p_mean = p_ll * p_rr * inv_ln_mean(rho_ll * p_rr, rho_rr * p_ll)
- f_rho_sum = zero(T_rr)
+ f_rho_sum = zero(RealT)
if orientation == 1
f_rho = SVector{ncomponents(equations), real(equations)}(rhok_mean[i] * v1_avg
for i in eachcomponent(equations))
@@ -446,7 +451,7 @@ See also
f1 = f_rho_sum * v1_avg + p_avg
f2 = f_rho_sum * v2_avg
f3 = f_rho_sum * (velocity_square_avg + inv_rho_p_mean * inv_gamma_minus_one) +
- 0.5 * (p_ll * v1_rr + p_rr * v1_ll)
+ 0.5f0 * (p_ll * v1_rr + p_rr * v1_ll)
else
f_rho = SVector{ncomponents(equations), real(equations)}(rhok_mean[i] * v2_avg
for i in eachcomponent(equations))
@@ -456,11 +461,11 @@ See also
f1 = f_rho_sum * v1_avg
f2 = f_rho_sum * v2_avg + p_avg
f3 = f_rho_sum * (velocity_square_avg + inv_rho_p_mean * inv_gamma_minus_one) +
- 0.5 * (p_ll * v2_rr + p_rr * v2_ll)
+ 0.5f0 * (p_ll * v2_rr + p_rr * v2_ll)
end
# momentum and energy flux
- f_other = SVector{3, real(equations)}(f1, f2, f3)
+ f_other = SVector(f1, f2, f3)
return vcat(f_other, f_rho)
end
@@ -474,7 +479,7 @@ end
rhok_mean = SVector{ncomponents(equations), real(equations)}(ln_mean(u_ll[i + 3],
u_rr[i + 3])
for i in eachcomponent(equations))
- rhok_avg = SVector{ncomponents(equations), real(equations)}(0.5 * (u_ll[i + 3] +
+ rhok_avg = SVector{ncomponents(equations), real(equations)}(0.5f0 * (u_ll[i + 3] +
u_rr[i + 3])
for i in eachcomponent(equations))
@@ -483,25 +488,26 @@ end
rho_rr = density(u_rr, equations)
# Calculating gamma
- gamma = totalgamma(0.5 * (u_ll + u_rr), equations)
+ gamma = totalgamma(0.5f0 * (u_ll + u_rr), equations)
inv_gamma_minus_one = 1 / (gamma - 1)
# extract velocities
v1_ll = rho_v1_ll / rho_ll
v1_rr = rho_v1_rr / rho_rr
- v1_avg = 0.5 * (v1_ll + v1_rr)
+ v1_avg = 0.5f0 * (v1_ll + v1_rr)
v2_ll = rho_v2_ll / rho_ll
v2_rr = rho_v2_rr / rho_rr
- v2_avg = 0.5 * (v2_ll + v2_rr)
- velocity_square_avg = 0.5 * (v1_ll * v1_rr + v2_ll * v2_rr)
+ v2_avg = 0.5f0 * (v2_ll + v2_rr)
+ velocity_square_avg = 0.5f0 * (v1_ll * v1_rr + v2_ll * v2_rr)
v_dot_n_ll = v1_ll * normal_direction[1] + v2_ll * normal_direction[2]
v_dot_n_rr = v1_rr * normal_direction[1] + v2_rr * normal_direction[2]
# helpful variables
- help1_ll = zero(v1_ll)
- help1_rr = zero(v1_rr)
- enth_ll = zero(v1_ll)
- enth_rr = zero(v1_rr)
+ RealT = eltype(u_ll)
+ help1_ll = zero(RealT)
+ help1_rr = zero(RealT)
+ enth_ll = zero(RealT)
+ enth_rr = zero(RealT)
for i in eachcomponent(equations)
enth_ll += u_ll[i + 3] * gas_constants[i]
enth_rr += u_rr[i + 3] * gas_constants[i]
@@ -510,15 +516,15 @@ end
end
# temperature and pressure
- T_ll = (rho_e_ll - 0.5 * rho_ll * (v1_ll^2 + v2_ll^2)) / help1_ll
- T_rr = (rho_e_rr - 0.5 * rho_rr * (v1_rr^2 + v2_rr^2)) / help1_rr
+ T_ll = (rho_e_ll - 0.5f0 * rho_ll * (v1_ll^2 + v2_ll^2)) / help1_ll
+ T_rr = (rho_e_rr - 0.5f0 * rho_rr * (v1_rr^2 + v2_rr^2)) / help1_rr
p_ll = T_ll * enth_ll
p_rr = T_rr * enth_rr
- p_avg = 0.5 * (p_ll + p_rr)
+ p_avg = 0.5f0 * (p_ll + p_rr)
inv_rho_p_mean = p_ll * p_rr * inv_ln_mean(rho_ll * p_rr, rho_rr * p_ll)
- f_rho_sum = zero(T_rr)
- f_rho = SVector{ncomponents(equations), real(equations)}(rhok_mean[i] * 0.5 *
+ f_rho_sum = zero(RealT)
+ f_rho = SVector{ncomponents(equations), real(equations)}(rhok_mean[i] * 0.5f0 *
(v_dot_n_ll + v_dot_n_rr)
for i in eachcomponent(equations))
for i in eachcomponent(equations)
@@ -527,7 +533,7 @@ end
f1 = f_rho_sum * v1_avg + p_avg * normal_direction[1]
f2 = f_rho_sum * v2_avg + p_avg * normal_direction[2]
f3 = f_rho_sum * (velocity_square_avg + inv_rho_p_mean * inv_gamma_minus_one) +
- 0.5 * (p_ll * v_dot_n_rr + p_rr * v_dot_n_ll)
+ 0.5f0 * (p_ll * v_dot_n_rr + p_rr * v_dot_n_ll)
# momentum and energy flux
f_other = SVector(f1, f2, f3)
@@ -557,9 +563,9 @@ end
end
# Compute the sound speeds on the left and right
- p_ll = (gamma_ll - 1) * (rho_e_ll - 1 / 2 * (rho_v1_ll^2 + rho_v2_ll^2) / rho_ll)
+ p_ll = (gamma_ll - 1) * (rho_e_ll - 0.5f0 * (rho_v1_ll^2 + rho_v2_ll^2) / rho_ll)
c_ll = sqrt(gamma_ll * p_ll / rho_ll)
- p_rr = (gamma_rr - 1) * (rho_e_rr - 1 / 2 * (rho_v1_rr^2 + rho_v2_rr^2) / rho_rr)
+ p_rr = (gamma_rr - 1) * (rho_e_rr - 0.5f0 * (rho_v1_rr^2 + rho_v2_rr^2) / rho_rr)
c_rr = sqrt(gamma_rr * p_rr / rho_rr)
λ_max = max(abs(v_ll), abs(v_rr)) + max(c_ll, c_rr)
@@ -574,7 +580,7 @@ end
v2 = rho_v2 / rho
gamma = totalgamma(u, equations)
- p = (gamma - 1) * (rho_e - 1 / 2 * rho * (v1^2 + v2^2))
+ p = (gamma - 1) * (rho_e - 0.5f0 * rho * (v1^2 + v2^2))
c = sqrt(gamma * p / rho)
return (abs(v1) + c, abs(v2) + c)
@@ -635,8 +641,8 @@ end
v1 = rho_v1 / rho
v2 = rho_v2 / rho
gamma = totalgamma(u, equations)
- p = (gamma - 1) * (rho_e - 0.5 * rho * (v1^2 + v2^2))
- prim_other = SVector{3, real(equations)}(v1, v2, p)
+ p = (gamma - 1) * (rho_e - 0.5f0 * rho * (v1^2 + v2^2))
+ prim_other = SVector(v1, v2, p)
return vcat(prim_other, prim_rho)
end
@@ -649,8 +655,9 @@ end
rho = density(u, equations)
# Multicomponent stuff
- help1 = zero(rho)
- gas_constant = zero(rho)
+ RealT = eltype(u)
+ help1 = zero(RealT)
+ gas_constant = zero(RealT)
for i in eachcomponent(equations)
help1 += u[i + 3] * cv[i]
gas_constant += gas_constants[i] * (u[i + 3] / rho)
@@ -661,10 +668,10 @@ end
v_square = v1^2 + v2^2
gamma = totalgamma(u, equations)
- p = (gamma - 1) * (rho_e - 0.5 * rho * v_square)
+ p = (gamma - 1) * (rho_e - 0.5f0 * rho * v_square)
s = log(p) - gamma * log(rho) - log(gas_constant)
rho_p = rho / p
- T = (rho_e - 0.5 * rho * v_square) / (help1)
+ T = (rho_e - 0.5f0 * rho * v_square) / (help1)
entrop_rho = SVector{ncomponents(equations), real(equations)}((cv[i] *
(1 - log(T)) +
@@ -677,7 +684,7 @@ end
w2 = gas_constant * v2 * rho_p
w3 = gas_constant * (-rho_p)
- entrop_other = SVector{3, real(equations)}(w1, w2, w3)
+ entrop_other = SVector(w1, w2, w3)
return vcat(entrop_other, entrop_rho)
end
@@ -698,10 +705,11 @@ end
1)
for i in eachcomponent(equations))
- rho = zero(cons_rho[1])
- help1 = zero(cons_rho[1])
- help2 = zero(cons_rho[1])
- p = zero(cons_rho[1])
+ RealT = eltype(w)
+ rho = zero(RealT)
+ help1 = zero(RealT)
+ help2 = zero(RealT)
+ p = zero(RealT)
for i in eachcomponent(equations)
rho += cons_rho[i]
help1 += cons_rho[i] * cv[i] * gammas[i]
@@ -711,8 +719,8 @@ end
u1 = rho * v1
u2 = rho * v2
gamma = help1 / help2
- u3 = p / (gamma - 1) + 0.5 * rho * v_squared
- cons_other = SVector{3, real(equations)}(u1, u2, u3)
+ u3 = p / (gamma - 1) + 0.5f0 * rho * v_squared
+ cons_other = SVector(u1, u2, u3)
return vcat(cons_other, cons_rho)
end
@@ -728,9 +736,9 @@ end
rho_v1 = rho * v1
rho_v2 = rho * v2
- rho_e = p / (gamma - 1) + 0.5 * (rho_v1 * v1 + rho_v2 * v2)
+ rho_e = p / (gamma - 1) + 0.5f0 * (rho_v1 * v1 + rho_v2 * v2)
- cons_other = SVector{3, real(equations)}(rho_v1, rho_v2, rho_e)
+ cons_other = SVector(rho_v1, rho_v2, rho_e)
return vcat(cons_other, cons_rho)
end
@@ -740,7 +748,8 @@ end
rho = density(u, equations)
T = temperature(u, equations)
- total_entropy = zero(u[1])
+ RealT = eltype(u)
+ total_entropy = zero(RealT)
for i in eachcomponent(equations)
total_entropy -= u[i + 3] * (cv[i] * log(T) - gas_constants[i] * log(u[i + 3]))
end
@@ -754,7 +763,8 @@ end
rho_v1, rho_v2, rho_e = u
rho = density(u, equations)
- help1 = zero(rho)
+ RealT = eltype(u)
+ help1 = zero(RealT)
for i in eachcomponent(equations)
help1 += u[i + 3] * cv[i]
@@ -763,7 +773,7 @@ end
v1 = rho_v1 / rho
v2 = rho_v2 / rho
v_square = v1^2 + v2^2
- T = (rho_e - 0.5 * rho * v_square) / help1
+ T = (rho_e - 0.5f0 * rho * v_square) / help1
return T
end
@@ -777,8 +787,9 @@ partial density fractions as well as the partial specific heats at constant volu
@inline function totalgamma(u, equations::CompressibleEulerMulticomponentEquations2D)
@unpack cv, gammas = equations
- help1 = zero(u[1])
- help2 = zero(u[1])
+ RealT = eltype(u)
+ help1 = zero(RealT)
+ help2 = zero(RealT)
for i in eachcomponent(equations)
help1 += u[i + 3] * cv[i] * gammas[i]
@@ -794,13 +805,14 @@ end
rho = density(u, equations)
gamma = totalgamma(u, equations)
- rho_times_p = (gamma - 1) * (rho * rho_e - 0.5 * (rho_v1^2 + rho_v2^2))
+ rho_times_p = (gamma - 1) * (rho * rho_e - 0.5f0 * (rho_v1^2 + rho_v2^2))
return rho_times_p
end
@inline function density(u, equations::CompressibleEulerMulticomponentEquations2D)
- rho = zero(u[1])
+ RealT = eltype(u)
+ rho = zero(RealT)
for i in eachcomponent(equations)
rho += u[i + 3]
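All of the accumulator changes in these multicomponent hunks follow one pattern: seed loop accumulators with `zero(RealT)`, where `RealT = eltype(u)`, rather than with `zero(...)` of some previously computed quantity, so the summation has a well-defined element type for both Float32 and Float64 input. A minimal sketch of the pattern, assuming a plain vector `u_partial` of partial densities (simplified, not the actual Trixi function):

# Minimal sketch, assuming `u_partial` is a vector of partial densities;
# seeding with `zero(eltype(u_partial))` keeps Float32 input Float32.
function total_density_sketch(u_partial)
    RealT = eltype(u_partial)
    rho = zero(RealT)
    for rho_i in u_partial
        rho += rho_i
    end
    return rho
end

total_density_sketch(Float32[0.2, 0.8])  # -> 1.0f0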
diff --git a/src/equations/compressible_euler_quasi_1d.jl b/src/equations/compressible_euler_quasi_1d.jl
index 9c7e3a7269b..936487186ec 100644
--- a/src/equations/compressible_euler_quasi_1d.jl
+++ b/src/equations/compressible_euler_quasi_1d.jl
@@ -78,18 +78,19 @@ A smooth initial condition used for convergence tests in combination with
"""
function initial_condition_convergence_test(x, t,
equations::CompressibleEulerEquationsQuasi1D)
+ RealT = eltype(x)
c = 2
- A = 0.1
+ A = convert(RealT, 0.1)
L = 2
- f = 1 / L
- ω = 2 * pi * f
+ f = 1.0f0 / L
+ ω = 2 * convert(RealT, pi) * f
ini = c + A * sin(ω * (x[1] - t))
rho = ini
- v1 = 1.0
+ v1 = 1
e = ini^2 / rho
- p = (equations.gamma - 1) * (e - 0.5 * rho * v1^2)
- a = 1.5 - 0.5 * cos(x[1] * pi)
+ p = (equations.gamma - 1) * (e - 0.5f0 * rho * v1^2)
+ a = 1.5f0 - 0.5f0 * cos(x[1] * convert(RealT, pi))
return prim2cons(SVector(rho, v1, p, a), equations)
end
@@ -108,19 +109,20 @@ as defined in [`initial_condition_convergence_test`](@ref).
equations::CompressibleEulerEquationsQuasi1D)
# Same settings as in `initial_condition_convergence_test`.
# Derivatives calculated with ForwardDiff.jl
+ RealT = eltype(u)
c = 2
- A = 0.1
+ A = convert(RealT, 0.1)
L = 2
- f = 1 / L
- ω = 2 * pi * f
+ f = 1.0f0 / L
+ ω = 2 * convert(RealT, pi) * f
x1, = x
ini(x1, t) = c + A * sin(ω * (x1 - t))
rho(x1, t) = ini(x1, t)
- v1(x1, t) = 1.0
+ v1(x1, t) = 1
e(x1, t) = ini(x1, t)^2 / rho(x1, t)
- p1(x1, t) = (equations.gamma - 1) * (e(x1, t) - 0.5 * rho(x1, t) * v1(x1, t)^2)
- a(x1, t) = 1.5 - 0.5 * cos(x1 * pi)
+ p1(x1, t) = (equations.gamma - 1) * (e(x1, t) - 0.5f0 * rho(x1, t) * v1(x1, t)^2)
+ a(x1, t) = 1.5f0 - 0.5f0 * cos(x1 * pi)
arho(x1, t) = a(x1, t) * rho(x1, t)
arhou(x1, t) = arho(x1, t) * v1(x1, t)
@@ -142,7 +144,7 @@ as defined in [`initial_condition_convergence_test`](@ref).
du2 = darhou_dt(x1, t) + darhouu_dx(x1, t) + a(x1, t) * dp1_dx(x1, t)
du3 = daE_dt(x1, t) + dauEp_dx(x1, t)
- return SVector(du1, du2, du3, 0.0)
+ return SVector(du1, du2, du3, 0)
end
# Calculate 1D flux for a single point
@@ -157,7 +159,7 @@ end
f2 = a_rho_v1 * v1
f3 = a * v1 * (e + p)
- return SVector(f1, f2, f3, zero(eltype(u)))
+ return SVector(f1, f2, f3, 0)
end
"""
@@ -189,9 +191,7 @@ Further details are available in the paper:
# in the arithmetic average of {p}.
p_avg = p_ll + p_rr
- z = zero(eltype(u_ll))
-
- return SVector(z, a_ll * p_avg, z, z)
+ return SVector(0, a_ll * p_avg, 0, 0)
end
# While `normal_direction` isn't strictly necessary in 1D, certain solvers assume that
@@ -239,19 +239,19 @@ Further details are available in the paper:
# log((ϱₗ/pₗ) / (ϱᵣ/pᵣ)) / (ϱₗ/pₗ - ϱᵣ/pᵣ)
# = pₗ pᵣ log((ϱₗ pᵣ) / (ϱᵣ pₗ)) / (ϱₗ pᵣ - ϱᵣ pₗ)
inv_rho_p_mean = p_ll * p_rr * inv_ln_mean(rho_ll * p_rr, rho_rr * p_ll)
- v1_avg = 0.5 * (v1_ll + v1_rr)
- a_v1_avg = 0.5 * (a_ll * v1_ll + a_rr * v1_rr)
- p_avg = 0.5 * (p_ll + p_rr)
- velocity_square_avg = 0.5 * (v1_ll * v1_rr)
+ v1_avg = 0.5f0 * (v1_ll + v1_rr)
+ a_v1_avg = 0.5f0 * (a_ll * v1_ll + a_rr * v1_rr)
+ p_avg = 0.5f0 * (p_ll + p_rr)
+ velocity_square_avg = 0.5f0 * (v1_ll * v1_rr)
# Calculate fluxes
# Ignore orientation since it is always "1" in 1D
f1 = rho_mean * a_v1_avg
f2 = rho_mean * a_v1_avg * v1_avg
f3 = f1 * (velocity_square_avg + inv_rho_p_mean * equations.inv_gamma_minus_one) +
- 0.5 * (p_ll * a_rr * v1_rr + p_rr * a_ll * v1_ll)
+ 0.5f0 * (p_ll * a_rr * v1_rr + p_rr * a_ll * v1_ll)
- return SVector(f1, f2, f3, zero(eltype(u_ll)))
+ return SVector(f1, f2, f3, 0)
end
# While `normal_direction` isn't strictly necessary in 1D, certain solvers assume that
@@ -276,13 +276,13 @@ end
e_ll = a_e_ll / a_ll
v1_ll = a_rho_v1_ll / a_rho_ll
v_mag_ll = abs(v1_ll)
- p_ll = (equations.gamma - 1) * (e_ll - 0.5 * rho_ll * v_mag_ll^2)
+ p_ll = (equations.gamma - 1) * (e_ll - 0.5f0 * rho_ll * v_mag_ll^2)
c_ll = sqrt(equations.gamma * p_ll / rho_ll)
rho_rr = a_rho_rr / a_rr
e_rr = a_e_rr / a_rr
v1_rr = a_rho_v1_rr / a_rho_rr
v_mag_rr = abs(v1_rr)
- p_rr = (equations.gamma - 1) * (e_rr - 0.5 * rho_rr * v_mag_rr^2)
+ p_rr = (equations.gamma - 1) * (e_rr - 0.5f0 * rho_rr * v_mag_rr^2)
c_rr = sqrt(equations.gamma * p_rr / rho_rr)
λ_max = max(v_mag_ll, v_mag_rr) + max(c_ll, c_rr)
@@ -293,7 +293,7 @@ end
rho = a_rho / a
v1 = a_rho_v1 / a_rho
e = a_e / a
- p = (equations.gamma - 1) * (e - 0.5 * rho * v1^2)
+ p = (equations.gamma - 1) * (e - 0.5f0 * rho * v1^2)
c = sqrt(equations.gamma * p / rho)
return (abs(v1) + c,)
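The quasi-1D changes above rely on a small set of literal conventions: `0.5f0` instead of `0.5`, integer literals such as `1` where exactness allows, and `convert(RealT, pi)` before multiplying by other reals. The point is that Float32 literals do not widen Float32 input, while Float64 literals do. A short illustration with assumed values:

# Illustrative only: how the literal choice controls the result type.
x = 0.25f0                        # Float32 input
0.5 * x                           # Float64 -- the Float64 literal promotes x
0.5f0 * x                         # Float32 -- Float32 literals preserve the input type
0.5f0 * 0.25                      # Float64 -- and still give Float64 for Float64 input
2 * convert(typeof(x), pi) * x    # Float32 -- pi is converted before it can promote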
diff --git a/test/test_type.jl b/test/test_type.jl
index de02ec47110..7bea104bb86 100644
--- a/test/test_type.jl
+++ b/test/test_type.jl
@@ -47,10 +47,13 @@ isdir(outdir) && rm(outdir, recursive = true)
for direction in directions
if RealT == Float32
# check `surface_flux_function` (test broken)
- @test_broken eltype(boundary_condition_wall(u_inner, orientation,
- direction, x, t,
- surface_flux_function,
- equations)) == RealT
+ @test_broken eltype(@inferred boundary_condition_wall(u_inner,
+ orientation,
+ direction, x,
+ t,
+ surface_flux_function,
+ equations)) ==
+ RealT
else
@test eltype(@inferred boundary_condition_wall(u_inner, orientation,
direction, x, t,
@@ -62,10 +65,12 @@ isdir(outdir) && rm(outdir, recursive = true)
if RealT == Float32
# check `surface_flux_function` (test broken)
- @test_broken eltype(boundary_condition_slip_wall(u_inner, normal_direction,
- x, t,
- surface_flux_function,
- equations)) == RealT
+ @test_broken eltype(@inferred boundary_condition_slip_wall(u_inner,
+ normal_direction,
+ x, t,
+ surface_flux_function,
+ equations)) ==
+ RealT
else
@test eltype(@inferred boundary_condition_slip_wall(u_inner,
normal_direction, x, t,
@@ -75,7 +80,7 @@ isdir(outdir) && rm(outdir, recursive = true)
end
@test eltype(@inferred flux(u, normal_direction, equations)) == RealT
- @test eltype(@inferred max_abs_speed_naive(u_ll, u_rr, normal_direction,
+ @test typeof(@inferred max_abs_speed_naive(u_ll, u_rr, normal_direction,
equations)) ==
RealT
@test eltype(@inferred dissipation(u_ll, u_rr, normal_direction, equations)) ==
@@ -83,7 +88,7 @@ isdir(outdir) && rm(outdir, recursive = true)
for orientation in orientations
@test eltype(@inferred flux(u, orientation, equations)) == RealT
- @test eltype(@inferred max_abs_speed_naive(u_ll, u_rr, orientation,
+ @test typeof(@inferred max_abs_speed_naive(u_ll, u_rr, orientation,
equations)) ==
RealT
@test eltype(@inferred dissipation(u_ll, u_rr, orientation, equations)) ==
@@ -141,7 +146,8 @@ isdir(outdir) && rm(outdir, recursive = true)
@test eltype(@inferred flux_hllc(u_ll, u_rr, orientation, equations)) == RealT
if RealT == Float32
# check `ln_mean` (test broken)
- @test_broken eltype(flux_chandrashekar(u_ll, u_rr, orientation, equations)) ==
+ @test_broken eltype(@inferred flux_chandrashekar(u_ll, u_rr, orientation,
+ equations)) ==
RealT
else
@test eltype(@inferred flux_chandrashekar(u_ll, u_rr, orientation,
@@ -150,7 +156,8 @@ isdir(outdir) && rm(outdir, recursive = true)
end
if RealT == Float32
# check `ln_mean` and `inv_ln_mean` (test broken)
- @test_broken eltype(flux_ranocha(u_ll, u_rr, orientation, equations)) ==
+ @test_broken eltype(@inferred flux_ranocha(u_ll, u_rr, orientation,
+ equations)) ==
RealT
else
@test eltype(@inferred flux_ranocha(u_ll, u_rr, orientation, equations)) ==
@@ -167,7 +174,7 @@ isdir(outdir) && rm(outdir, recursive = true)
equations))) ==
RealT
- @test eltype(@inferred max_abs_speed_naive(u_ll, u_rr, orientation, equations)) ==
+ @test typeof(@inferred max_abs_speed_naive(u_ll, u_rr, orientation, equations)) ==
RealT
@test eltype(@inferred min_max_speed_naive(u_ll, u_rr, orientation, equations)) ==
RealT
@@ -182,11 +189,11 @@ isdir(outdir) && rm(outdir, recursive = true)
@test eltype(@inferred prim2cons(u, equations)) == RealT
@test eltype(@inferred cons2entropy(u, equations)) == RealT
@test eltype(@inferred entropy2cons(u, equations)) == RealT
- @test eltype(@inferred density(u, equations)) == RealT
- @test eltype(@inferred pressure(u, equations)) == RealT
- @test eltype(@inferred density_pressure(u, equations)) == RealT
- @test eltype(@inferred entropy(cons, equations)) == RealT
- @test eltype(@inferred energy_internal(cons, equations)) == RealT
+ @test typeof(@inferred density(u, equations)) == RealT
+ @test typeof(@inferred pressure(u, equations)) == RealT
+ @test typeof(@inferred density_pressure(u, equations)) == RealT
+ @test typeof(@inferred entropy(cons, equations)) == RealT
+ @test typeof(@inferred energy_internal(cons, equations)) == RealT
end
end
@@ -247,8 +254,9 @@ isdir(outdir) && rm(outdir, recursive = true)
RealT
if RealT == Float32
# check `ln_mean` (test broken)
- @test_broken eltype(flux_chandrashekar(u_ll, u_rr, normal_direction,
- equations)) ==
+ @test_broken eltype(@inferred flux_chandrashekar(u_ll, u_rr,
+ normal_direction,
+ equations)) ==
RealT
else
@test eltype(@inferred flux_chandrashekar(u_ll, u_rr, normal_direction,
@@ -257,7 +265,8 @@ isdir(outdir) && rm(outdir, recursive = true)
end
if RealT == Float32
# check `ln_mean` and `inv_ln_mean` (test broken)
- @test_broken eltype(flux_ranocha(u_ll, u_rr, normal_direction, equations)) ==
+ @test_broken eltype(@inferred flux_ranocha(u_ll, u_rr, normal_direction,
+ equations)) ==
RealT
else
@test eltype(@inferred flux_ranocha(u_ll, u_rr, normal_direction,
@@ -273,7 +282,7 @@ isdir(outdir) && rm(outdir, recursive = true)
equations))) ==
RealT
- @test eltype(@inferred max_abs_speed_naive(u_ll, u_rr, normal_direction,
+ @test typeof(@inferred max_abs_speed_naive(u_ll, u_rr, normal_direction,
equations)) ==
RealT
@test eltype(@inferred min_max_speed_naive(u_ll, u_rr, normal_direction,
@@ -299,8 +308,9 @@ isdir(outdir) && rm(outdir, recursive = true)
RealT
if RealT == Float32
# check `ln_mean` (test broken)
- @test_broken eltype(flux_chandrashekar(u_ll, u_rr, orientation,
- equations)) ==
+ @test_broken eltype(@inferred flux_chandrashekar(u_ll, u_rr,
+ orientation,
+ equations)) ==
RealT
else
@test eltype(@inferred flux_chandrashekar(u_ll, u_rr, orientation,
@@ -309,7 +319,8 @@ isdir(outdir) && rm(outdir, recursive = true)
end
if RealT == Float32
# check `ln_mean` and `inv_ln_mean` (test broken)
- @test_broken eltype(flux_ranocha(u_ll, u_rr, orientation, equations)) ==
+ @test_broken eltype(@inferred flux_ranocha(u_ll, u_rr, orientation,
+ equations)) ==
RealT
else
@test eltype(@inferred flux_ranocha(u_ll, u_rr, orientation, equations)) ==
@@ -329,7 +340,7 @@ isdir(outdir) && rm(outdir, recursive = true)
equations))) ==
RealT
- @test eltype(@inferred max_abs_speed_naive(u_ll, u_rr, orientation,
+ @test typeof(@inferred max_abs_speed_naive(u_ll, u_rr, orientation,
equations)) ==
RealT
@test eltype(@inferred min_max_speed_naive(u_ll, u_rr, orientation,
@@ -348,15 +359,16 @@ isdir(outdir) && rm(outdir, recursive = true)
@test eltype(@inferred prim2cons(u, equations)) == RealT
@test eltype(@inferred cons2entropy(u, equations)) == RealT
@test eltype(@inferred entropy2cons(u, equations)) == RealT
- @test eltype(@inferred Trixi.entropy_guermond_etal(u, equations)) == RealT
@test eltype(@inferred Trixi.cons2entropy_guermond_etal(u, equations)) == RealT
- @test eltype(@inferred density(u, equations)) == RealT
- @test eltype(@inferred pressure(u, equations)) == RealT
- @test eltype(@inferred density_pressure(u, equations)) == RealT
- @test eltype(@inferred entropy(cons, equations)) == RealT
- @test eltype(@inferred Trixi.entropy_math(cons, equations)) == RealT
- @test eltype(@inferred Trixi.entropy_thermodynamic(cons, equations)) == RealT
- @test eltype(@inferred energy_internal(cons, equations)) == RealT
+ @test typeof(@inferred Trixi.entropy_guermond_etal(u, equations)) == RealT
+ @test typeof(@inferred density(u, equations)) == RealT
+ @test typeof(@inferred pressure(u, equations)) == RealT
+ @test typeof(@inferred density_pressure(u, equations)) == RealT
+ @test typeof(@inferred entropy(cons, equations)) == RealT
+ @test typeof(@inferred Trixi.entropy_math(cons, equations)) == RealT
+ @test typeof(@inferred Trixi.entropy_thermodynamic(cons, equations)) == RealT
+ @test typeof(@inferred energy_internal(cons, equations)) == RealT
+
# TODO: test `gradient_conservative`, not necessary but good to have
end
end
@@ -416,22 +428,24 @@ isdir(outdir) && rm(outdir, recursive = true)
RealT
if RealT == Float32
# check `ln_mean` (test broken)
- @test_broken eltype(flux_chandrashekar(u_ll, u_rr, normal_direction,
- equations)) == RealT
+ @test_broken eltype(@inferred flux_chandrashekar(u_ll, u_rr,
+ normal_direction,
+ equations)) == RealT
else
@test eltype(@inferred flux_chandrashekar(u_ll, u_rr, normal_direction,
equations)) == RealT
end
if RealT == Float32
# check `ln_mean` and `inv_ln_mean` (test broken)
- @test_broken eltype(flux_ranocha(u_ll, u_rr, normal_direction, equations)) ==
+ @test_broken eltype(@inferred flux_ranocha(u_ll, u_rr, normal_direction,
+ equations)) ==
RealT
else
@test eltype(@inferred flux_ranocha(u_ll, u_rr, normal_direction,
equations)) == RealT
end
- @test eltype(@inferred max_abs_speed_naive(u_ll, u_rr, normal_direction,
+ @test typeof(@inferred max_abs_speed_naive(u_ll, u_rr, normal_direction,
equations)) ==
RealT
@test eltype(@inferred min_max_speed_naive(u_ll, u_rr, normal_direction,
@@ -454,15 +468,17 @@ isdir(outdir) && rm(outdir, recursive = true)
RealT
if RealT == Float32
# check `ln_mean` (test broken)
- @test_broken eltype(flux_chandrashekar(u_ll, u_rr, orientation,
- equations)) == RealT
+ @test_broken eltype(@inferred flux_chandrashekar(u_ll, u_rr,
+ orientation,
+ equations)) == RealT
else
@test eltype(@inferred flux_chandrashekar(u_ll, u_rr, orientation,
equations)) == RealT
end
if RealT == Float32
# check `ln_mean` and `inv_ln_mean` (test broken)
- @test_broken eltype(flux_ranocha(u_ll, u_rr, orientation, equations)) ==
+ @test_broken eltype(@inferred flux_ranocha(u_ll, u_rr, orientation,
+ equations)) ==
RealT
else
@test eltype(@inferred flux_ranocha(u_ll, u_rr, orientation, equations)) ==
@@ -473,7 +489,7 @@ isdir(outdir) && rm(outdir, recursive = true)
equations))) ==
RealT
- @test eltype(@inferred max_abs_speed_naive(u_ll, u_rr, orientation,
+ @test typeof(@inferred max_abs_speed_naive(u_ll, u_rr, orientation,
equations)) == RealT
@test eltype(@inferred min_max_speed_naive(u_ll, u_rr, orientation,
equations)) == RealT
@@ -488,13 +504,185 @@ isdir(outdir) && rm(outdir, recursive = true)
@test eltype(@inferred prim2cons(u, equations)) == RealT
@test eltype(@inferred cons2entropy(u, equations)) == RealT
@test eltype(@inferred entropy2cons(u, equations)) == RealT
- @test eltype(@inferred density(u, equations)) == RealT
- @test eltype(@inferred pressure(u, equations)) == RealT
- @test eltype(@inferred density_pressure(u, equations)) == RealT
- @test eltype(@inferred entropy(cons, equations)) == RealT
- @test eltype(@inferred Trixi.entropy_math(cons, equations)) == RealT
- @test eltype(@inferred Trixi.entropy_thermodynamic(cons, equations)) == RealT
- @test eltype(@inferred energy_internal(cons, equations)) == RealT
+ @test typeof(@inferred density(u, equations)) == RealT
+ @test typeof(@inferred pressure(u, equations)) == RealT
+ @test typeof(@inferred density_pressure(u, equations)) == RealT
+ @test typeof(@inferred entropy(cons, equations)) == RealT
+ @test typeof(@inferred Trixi.entropy_math(cons, equations)) == RealT
+ @test typeof(@inferred Trixi.entropy_thermodynamic(cons, equations)) == RealT
+ @test typeof(@inferred energy_internal(cons, equations)) == RealT
+ end
+ end
+
+ @timed_testset "Compressible Euler Multicomponent 1D" begin
+ for RealT in (Float32, Float64)
+ gammas = (RealT(1.4), RealT(1.4))
+ gas_constants = (RealT(0.4), RealT(0.4))
+ equations = @inferred CompressibleEulerMulticomponentEquations1D(gammas = gammas,
+ gas_constants = gas_constants)
+
+ x = SVector(zero(RealT))
+ t = zero(RealT)
+ u = u_ll = u_rr = SVector(one(RealT), one(RealT), one(RealT), one(RealT))
+ orientation = 1
+
+ @test eltype(@inferred initial_condition_convergence_test(x, t, equations)) ==
+ RealT
+ @test eltype(@inferred initial_condition_weak_blast_wave(x, t, equations)) ==
+ RealT
+
+ @test eltype(@inferred source_terms_convergence_test(u, x, t, equations)) ==
+ RealT
+
+ @test eltype(@inferred flux(u, orientation, equations)) == RealT
+ if RealT == Float32
+ # check `ln_mean` (test broken)
+ @test_broken eltype(@inferred flux_chandrashekar(u_ll, u_rr, orientation,
+ equations)) == RealT
+ else
+ @test eltype(@inferred flux_chandrashekar(u_ll, u_rr, orientation,
+ equations)) == RealT
+ end
+ if RealT == Float32
+ # check `ln_mean` and `inv_ln_mean` (test broken)
+ @test_broken eltype(@inferred flux_ranocha(u_ll, u_rr, orientation,
+ equations)) ==
+ RealT
+ else
+ @test eltype(@inferred flux_ranocha(u_ll, u_rr, orientation, equations)) ==
+ RealT
+ end
+
+ @test typeof(@inferred max_abs_speed_naive(u_ll, u_rr, orientation, equations)) ==
+ RealT
+ @test eltype(@inferred Trixi.max_abs_speeds(u, equations)) == RealT
+ @test eltype(@inferred cons2prim(u, equations)) == RealT
+ @test eltype(@inferred prim2cons(u, equations)) == RealT
+ @test eltype(@inferred cons2entropy(u, equations)) == RealT
+ @test eltype(@inferred entropy2cons(u, equations)) == RealT
+ @test typeof(@inferred Trixi.total_entropy(u, equations)) == RealT
+ @test typeof(@inferred Trixi.temperature(u, equations)) == RealT
+ @test typeof(@inferred Trixi.totalgamma(u, equations)) == RealT
+ @test typeof(@inferred density(u, equations)) == RealT
+ @test typeof(@inferred pressure(u, equations)) == RealT
+ end
+ end
+
+ @timed_testset "Compressible Euler Multicomponent 2D" begin
+ for RealT in (Float32, Float64)
+ gammas = (RealT(1.4), RealT(1.4))
+ gas_constants = (RealT(0.4), RealT(0.4))
+ equations = @inferred CompressibleEulerMulticomponentEquations2D(gammas = gammas,
+ gas_constants = gas_constants)
+
+ x = SVector(zero(RealT), zero(RealT))
+ t = zero(RealT)
+ u = u_ll = u_rr = SVector(one(RealT), one(RealT), one(RealT), one(RealT),
+ one(RealT))
+ orientations = [1, 2]
+ normal_direction = SVector(one(RealT), zero(RealT))
+
+ @test eltype(@inferred initial_condition_convergence_test(x, t, equations)) ==
+ RealT
+ @test eltype(@inferred initial_condition_weak_blast_wave(x, t, equations)) ==
+ RealT
+
+ @test eltype(@inferred source_terms_convergence_test(u, x, t, equations)) ==
+ RealT
+
+ @test eltype(@inferred flux(u, normal_direction, equations)) == RealT
+ if RealT == Float32
+ # check `ln_mean` and `inv_ln_mean` (test broken)
+ @test_broken eltype(@inferred flux_ranocha(u_ll, u_rr, normal_direction,
+ equations)) == RealT
+ else
+ @test eltype(@inferred flux_ranocha(u_ll, u_rr, normal_direction,
+ equations)) == RealT
+ end
+
+ for orientation in orientations
+ @test eltype(@inferred flux(u, orientation, equations)) == RealT
+ if RealT == Float32
+ # check `ln_mean` (test broken)
+ @test_broken eltype(@inferred flux_chandrashekar(u_ll, u_rr,
+ orientation,
+ equations)) == RealT
+ else
+ @test eltype(@inferred flux_chandrashekar(u_ll, u_rr, orientation,
+ equations)) == RealT
+ end
+ if RealT == Float32
+ # check `ln_mean` and `inv_ln_mean` (test broken)
+ @test_broken eltype(@inferred flux_ranocha(u_ll, u_rr, orientation,
+ equations)) == RealT
+ else
+ @test eltype(@inferred flux_ranocha(u_ll, u_rr, orientation, equations)) ==
+ RealT
+ end
+
+ @test typeof(@inferred max_abs_speed_naive(u_ll, u_rr, orientation,
+ equations)) ==
+ RealT
+ end
+
+ @test eltype(@inferred Trixi.max_abs_speeds(u, equations)) == RealT
+ @test eltype(@inferred cons2prim(u, equations)) == RealT
+ @test eltype(@inferred prim2cons(u, equations)) == RealT
+ @test eltype(@inferred cons2entropy(u, equations)) == RealT
+ @test eltype(@inferred entropy2cons(u, equations)) == RealT
+ @test typeof(@inferred Trixi.total_entropy(u, equations)) == RealT
+ @test typeof(@inferred Trixi.temperature(u, equations)) == RealT
+ @test typeof(@inferred Trixi.totalgamma(u, equations)) == RealT
+ @test typeof(@inferred density(u, equations)) == RealT
+ @test typeof(@inferred density_pressure(u, equations)) == RealT
+ end
+ end
+
+ @timed_testset "Compressible Euler Quasi 1D" begin
+ for RealT in (Float32, Float64)
+ equations = @inferred CompressibleEulerEquationsQuasi1D(RealT(1.4))
+
+ x = SVector(zero(RealT))
+ t = zero(RealT)
+ u = u_ll = u_rr = SVector(one(RealT), one(RealT), one(RealT), one(RealT))
+ orientation = 1
+ normal_direction = normal_ll = normal_rr = SVector(one(RealT))
+
+ @test eltype(@inferred initial_condition_convergence_test(x, t, equations)) ==
+ RealT
+ @test eltype(@inferred source_terms_convergence_test(u, x, t, equations)) ==
+ RealT
+
+ @test eltype(@inferred flux(u, orientation, equations)) == RealT
+ @test eltype(@inferred flux_nonconservative_chan_etal(u_ll, u_rr, orientation,
+ equations)) == RealT
+ @test eltype(@inferred flux_nonconservative_chan_etal(u_ll, u_rr,
+ normal_direction,
+ equations)) ==
+ RealT
+ @test eltype(@inferred flux_nonconservative_chan_etal(u_ll, u_rr, normal_ll,
+ normal_rr, equations)) ==
+ RealT
+ if RealT == Float32
+ # check `ln_mean` and `inv_ln_mean` (test broken)
+ @test_broken eltype(@inferred flux_chan_etal(u_ll, u_rr, orientation,
+ equations)) ==
+ RealT
+ else
+ @test eltype(@inferred flux_chan_etal(u_ll, u_rr, orientation, equations)) ==
+ RealT
+ end
+
+ @test typeof(@inferred max_abs_speed_naive(u_ll, u_rr, orientation, equations)) ==
+ RealT
+ @test eltype(@inferred Trixi.max_abs_speeds(u, equations)) == RealT
+ @test eltype(@inferred cons2prim(u, equations)) == RealT
+ @test eltype(@inferred prim2cons(u, equations)) == RealT
+ @test eltype(@inferred cons2entropy(u, equations)) == RealT
+ @test typeof(@inferred entropy(u, equations)) == RealT
+ @test typeof(@inferred density(u, equations)) == RealT
+ @test typeof(@inferred pressure(u, equations)) == RealT
+ @test typeof(@inferred density_pressure(u, equations)) == RealT
end
end
end
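The test changes above switch from `eltype(@inferred f(...))` to `typeof(@inferred f(...))` for functions that return a plain scalar (density, pressure, entropy, temperature, and the maximum wave speeds), and wrap the known-broken Float32 cases in `@inferred` as well so that type inference is still exercised there. A toy version of the pattern, with hypothetical functions that are not part of Trixi:

# Hypothetical example of the testing pattern used above.
using Test

toy_pressure(u) = 0.5f0 * u[1]^2                        # scalar-valued
toy_flux(u) = (u[1], 2 * u[1])                          # container-valued

u32 = (1.0f0, 2.0f0)
@test typeof(@inferred toy_pressure(u32)) == Float32    # scalar result: check typeof
@test eltype(@inferred toy_flux(u32)) == Float32        # container result: check eltype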
From 961f64bb7015ca1908fe29f4a83bfe96955aed52 Mon Sep 17 00:00:00 2001
From: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com>
Date: Fri, 14 Jun 2024 17:14:10 +0200
Subject: [PATCH 39/44] Fix timings for `SemidiscretizationCoupled` (#1978)
---
src/semidiscretization/semidiscretization_coupled.jl | 12 +++++-------
1 file changed, 5 insertions(+), 7 deletions(-)
diff --git a/src/semidiscretization/semidiscretization_coupled.jl b/src/semidiscretization/semidiscretization_coupled.jl
index c5c21584dca..9011ac91215 100644
--- a/src/semidiscretization/semidiscretization_coupled.jl
+++ b/src/semidiscretization/semidiscretization_coupled.jl
@@ -125,7 +125,7 @@ end
"""
ndofsglobal(semi::SemidiscretizationCoupled)
-
+
Return the global number of degrees of freedom associated with each scalar variable across all MPI ranks, and summed up over all coupled systems.
This is the same as [`ndofs`](@ref) for simulations running in serial or
parallelized via threads. It will in general be different for simulations
@@ -180,12 +180,10 @@ function rhs!(du_ode, u_ode, semi::SemidiscretizationCoupled, t)
end
# Call rhs! for each semidiscretization
- @trixi_timeit timer() "copy to coupled boundaries" begin
- foreach_enumerate(semi.semis) do (i, semi_)
- u_loc = get_system_u_ode(u_ode, i, semi)
- du_loc = get_system_u_ode(du_ode, i, semi)
- rhs!(du_loc, u_loc, semi_, t)
- end
+ foreach_enumerate(semi.semis) do (i, semi_)
+ u_loc = get_system_u_ode(u_ode, i, semi)
+ du_loc = get_system_u_ode(du_ode, i, semi)
+ rhs!(du_loc, u_loc, semi_, t)
end
runtime = time_ns() - time_start
From 6d7a1dc398a475c47c03140dd0ed6ab04d119a2b Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Sat, 15 Jun 2024 09:49:42 +0200
Subject: [PATCH 40/44] set version to v0.7.16
---
Project.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Project.toml b/Project.toml
index a6911d4e2c3..63e95f447d9 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "Trixi"
uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb"
authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "]
-version = "0.7.16-pre"
+version = "0.7.16"
[deps]
CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
From 6bb0be5346912374fbab8f10fb37162057548507 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Sat, 15 Jun 2024 09:49:58 +0200
Subject: [PATCH 41/44] set development version to v0.7.17-pre
---
Project.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Project.toml b/Project.toml
index 63e95f447d9..5a4fc875fcb 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "Trixi"
uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb"
authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "]
-version = "0.7.16"
+version = "0.7.17-pre"
[deps]
CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
From 5398b2257a68c8795be4ee06b5ee3e947e006ff8 Mon Sep 17 00:00:00 2001
From: Huiyu Xie
Date: Sun, 16 Jun 2024 02:39:37 -1000
Subject: [PATCH 42/44] Add numerical support of other real types
(`linear_advection`) (#1971)
* start
* complete equations
* fix zeros
* apply suggestions
Co-authored-by: Hendrik Ranocha
* minor fixes
* complete unit tests
---------
Co-authored-by: Hendrik Ranocha
---
src/equations/linear_scalar_advection_1d.jl | 24 +-
src/equations/linear_scalar_advection_2d.jl | 20 +-
src/equations/linear_scalar_advection_3d.jl | 20 +-
test/test_type.jl | 243 ++++++++++++++++++++
4 files changed, 279 insertions(+), 28 deletions(-)
diff --git a/src/equations/linear_scalar_advection_1d.jl b/src/equations/linear_scalar_advection_1d.jl
index 6c6b9dd3721..743d2df870a 100644
--- a/src/equations/linear_scalar_advection_1d.jl
+++ b/src/equations/linear_scalar_advection_1d.jl
@@ -34,9 +34,10 @@ A constant initial condition to test free-stream preservation.
"""
function initial_condition_constant(x, t, equation::LinearScalarAdvectionEquation1D)
# Store translated coordinate for easy use of exact solution
+ RealT = eltype(x)
x_trans = x - equation.advection_velocity * t
- return SVector(2.0)
+ return SVector(RealT(2))
end
"""
@@ -49,13 +50,14 @@ in non-periodic domains).
function initial_condition_convergence_test(x, t,
equation::LinearScalarAdvectionEquation1D)
# Store translated coordinate for easy use of exact solution
+ RealT = eltype(x)
x_trans = x - equation.advection_velocity * t
- c = 1.0
- A = 0.5
+ c = 1
+ A = 0.5f0
L = 2
- f = 1 / L
- omega = 2 * pi * f
+ f = 1.0f0 / L
+ omega = 2 * convert(RealT, pi) * f
scalar = c + A * sin(omega * sum(x_trans))
return SVector(scalar)
end
@@ -161,7 +163,7 @@ function flux_engquist_osher(u_ll, u_rr, orientation::Int,
u_L = u_ll[1]
u_R = u_rr[1]
- return SVector(0.5 * (flux(u_L, orientation, equation) +
+ return SVector(0.5f0 * (flux(u_L, orientation, equation) +
flux(u_R, orientation, equation) -
abs(equation.advection_velocity[orientation]) * (u_R - u_L)))
end
@@ -200,14 +202,16 @@ end
@inline function splitting_lax_friedrichs(u, ::Val{:plus}, orientation::Integer,
equations::LinearScalarAdvectionEquation1D)
+ RealT = eltype(u)
a = equations.advection_velocity[1]
- return a > 0 ? flux(u, orientation, equations) : zero(u)
+ return a > 0 ? flux(u, orientation, equations) : SVector(zero(RealT))
end
@inline function splitting_lax_friedrichs(u, ::Val{:minus}, orientation::Integer,
equations::LinearScalarAdvectionEquation1D)
+ RealT = eltype(u)
a = equations.advection_velocity[1]
- return a < 0 ? flux(u, orientation, equations) : zero(u)
+ return a < 0 ? flux(u, orientation, equations) : SVector(zero(RealT))
end
# Convert conservative variables to primitive
@@ -217,11 +221,11 @@ end
@inline cons2entropy(u, equation::LinearScalarAdvectionEquation1D) = u
# Calculate entropy for a conservative state `cons`
-@inline entropy(u::Real, ::LinearScalarAdvectionEquation1D) = 0.5 * u^2
+@inline entropy(u::Real, ::LinearScalarAdvectionEquation1D) = 0.5f0 * u^2
@inline entropy(u, equation::LinearScalarAdvectionEquation1D) = entropy(u[1], equation)
# Calculate total energy for a conservative state `cons`
-@inline energy_total(u::Real, ::LinearScalarAdvectionEquation1D) = 0.5 * u^2
+@inline energy_total(u::Real, ::LinearScalarAdvectionEquation1D) = 0.5f0 * u^2
@inline function energy_total(u, equation::LinearScalarAdvectionEquation1D)
energy_total(u[1], equation)
end
diff --git a/src/equations/linear_scalar_advection_2d.jl b/src/equations/linear_scalar_advection_2d.jl
index d90bf0c8793..5e4f8463f52 100644
--- a/src/equations/linear_scalar_advection_2d.jl
+++ b/src/equations/linear_scalar_advection_2d.jl
@@ -34,8 +34,8 @@ varnames(::typeof(cons2prim), ::LinearScalarAdvectionEquation2D) = ("scalar",)
function x_trans_periodic_2d(x, domain_length = SVector(10, 10), center = SVector(0, 0))
x_normalized = x .- center
x_shifted = x_normalized .% domain_length
- x_offset = ((x_shifted .< -0.5 * domain_length) -
- (x_shifted .> 0.5 * domain_length)) .* domain_length
+ x_offset = ((x_shifted .< -0.5f0 * domain_length) -
+ (x_shifted .> 0.5f0 * domain_length)) .* domain_length
return center + x_shifted + x_offset
end
@@ -47,9 +47,10 @@ A constant initial condition to test free-stream preservation.
"""
function initial_condition_constant(x, t, equation::LinearScalarAdvectionEquation2D)
# Store translated coordinate for easy use of exact solution
+ RealT = eltype(x)
x_trans = x_trans_periodic_2d(x - equation.advection_velocity * t)
- return SVector(2.0)
+ return SVector(RealT(2))
end
"""
@@ -60,13 +61,14 @@ A smooth initial condition used for convergence tests.
function initial_condition_convergence_test(x, t,
equation::LinearScalarAdvectionEquation2D)
# Store translated coordinate for easy use of exact solution
+ RealT = eltype(x)
x_trans = x - equation.advection_velocity * t
- c = 1.0
- A = 0.5
+ c = 1
+ A = 0.5f0
L = 2
- f = 1 / L
- omega = 2 * pi * f
+ f = 1.0f0 / L
+ omega = 2 * convert(RealT, pi) * f
scalar = c + A * sin(omega * sum(x_trans))
return SVector(scalar)
end
@@ -280,11 +282,11 @@ end
@inline cons2entropy(u, equation::LinearScalarAdvectionEquation2D) = u
# Calculate entropy for a conservative state `cons`
-@inline entropy(u::Real, ::LinearScalarAdvectionEquation2D) = 0.5 * u^2
+@inline entropy(u::Real, ::LinearScalarAdvectionEquation2D) = 0.5f0 * u^2
@inline entropy(u, equation::LinearScalarAdvectionEquation2D) = entropy(u[1], equation)
# Calculate total energy for a conservative state `cons`
-@inline energy_total(u::Real, ::LinearScalarAdvectionEquation2D) = 0.5 * u^2
+@inline energy_total(u::Real, ::LinearScalarAdvectionEquation2D) = 0.5f0 * u^2
@inline function energy_total(u, equation::LinearScalarAdvectionEquation2D)
energy_total(u[1], equation)
end
diff --git a/src/equations/linear_scalar_advection_3d.jl b/src/equations/linear_scalar_advection_3d.jl
index 7b19974eb49..088f934cc3e 100644
--- a/src/equations/linear_scalar_advection_3d.jl
+++ b/src/equations/linear_scalar_advection_3d.jl
@@ -38,9 +38,10 @@ A constant initial condition to test free-stream preservation.
"""
function initial_condition_constant(x, t, equation::LinearScalarAdvectionEquation3D)
# Store translated coordinate for easy use of exact solution
+ RealT = eltype(x)
x_trans = x - equation.advection_velocity * t
- return SVector(2.0)
+ return SVector(RealT(2))
end
"""
@@ -51,13 +52,14 @@ A smooth initial condition used for convergence tests.
function initial_condition_convergence_test(x, t,
equation::LinearScalarAdvectionEquation3D)
# Store translated coordinate for easy use of exact solution
+ RealT = eltype(x)
x_trans = x - equation.advection_velocity * t
- c = 1.0
- A = 0.5
+ c = 1
+ A = 0.5f0
L = 2
- f = 1 / L
- omega = 2 * pi * f
+ f = 1.0f0 / L
+ omega = 2 * convert(RealT, pi) * f
scalar = c + A * sin(omega * sum(x_trans))
return SVector(scalar)
end
@@ -82,10 +84,10 @@ A sine wave in the conserved variable.
"""
function initial_condition_sin(x, t, equation::LinearScalarAdvectionEquation3D)
# Store translated coordinate for easy use of exact solution
+ RealT = eltype(x)
x_trans = x - equation.advection_velocity * t
- scalar = sin(2 * pi * x_trans[1]) * sin(2 * pi * x_trans[2]) *
- sin(2 * pi * x_trans[3])
+ scalar = sinpi(2 * x_trans[1]) * sinpi(2 * x_trans[2]) * sinpi(2 * x_trans[3])
return SVector(scalar)
end
@@ -199,11 +201,11 @@ end
@inline cons2entropy(u, equation::LinearScalarAdvectionEquation3D) = u
# Calculate entropy for a conservative state `cons`
-@inline entropy(u::Real, ::LinearScalarAdvectionEquation3D) = 0.5 * u^2
+@inline entropy(u::Real, ::LinearScalarAdvectionEquation3D) = 0.5f0 * u^2
@inline entropy(u, equation::LinearScalarAdvectionEquation3D) = entropy(u[1], equation)
# Calculate total energy for a conservative state `cons`
-@inline energy_total(u::Real, ::LinearScalarAdvectionEquation3D) = 0.5 * u^2
+@inline energy_total(u::Real, ::LinearScalarAdvectionEquation3D) = 0.5f0 * u^2
@inline function energy_total(u, equation::LinearScalarAdvectionEquation3D)
energy_total(u[1], equation)
end
diff --git a/test/test_type.jl b/test/test_type.jl
index 7bea104bb86..933d364185e 100644
--- a/test/test_type.jl
+++ b/test/test_type.jl
@@ -685,6 +685,249 @@ isdir(outdir) && rm(outdir, recursive = true)
@test typeof(@inferred density_pressure(u, equations)) == RealT
end
end
+
+ @timed_testset "Linear Scalar Advection 1D" begin
+ for RealT in (Float32, Float64)
+ equations = @inferred LinearScalarAdvectionEquation1D(RealT(1))
+
+ x = SVector(zero(RealT))
+ t = zero(RealT)
+ u = u_ll = u_rr = u_inner = SVector(one(RealT))
+ orientation = 1
+ directions = [1, 2]
+
+ surface_flux_function = flux_lax_friedrichs
+
+ @test eltype(@inferred initial_condition_constant(x, t, equations)) == RealT
+ @test eltype(@inferred initial_condition_convergence_test(x, t, equations)) ==
+ RealT
+ @test eltype(@inferred initial_condition_gauss(x, t, equations)) == RealT
+ @test eltype(@inferred Trixi.initial_condition_sin(x, t, equations)) == RealT
+ @test eltype(@inferred Trixi.initial_condition_linear_x(x, t, equations)) ==
+ RealT
+
+ for direction in directions
+ if RealT == Float32
+ # check `surface_flux_function` (test broken)
+ @test_broken eltype(@inferred Trixi.boundary_condition_linear_x(u_inner,
+ orientation,
+ direction,
+ x, t,
+ surface_flux_function,
+ equations)) ==
+ RealT
+ else
+ @test eltype(@inferred Trixi.boundary_condition_linear_x(u_inner,
+ orientation,
+ direction, x,
+ t,
+ surface_flux_function,
+ equations)) ==
+ RealT
+ end
+ end
+
+ @test eltype(@inferred flux(u, orientation, equations)) == RealT
+ @test eltype(@inferred flux_godunov(u_ll, u_rr, orientation, equations)) ==
+ RealT
+ @test eltype(@inferred Trixi.flux_engquist_osher(u_ll, u_rr, orientation,
+ equations)) == RealT
+
+ @test eltype(eltype(@inferred splitting_lax_friedrichs(u, orientation,
+ equations))) ==
+ RealT
+
+ @test typeof(@inferred max_abs_speed_naive(u_ll, u_rr, orientation,
+ equations)) == RealT
+ @test eltype(@inferred Trixi.max_abs_speeds(equations)) == RealT
+ @test eltype(@inferred cons2prim(u, equations)) == RealT
+ @test eltype(@inferred cons2entropy(u, equations)) == RealT
+ @test typeof(@inferred entropy(u, equations)) == RealT
+ @test typeof(@inferred energy_total(u, equations)) == RealT
+ end
+ end
+
+ @timed_testset "Linear Scalar Advection 2D" begin
+ for RealT in (Float32, Float64)
+ equations = @inferred LinearScalarAdvectionEquation2D(RealT(1), RealT(1))
+
+ x = SVector(zero(RealT), zero(RealT))
+ t = zero(RealT)
+ u = u_ll = u_rr = u_inner = SVector(one(RealT))
+ orientations = [1, 2]
+ directions = [1, 2, 3, 4]
+ normal_direction = SVector(one(RealT), zero(RealT))
+
+ surface_flux_function = flux_lax_friedrichs
+
+ @test eltype(@inferred Trixi.x_trans_periodic_2d(x)) == RealT
+
+ @test eltype(@inferred initial_condition_constant(x, t, equations)) == RealT
+ @test eltype(@inferred initial_condition_convergence_test(x, t, equations)) ==
+ RealT
+ @test eltype(@inferred initial_condition_gauss(x, t, equations)) == RealT
+ @test eltype(@inferred Trixi.initial_condition_sin_sin(x, t, equations)) ==
+ RealT
+ @test eltype(@inferred Trixi.initial_condition_linear_x_y(x, t, equations)) ==
+ RealT
+ @test eltype(@inferred Trixi.initial_condition_linear_x(x, t, equations)) ==
+ RealT
+ @test eltype(@inferred Trixi.initial_condition_linear_y(x, t, equations)) ==
+ RealT
+
+ for orientation in orientations
+ for direction in directions
+ if RealT == Float32
+ # check `surface_flux_function` (test broken)
+ @test_broken eltype(@inferred Trixi.boundary_condition_linear_x_y(u_inner,
+ orientation,
+ direction,
+ x,
+ t,
+ surface_flux_function,
+ equations)) ==
+ RealT
+ @test_broken eltype(@inferred Trixi.boundary_condition_linear_x(u_inner,
+ orientation,
+ direction,
+ x,
+ t,
+ surface_flux_function,
+ equations)) ==
+ RealT
+ @test_broken eltype(@inferred Trixi.boundary_condition_linear_y(u_inner,
+ orientation,
+ direction,
+ x,
+ t,
+ surface_flux_function,
+ equations)) ==
+ RealT
+ else
+ @test eltype(@inferred Trixi.boundary_condition_linear_x_y(u_inner,
+ orientation,
+ direction,
+ x,
+ t,
+ surface_flux_function,
+ equations)) ==
+ RealT
+ @test eltype(@inferred Trixi.boundary_condition_linear_x(u_inner,
+ orientation,
+ direction,
+ x,
+ t,
+ surface_flux_function,
+ equations)) ==
+ RealT
+ @test eltype(@inferred Trixi.boundary_condition_linear_y(u_inner,
+ orientation,
+ direction,
+ x,
+ t,
+ surface_flux_function,
+ equations)) ==
+ RealT
+ end
+ end
+ end
+
+ @test eltype(@inferred flux(u, normal_direction, equations)) == RealT
+ @test eltype(@inferred flux_godunov(u_ll, u_rr, normal_direction, equations)) ==
+ RealT
+
+ @test eltype(@inferred max_abs_speed_naive(u_ll, u_rr, normal_direction,
+ equations)) ==
+ RealT
+
+ for orientation in orientations
+ @test eltype(@inferred flux(u, orientation, equations)) == RealT
+ @test eltype(@inferred flux_godunov(u_ll, u_rr, orientation, equations)) ==
+ RealT
+
+ @test typeof(@inferred max_abs_speed_naive(u_ll, u_rr, orientation,
+ equations)) ==
+ RealT
+ end
+
+ @test eltype(@inferred Trixi.max_abs_speeds(equations)) == RealT
+ @test eltype(@inferred cons2prim(u, equations)) == RealT
+ @test eltype(@inferred cons2entropy(u, equations)) == RealT
+ @test typeof(@inferred entropy(u, equations)) == RealT
+ @test typeof(@inferred energy_total(u, equations)) == RealT
+ end
+ end
+
+ @timed_testset "Linear Scalar Advection 3D" begin
+ for RealT in (Float32, Float64)
+ equations = @inferred LinearScalarAdvectionEquation3D(RealT(1), RealT(1),
+ RealT(1))
+
+ x = SVector(zero(RealT), zero(RealT), zero(RealT))
+ t = zero(RealT)
+ u = u_ll = u_rr = u_inner = SVector(one(RealT))
+ orientations = [1, 2, 3]
+ directions = [1, 2, 3, 4, 5, 6]
+ normal_direction = SVector(one(RealT), zero(RealT), zero(RealT))
+
+ surface_flux_function = flux_lax_friedrichs
+
+ @test eltype(@inferred initial_condition_constant(x, t, equations)) == RealT
+ @test eltype(@inferred initial_condition_convergence_test(x, t, equations)) ==
+ RealT
+ @test eltype(@inferred initial_condition_gauss(x, t, equations)) == RealT
+ @test eltype(@inferred Trixi.initial_condition_sin(x, t, equations)) == RealT
+ @test eltype(@inferred Trixi.initial_condition_linear_z(x, t, equations)) ==
+ RealT
+
+ for orientation in orientations
+ for direction in directions
+ if RealT == Float32
+ # check `surface_flux_function` (test broken)
+ @test_broken eltype(@inferred Trixi.boundary_condition_linear_z(u_inner,
+ orientation,
+ direction,
+ x,
+ t,
+ surface_flux_function,
+ equations)) ==
+ RealT
+ else
+ @test eltype(@inferred Trixi.boundary_condition_linear_z(u_inner,
+ orientation,
+ direction,
+ x,
+ t,
+ surface_flux_function,
+ equations)) ==
+ RealT
+ end
+ end
+ end
+
+ @test eltype(@inferred flux(u, normal_direction, equations)) == RealT
+ @test eltype(@inferred flux_godunov(u_ll, u_rr, normal_direction, equations)) ==
+ RealT
+
+ @test eltype(@inferred max_abs_speed_naive(u_ll, u_rr, normal_direction,
+ equations)) == RealT
+
+ for orientation in orientations
+ @test eltype(@inferred flux(u, orientation, equations)) == RealT
+ @test eltype(@inferred flux_godunov(u_ll, u_rr, orientation, equations)) ==
+ RealT
+
+ @test typeof(@inferred max_abs_speed_naive(u_ll, u_rr, orientation,
+ equations)) == RealT
+ end
+
+ @test eltype(@inferred Trixi.max_abs_speeds(equations)) == RealT
+ @test eltype(@inferred cons2prim(u, equations)) == RealT
+ @test eltype(@inferred cons2entropy(u, equations)) == RealT
+ @test typeof(@inferred entropy(u, equations)) == RealT
+ @test typeof(@inferred energy_total(u, equations)) == RealT
+ end
+ end
end
end # module
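One detail of the 3D advection hunk above worth highlighting: `sin(2 * pi * x)` was replaced by `sinpi(2 * x)`, which sidesteps the `pi::Irrational` promotion entirely and is more accurate near integer multiples of pi. Illustrative values, assuming Float32 input:

# Illustrative only: both results stay Float32, but sinpi needs no converted pi.
x = 0.25f0
sin(2 * convert(Float32, pi) * x)   # Float32, via an explicitly converted pi
sinpi(2 * x)                        # Float32, computed directly from the rational multiple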
From 16bdaf0101816a7cb5312d99b04721b49e587353 Mon Sep 17 00:00:00 2001
From: Daniel Doehring
Date: Tue, 18 Jun 2024 09:08:42 +0200
Subject: [PATCH 43/44] Do not set finalstep false in PERK2 step! (#1983)
---
.../paired_explicit_runge_kutta/methods_PERK2.jl | 2 --
1 file changed, 2 deletions(-)
diff --git a/src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl b/src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl
index 1d9680153e6..8a5c2a24617 100644
--- a/src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl
+++ b/src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl
@@ -296,8 +296,6 @@ function step!(integrator::PairedExplicitRK2Integrator)
t_end = last(prob.tspan)
callbacks = integrator.opts.callback
- integrator.finalstep = false
-
@assert !integrator.finalstep
if isnan(integrator.dt)
error("time step size `dt` is NaN")
From 75d8c67629562efd24b2a04e46d22b0a1f4f572c Mon Sep 17 00:00:00 2001
From: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com>
Date: Tue, 18 Jun 2024 09:36:54 +0200
Subject: [PATCH 44/44] Make `copy_to_coupled_boundary!` threaded (#1981)
* Make `copy_to_coupled_boundary!` threaded
* Update src/semidiscretization/semidiscretization_coupled.jl
Co-authored-by: Hendrik Ranocha
* Reformat
---------
Co-authored-by: Simon Candelaresi <10759273+SimonCan@users.noreply.github.com>
Co-authored-by: Hendrik Ranocha
---
.../semidiscretization_coupled.jl | 13 +++++++------
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/src/semidiscretization/semidiscretization_coupled.jl b/src/semidiscretization/semidiscretization_coupled.jl
index 9011ac91215..4843f211089 100644
--- a/src/semidiscretization/semidiscretization_coupled.jl
+++ b/src/semidiscretization/semidiscretization_coupled.jl
@@ -603,10 +603,14 @@ function copy_to_coupled_boundary!(boundary_condition::BoundaryConditionCoupled{
i_cell_start, i_cell_step = index_to_start_step_2d(indices[1], axes(mesh_other, 1))
j_cell_start, j_cell_step = index_to_start_step_2d(indices[2], axes(mesh_other, 2))
- i_cell = i_cell_start
- j_cell = j_cell_start
+ # We need indices starting at 1 for the handling of `i_cell` etc.
+ Base.require_one_based_indexing(cells)
+
+ @threaded for i in eachindex(cells)
+ cell = cells[i]
+ i_cell = i_cell_start + (i - 1) * i_cell_step
+ j_cell = j_cell_start + (i - 1) * j_cell_step
- for cell in cells
i_node = i_node_start
j_node = j_node_start
element_id = linear_indices[i_cell, j_cell]
@@ -628,9 +632,6 @@ function copy_to_coupled_boundary!(boundary_condition::BoundaryConditionCoupled{
i_node += i_node_step
j_node += j_node_step
end
-
- i_cell += i_cell_step
- j_cell += j_cell_step
end
end
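The threading change works by removing the loop-carried state: instead of incrementing `i_cell` and `j_cell` once per iteration, each iteration now derives them from its own index, so iterations no longer depend on one another and `@threaded` can split them across threads. A self-contained sketch of the same transformation with hypothetical names, using plain `Threads.@threads` instead of Trixi's `@threaded`:

# Sketch only: per-iteration state derived from the loop index, so the body
# can run in any order and in parallel.
using Base.Threads: @threads

function visit_cells_sketch(cells, i_cell_start, i_cell_step)
    Base.require_one_based_indexing(cells)   # mirrors the assumption made above
    visited = similar(cells, Int)
    @threads for i in eachindex(cells)
        i_cell = i_cell_start + (i - 1) * i_cell_step
        visited[i] = i_cell                  # stand-in for the per-cell copy work
    end
    return visited
end

visit_cells_sketch(collect(1:4), 10, 2)      # -> [10, 12, 14, 16]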