From bb074193b9f8b1f66be706ef2a5db8cad7fa6b6b Mon Sep 17 00:00:00 2001 From: SimonCan Date: Mon, 26 Jun 2023 16:31:21 +0100 Subject: [PATCH 001/166] Added coupling converters. --- src/Trixi.jl | 2 ++ .../coupling_converters.jl | 13 ++++++++++ .../coupling_converters_2d.jl | 25 +++++++++++++++++++ 3 files changed, 40 insertions(+) create mode 100644 src/coupling_converters/coupling_converters.jl create mode 100644 src/coupling_converters/coupling_converters_2d.jl diff --git a/src/Trixi.jl b/src/Trixi.jl index 66878f4b459..85b536352bd 100644 --- a/src/Trixi.jl +++ b/src/Trixi.jl @@ -189,6 +189,8 @@ export boundary_condition_do_nothing, BoundaryConditionNavierStokesWall, NoSlip, Adiabatic, Isothermal, BoundaryConditionCoupled +export coupling_converter_heaviside_2d + export initial_condition_convergence_test, source_terms_convergence_test export source_terms_harmonic export initial_condition_poisson_nonperiodic, source_terms_poisson_nonperiodic, diff --git a/src/coupling_converters/coupling_converters.jl b/src/coupling_converters/coupling_converters.jl new file mode 100644 index 00000000000..a82b849f284 --- /dev/null +++ b/src/coupling_converters/coupling_converters.jl @@ -0,0 +1,13 @@ +# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). +# Since these FMAs can increase the performance of many numerical algorithms, +# we need to opt-in explicitly. +# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. +@muladd begin +#! format: noindent + +#################################################################################################### +# Include files with actual implementations for different systems of equations. + +include("coupling_converters_2d.jl") + +end # @muladd diff --git a/src/coupling_converters/coupling_converters_2d.jl b/src/coupling_converters/coupling_converters_2d.jl new file mode 100644 index 00000000000..f18058632c0 --- /dev/null +++ b/src/coupling_converters/coupling_converters_2d.jl @@ -0,0 +1,25 @@ +# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). +# Since these FMAs can increase the performance of many numerical algorithms, +# we need to opt-in explicitly. +# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. +@muladd begin +#! format: noindent + +@doc raw""" + Coupling converter function for a system of two LinearScalarAdvectionEquation2D. + +The coupling is given as a Heaviside step. +```math +c(x) = {c_0, for x \ge x_0 \times s + 0, for x < x_0 \times s} +``` +Here, `s` is the sign of the step function, x_0 the save_position +of the step and c_0 the amplitude. +""" +function coupling_converter_heaviside_2d(x, x_0, c_0, s, + equations_left::LinearScalarAdvectionEquation2D, + equation_right::LinearScalarAdvectionEquation2D) + return c_0 * (s*sign(x[2] - x_0) + 1.0)/2.0 +end + +end # @muladd From 95892bf8348689fa19e303f0d37c71cb563c3a05 Mon Sep 17 00:00:00 2001 From: SimonCan Date: Thu, 29 Jun 2023 14:23:06 +0100 Subject: [PATCH 002/166] Added generic converter_function for structured 2d meshes. 
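The Heaviside converter added in the first patch returns either the amplitude c_0 or zero, depending on which side of the step position x_0 the node coordinate x[2] lies, with the sign s selecting the orientation of the step. The following minimal sketch reproduces that step logic in plain Julia, outside the Trixi equation types, so both branches can be checked in isolation; the function name `heaviside_step` is illustrative and not part of the patch.

```julia
# Stand-alone sketch of the step value used by coupling_converter_heaviside_2d:
# c_0 where s * sign(x[2] - x_0) is positive, zero on the other side.
heaviside_step(x, x_0, c_0, s) = c_0 * (s * sign(x[2] - x_0) + 1.0) / 2.0

# Check both branches for a step at x_0 = 0.5 with amplitude c_0 = 2.0:
@assert heaviside_step((0.0, 1.0), 0.5, 2.0, 1.0) == 2.0  # above the step
@assert heaviside_step((0.0, 0.0), 0.5, 2.0, 1.0) == 0.0  # below the step
```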
--- src/Trixi.jl | 1 + .../coupling_converters_2d.jl | 20 +++++++++++++++---- .../semidiscretization_coupled.jl | 14 ++++++++----- 3 files changed, 26 insertions(+), 9 deletions(-) diff --git a/src/Trixi.jl b/src/Trixi.jl index 85b536352bd..0759ba129d2 100644 --- a/src/Trixi.jl +++ b/src/Trixi.jl @@ -123,6 +123,7 @@ include("callbacks_step/callbacks_step.jl") include("callbacks_stage/callbacks_stage.jl") include("semidiscretization/semidiscretization_euler_gravity.jl") include("time_integration/time_integration.jl") +include("coupling_converters/coupling_converters.jl") # `trixi_include` and special elixirs such as `convergence_test` include("auxiliary/special_elixirs.jl") diff --git a/src/coupling_converters/coupling_converters_2d.jl b/src/coupling_converters/coupling_converters_2d.jl index f18058632c0..e360ffc4eac 100644 --- a/src/coupling_converters/coupling_converters_2d.jl +++ b/src/coupling_converters/coupling_converters_2d.jl @@ -5,6 +5,7 @@ @muladd begin #! format: noindent + @doc raw""" Coupling converter function for a system of two LinearScalarAdvectionEquation2D. @@ -16,10 +17,21 @@ c(x) = {c_0, for x \ge x_0 \times s Here, `s` is the sign of the step function, x_0 the save_position of the step and c_0 the amplitude. """ -function coupling_converter_heaviside_2d(x, x_0, c_0, s, - equations_left::LinearScalarAdvectionEquation2D, - equation_right::LinearScalarAdvectionEquation2D) - return c_0 * (s*sign(x[2] - x_0) + 1.0)/2.0 +function coupling_converter_heaviside_2d(x_0, c_0, s) + return (x, u) -> c_0 * (s*sign(x[2] - x_0) + 1.0)/2.0 * u +end + + +@doc raw""" + Coupling converter function for a system of two LinearScalarAdvectionEquation2D. + +The coupling is given as a linear function. +```math +c(x) = x * u(x) +``` +""" +function coupling_converter_linear_2d() + return (x, u) -> x[2]*u end end # @muladd diff --git a/src/semidiscretization/semidiscretization_coupled.jl b/src/semidiscretization/semidiscretization_coupled.jl index b7adff78425..babf4527a57 100644 --- a/src/semidiscretization/semidiscretization_coupled.jl +++ b/src/semidiscretization/semidiscretization_coupled.jl @@ -371,8 +371,9 @@ mutable struct BoundaryConditionCoupled{NDIMS, NDIMST2M1, uEltype <: Real, Indic other_semi_index :: Int other_orientation :: Int indices :: Indices + coupling_converter :: Function - function BoundaryConditionCoupled(other_semi_index, indices, uEltype) + function BoundaryConditionCoupled(other_semi_index, indices, uEltype, coupling_converter) NDIMS = length(indices) u_boundary = Array{uEltype, NDIMS * 2 - 1}(undef, ntuple(_ -> 0, NDIMS * 2 - 1)) @@ -385,7 +386,7 @@ mutable struct BoundaryConditionCoupled{NDIMS, NDIMST2M1, uEltype <: Real, Indic end new{NDIMS, NDIMS * 2 - 1, uEltype, typeof(indices)}(u_boundary, other_semi_index, - other_orientation, indices) + other_orientation, indices, coupling_converter) end end @@ -495,9 +496,12 @@ function copy_to_coupled_boundary!(boundary_condition::BoundaryConditionCoupled{ for i in eachnode(solver) for v in 1:size(u, 1) - boundary_condition.u_boundary[v, i, cell] = u[v, i_node, j_node, - linear_indices[i_cell, - j_cell]] + x = cache.elements.node_coordinates[:, i_node, j_node, linear_indices[i_cell, j_cell]] + converted_u = boundary_condition.coupling_converter(x, u[:, i_node, j_node, linear_indices[i_cell, j_cell]]) + boundary_condition.u_boundary[v, i, cell] = converted_u[v] + # boundary_condition.u_boundary[v, i, cell] = u[v, i_node, j_node, + # linear_indices[i_cell, + # j_cell]] end i_node += i_node_step j_node += j_node_step From 
4e82276d4717eca3a78bc4d083d5dc47ca516240 Mon Sep 17 00:00:00 2001 From: SimonCan Date: Mon, 3 Jul 2023 16:33:47 +0100 Subject: [PATCH 003/166] Added example elixir for coupling converters. --- .../elixir_advection_coupled_converter.jl | 127 ++++++++++++++++++ 1 file changed, 127 insertions(+) create mode 100644 examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl diff --git a/examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl b/examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl new file mode 100644 index 00000000000..5546209aa19 --- /dev/null +++ b/examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl @@ -0,0 +1,127 @@ +using OrdinaryDiffEq +using Trixi +using Trixi2Vtk + + +############################################################################### +# Coupled semidiscretization of two linear advection systems, which are connected periodically +# +# In this elixir, we have a square domain that is divided into a left half and a right half. On each +# half of the domain, a completely independent SemidiscretizationHyperbolic is created for the +# linear advection equations. The two systems are coupled in the x-direction and have periodic +# boundaries in the y-direction. For a high-level overview, see also the figure below: +# +# (-1, 1) ( 1, 1) +# ┌────────────────────┬────────────────────┐ +# │ ↑ periodic ↑ │ ↑ periodic ↑ │ +# │ │ │ +# │ │ │ +# │ ========= │ ========= │ +# │ system #1 │ system #2 │ +# │ ========= │ ========= │ +# │ │ │ +# │ │ │ +# │ │ │ +# │ │ │ +# │ coupled -->│<-- coupled │ +# │ │ │ +# │<-- coupled │ coupled -->│ +# │ │ │ +# │ │ │ +# │ ↓ periodic ↓ │ ↓ periodic ↓ │ +# └────────────────────┴────────────────────┘ +# (-1, -1) ( 1, -1) + +advection_velocity = (0.2, -0.7) +equations = LinearScalarAdvectionEquation2D(advection_velocity) + +# Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux +solver = DGSEM(polydeg=3, surface_flux=flux_lax_friedrichs) + +# First mesh is the left half of a [-1,1]^2 square +coordinates_min1 = (-1.0, -1.0) # minimum coordinates (min(x), min(y)) +coordinates_max1 = ( 0.0, 1.0) # maximum coordinates (max(x), max(y)) + +# Define identical resolution as a variable such that it is easier to change from `trixi_include` +cells_per_dimension = (8, 16) + +cells_per_dimension1 = cells_per_dimension + +mesh1 = StructuredMesh(cells_per_dimension1, coordinates_min1, coordinates_max1) + +coupling_function1 = coupling_converter_heaviside_2d(-0.5, 1.0, 1.0, equations) +# coupling_function1 = (x, u) -> (sign(x[2] - 0.0)*0.1 + 1.0)/1.1 * u + +# A semidiscretization collects data structures and functions for the spatial discretization +semi1 = SemidiscretizationHyperbolic(mesh1, equations, initial_condition_convergence_test, solver, + boundary_conditions=( + # Connect left boundary with right boundary of right mesh + x_neg=BoundaryConditionCoupled(2, (:end, :i_forward), Float64, coupling_function1), + # Connect right boundary with left boundary of right mesh + x_pos=BoundaryConditionCoupled(2, (:begin, :i_forward), Float64, coupling_function1), + y_neg=boundary_condition_periodic, + y_pos=boundary_condition_periodic)) + + +# Second mesh is the right half of a [-1,1]^2 square +coordinates_min2 = (0.0, -1.0) # minimum coordinates (min(x), min(y)) +coordinates_max2 = (1.0, 1.0) # maximum coordinates (max(x), max(y)) + +cells_per_dimension2 = cells_per_dimension + +mesh2 = StructuredMesh(cells_per_dimension2, coordinates_min2, coordinates_max2) + 
+coupling_function2 = coupling_converter_heaviside_2d(-0.5, 1.0, 1.0, equations) +# coupling_function2 = (x, u) -> (sign(x[2] - 0.0)*0.1 + 1.0)/1.1 * u + +semi2 = SemidiscretizationHyperbolic(mesh2, equations, initial_condition_convergence_test, solver, + boundary_conditions=( + # Connect left boundary with right boundary of left mesh + x_neg=BoundaryConditionCoupled(1, (:end, :i_forward), Float64, coupling_function2), + # Connect right boundary with left boundary of left mesh + x_pos=BoundaryConditionCoupled(1, (:begin, :i_forward), Float64, coupling_function2), + y_neg=boundary_condition_periodic, + y_pos=boundary_condition_periodic)) + +# Create a semidiscretization that bundles semi1 and semi2 +semi = SemidiscretizationCoupled(semi1, semi2) + +############################################################################### +# ODE solvers, callbacks etc. + +# Create ODE problem with time span from 0.0 to 2.0 +ode = semidiscretize(semi, (0.0, 20.0)); + +# At the beginning of the main loop, the SummaryCallback prints a summary of the simulation setup +# and resets the timers +summary_callback = SummaryCallback() + +# The AnalysisCallback allows to analyse the solution in regular intervals and prints the results +analysis_callback1 = AnalysisCallback(semi1, interval=100) +analysis_callback2 = AnalysisCallback(semi2, interval=100) +analysis_callback = AnalysisCallbackCoupled(semi, analysis_callback1, analysis_callback2) + +# The SaveSolutionCallback allows to save the solution to a file in regular intervals +save_solution = SaveSolutionCallback(interval=1, + solution_variables=cons2prim) + +# The StepsizeCallback handles the re-calculation of the maximum Δt after each time step +stepsize_callback = StepsizeCallback(cfl=1.6) + +# Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver +callbacks = CallbackSet(summary_callback, analysis_callback, save_solution, stepsize_callback) + + +############################################################################### +# run the simulation + +# OrdinaryDiffEq's `solve` method evolves the solution in time and executes the passed callbacks +sol = solve(ode, CarpenterKennedy2N54(williamson_condition=false), + dt=1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep=false, callback=callbacks); + +# Print the timer summary +summary_callback() + +# Convert the snapshot to vtk data. +trixi2vtk("out/solution_*.h5") From 7355ed02d0425a7735154d839f6880c73f7dfcc1 Mon Sep 17 00:00:00 2001 From: SimonCan Date: Mon, 3 Jul 2023 17:24:58 +0100 Subject: [PATCH 004/166] Cleaned up converter coupling elixir. 
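In the elixir above, each coupled boundary condition receives a two-argument converter; with the form introduced in the second patch, `coupling_converter_heaviside_2d` builds that converter as a closure that captures the step parameters. The sketch below shows the same pattern in isolation and applies the resulting closure to a sample state; the name `make_step_converter` and the sample values are illustrative assumptions, not part of the patch.

```julia
# Converter-as-closure pattern: the step parameters are captured once and the
# boundary routine only ever calls the resulting (x, u) -> ... function.
# `make_step_converter` is an illustrative name, not part of the patch.
make_step_converter(x_0, c_0, s) = (x, u) -> c_0 * (s * sign(x[2] - x_0) + 1.0) / 2.0 * u

converter = make_step_converter(-0.5, 1.0, 1.0)
u_inner = [1.0]  # donor state at an inner boundary node

@show converter((0.0, 0.25), u_inner)   # above the step: [1.0], state passes through
@show converter((0.0, -0.75), u_inner)  # below the step: [0.0], state is suppressed
```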
--- .../elixir_advection_coupled_converter.jl | 23 +++++++++++-------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl b/examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl index 5546209aa19..8aada6bdb2b 100644 --- a/examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl +++ b/examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl @@ -1,10 +1,10 @@ using OrdinaryDiffEq using Trixi -using Trixi2Vtk - ############################################################################### -# Coupled semidiscretization of two linear advection systems, which are connected periodically +# Coupled semidiscretization of two linear advection systems using converter functions such that +# the upper half of the domain is coupled periodically, while the lower half is not coupled +# and any incoming wave is completely absorbed. # # In this elixir, we have a square domain that is divided into a left half and a right half. On each # half of the domain, a completely independent SemidiscretizationHyperbolic is created for the @@ -50,15 +50,19 @@ cells_per_dimension1 = cells_per_dimension mesh1 = StructuredMesh(cells_per_dimension1, coordinates_min1, coordinates_max1) coupling_function1 = coupling_converter_heaviside_2d(-0.5, 1.0, 1.0, equations) +# The user can define their own coupling functions. # coupling_function1 = (x, u) -> (sign(x[2] - 0.0)*0.1 + 1.0)/1.1 * u +boundary_conditions_x_neg1 = BoundaryConditionCoupled(2, (:end, :i_forward), Float64, coupling_function1) +boundary_conditions_x_pos1 = BoundaryConditionCoupled(2, (:begin, :i_forward), Float64, coupling_function1) + # A semidiscretization collects data structures and functions for the spatial discretization semi1 = SemidiscretizationHyperbolic(mesh1, equations, initial_condition_convergence_test, solver, boundary_conditions=( # Connect left boundary with right boundary of right mesh - x_neg=BoundaryConditionCoupled(2, (:end, :i_forward), Float64, coupling_function1), + x_neg=boundary_conditions_x_neg1, # Connect right boundary with left boundary of right mesh - x_pos=BoundaryConditionCoupled(2, (:begin, :i_forward), Float64, coupling_function1), + x_pos=boundary_conditions_x_pos1, y_neg=boundary_condition_periodic, y_pos=boundary_condition_periodic)) @@ -74,12 +78,15 @@ mesh2 = StructuredMesh(cells_per_dimension2, coordinates_min2, coordinates_max2) coupling_function2 = coupling_converter_heaviside_2d(-0.5, 1.0, 1.0, equations) # coupling_function2 = (x, u) -> (sign(x[2] - 0.0)*0.1 + 1.0)/1.1 * u +boundary_conditions_x_neg2 = BoundaryConditionCoupled(1, (:end, :i_forward), Float64, coupling_function2) +boundary_conditions_x_pos2 = BoundaryConditionCoupled(1, (:begin, :i_forward), Float64, coupling_function2) + semi2 = SemidiscretizationHyperbolic(mesh2, equations, initial_condition_convergence_test, solver, boundary_conditions=( # Connect left boundary with right boundary of left mesh - x_neg=BoundaryConditionCoupled(1, (:end, :i_forward), Float64, coupling_function2), + x_neg=boundary_conditions_x_neg2, # Connect right boundary with left boundary of left mesh - x_pos=BoundaryConditionCoupled(1, (:begin, :i_forward), Float64, coupling_function2), + x_pos=boundary_conditions_x_pos2, y_neg=boundary_condition_periodic, y_pos=boundary_condition_periodic)) @@ -123,5 +130,3 @@ sol = solve(ode, CarpenterKennedy2N54(williamson_condition=false), # Print the timer summary summary_callback() -# Convert the snapshot to vtk data. 
-trixi2vtk("out/solution_*.h5") From b8cabf41796ed89fb8a233732884b2b94946174a Mon Sep 17 00:00:00 2001 From: SimonCan Date: Mon, 3 Jul 2023 17:25:17 +0100 Subject: [PATCH 005/166] Added equations in coupling converters. --- src/coupling_converters/coupling_converters_2d.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/coupling_converters/coupling_converters_2d.jl b/src/coupling_converters/coupling_converters_2d.jl index e360ffc4eac..82ee854d826 100644 --- a/src/coupling_converters/coupling_converters_2d.jl +++ b/src/coupling_converters/coupling_converters_2d.jl @@ -17,7 +17,7 @@ c(x) = {c_0, for x \ge x_0 \times s Here, `s` is the sign of the step function, x_0 the save_position of the step and c_0 the amplitude. """ -function coupling_converter_heaviside_2d(x_0, c_0, s) +function coupling_converter_heaviside_2d(x_0, c_0, s, equations::LinearScalarAdvectionEquation2D) return (x, u) -> c_0 * (s*sign(x[2] - x_0) + 1.0)/2.0 * u end @@ -30,7 +30,7 @@ The coupling is given as a linear function. c(x) = x * u(x) ``` """ -function coupling_converter_linear_2d() +function coupling_converter_linear_2d(equations::LinearScalarAdvectionEquation2D) return (x, u) -> x[2]*u end From 6d521a7f292c8d6a78cfc616bf473c7f8128a06e Mon Sep 17 00:00:00 2001 From: SimonCan Date: Tue, 4 Jul 2023 14:48:28 +0100 Subject: [PATCH 006/166] Added converter functions. --- .../elixir_advection_coupled.jl | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/examples/structured_2d_dgsem/elixir_advection_coupled.jl b/examples/structured_2d_dgsem/elixir_advection_coupled.jl index 1e54e411db6..1e3146906b9 100644 --- a/examples/structured_2d_dgsem/elixir_advection_coupled.jl +++ b/examples/structured_2d_dgsem/elixir_advection_coupled.jl @@ -48,13 +48,15 @@ cells_per_dimension1 = cells_per_dimension mesh1 = StructuredMesh(cells_per_dimension1, coordinates_min1, coordinates_max1) -# A semidiscretization collects data structures and functions for the spatial discretization +boundary_conditions_x_neg1 = BoundaryConditionCoupled(2, (:end, :i_forward), Float64, coupling_converter_identity(equations)) +boundary_conditions_x_pos1 = BoundaryConditionCoupled(2, (:begin, :i_forward), Float64, coupling_converter_identity(equations)) + semi1 = SemidiscretizationHyperbolic(mesh1, equations, initial_condition_convergence_test, solver, boundary_conditions=( # Connect left boundary with right boundary of right mesh - x_neg=BoundaryConditionCoupled(2, (:end, :i_forward), Float64), + x_neg=boundary_conditions_x_neg1, # Connect right boundary with left boundary of right mesh - x_pos=BoundaryConditionCoupled(2, (:begin, :i_forward), Float64), + x_pos=boundary_conditions_x_pos1, y_neg=boundary_condition_periodic, y_pos=boundary_condition_periodic)) @@ -67,12 +69,15 @@ cells_per_dimension2 = cells_per_dimension mesh2 = StructuredMesh(cells_per_dimension2, coordinates_min2, coordinates_max2) +boundary_conditions_x_neg2 = BoundaryConditionCoupled(1, (:end, :i_forward), Float64, coupling_converter_identity(equations)) +boundary_conditions_x_pos2 = BoundaryConditionCoupled(1, (:begin, :i_forward), Float64, coupling_converter_identity(equations)) + semi2 = SemidiscretizationHyperbolic(mesh2, equations, initial_condition_convergence_test, solver, boundary_conditions=( # Connect left boundary with right boundary of left mesh - x_neg=BoundaryConditionCoupled(1, (:end, :i_forward), Float64), + x_neg=boundary_conditions_x_neg2, # Connect right boundary with left boundary of left mesh - 
x_pos=BoundaryConditionCoupled(1, (:begin, :i_forward), Float64), + x_pos=boundary_conditions_x_pos2, y_neg=boundary_condition_periodic, y_pos=boundary_condition_periodic)) From ab8e7507fb103a2859d4a4234151297428ad37e1 Mon Sep 17 00:00:00 2001 From: SimonCan Date: Tue, 4 Jul 2023 14:49:24 +0100 Subject: [PATCH 007/166] Added identity converter function. --- src/Trixi.jl | 3 ++- .../coupling_converters.jl | 25 +++++++++++++++++++ .../coupling_converters_2d.jl | 1 - test/test_structured_2d.jl | 7 ++++++ 4 files changed, 34 insertions(+), 2 deletions(-) diff --git a/src/Trixi.jl b/src/Trixi.jl index 0759ba129d2..613b45e5e1f 100644 --- a/src/Trixi.jl +++ b/src/Trixi.jl @@ -190,7 +190,8 @@ export boundary_condition_do_nothing, BoundaryConditionNavierStokesWall, NoSlip, Adiabatic, Isothermal, BoundaryConditionCoupled -export coupling_converter_heaviside_2d +export coupling_converter_identity, + coupling_converter_heaviside_2d, coupling_converter_linear_2d export initial_condition_convergence_test, source_terms_convergence_test export source_terms_harmonic diff --git a/src/coupling_converters/coupling_converters.jl b/src/coupling_converters/coupling_converters.jl index a82b849f284..53ebf900e41 100644 --- a/src/coupling_converters/coupling_converters.jl +++ b/src/coupling_converters/coupling_converters.jl @@ -5,6 +5,31 @@ @muladd begin #! format: noindent +@doc raw""" + coupling_converters + +Define converter functions for two coupled systems. +These should be used together with SemidiscretizationCoupled. +Using converter functions we can couple two systems that do not +share any variables. +This is done by taking the last inner point of system i, apply +a converter function on the state vector u_i and obtain a state +vector u_j for the boundary of system j. +""" + +@doc raw""" + Identity coupling converter function. + +The coupling is given as a linear function. +```math +c(x) = u(x) +``` +""" +function coupling_converter_identity(equations::AbstractEquations) + return (x, u) -> u +end + + #################################################################################################### # Include files with actual implementations for different systems of equations. diff --git a/src/coupling_converters/coupling_converters_2d.jl b/src/coupling_converters/coupling_converters_2d.jl index 82ee854d826..34011f4e1f7 100644 --- a/src/coupling_converters/coupling_converters_2d.jl +++ b/src/coupling_converters/coupling_converters_2d.jl @@ -5,7 +5,6 @@ @muladd begin #! format: noindent - @doc raw""" Coupling converter function for a system of two LinearScalarAdvectionEquation2D. diff --git a/test/test_structured_2d.jl b/test/test_structured_2d.jl index 16fc72f0a46..d593f6e5a21 100644 --- a/test/test_structured_2d.jl +++ b/test/test_structured_2d.jl @@ -32,6 +32,13 @@ isdir(outdir) && rm(outdir, recursive=true) end end + @trixi_testset "elixir_advection_coupled_converters.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_coupled_converters.jl"), + l2 = [0.97560175611287, 0.9707973849860191], + linf = [1.5703274355039958, 1.6235401582169442], + coverage_override = (maxiters=10^5,)) + end + @trixi_testset "elixir_advection_extended.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_extended.jl"), l2 = [4.220397559713772e-6], From 24773c7830f87b1b8775ec41d04c7809959de911 Mon Sep 17 00:00:00 2001 From: SimonCan Date: Wed, 5 Jul 2023 15:59:58 +0100 Subject: [PATCH 008/166] Autoformat for converter coupling implementation. 
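The identity converter added above makes the shared contract explicit: every converter is produced by a factory (which may take equation information) and the result is a plain callable taking the node position `x` and the donor state `u` and returning the state imposed on the receiving boundary. A minimal sketch of that contract, with illustrative names and without the Trixi equation types, looks as follows.

```julia
# Sketch of the shared converter contract: a factory returns an (x, u) -> u_converted
# callable. Names and sample values are illustrative only.
make_identity_converter() = (x, u) -> u               # forwards the donor state unchanged
make_scaling_converter(factor) = (x, u) -> factor * u # rescales the donor state

x_node = (0.0, 0.5)   # physical coordinates of the matching boundary node
u_donor = [0.7]       # donor state at that node

@show make_identity_converter()(x_node, u_donor)    # [0.7]
@show make_scaling_converter(0.5)(x_node, u_donor)  # [0.35]
```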
--- .../coupling_converters.jl | 2 -- .../coupling_converters_2d.jl | 9 +++---- .../semidiscretization_coupled.jl | 26 ++++++++++++------- 3 files changed, 20 insertions(+), 17 deletions(-) diff --git a/src/coupling_converters/coupling_converters.jl b/src/coupling_converters/coupling_converters.jl index 53ebf900e41..5856cb373ea 100644 --- a/src/coupling_converters/coupling_converters.jl +++ b/src/coupling_converters/coupling_converters.jl @@ -29,10 +29,8 @@ function coupling_converter_identity(equations::AbstractEquations) return (x, u) -> u end - #################################################################################################### # Include files with actual implementations for different systems of equations. include("coupling_converters_2d.jl") - end # @muladd diff --git a/src/coupling_converters/coupling_converters_2d.jl b/src/coupling_converters/coupling_converters_2d.jl index 34011f4e1f7..d527fe96894 100644 --- a/src/coupling_converters/coupling_converters_2d.jl +++ b/src/coupling_converters/coupling_converters_2d.jl @@ -16,11 +16,11 @@ c(x) = {c_0, for x \ge x_0 \times s Here, `s` is the sign of the step function, x_0 the save_position of the step and c_0 the amplitude. """ -function coupling_converter_heaviside_2d(x_0, c_0, s, equations::LinearScalarAdvectionEquation2D) - return (x, u) -> c_0 * (s*sign(x[2] - x_0) + 1.0)/2.0 * u +function coupling_converter_heaviside_2d(x_0, c_0, s, + equations::LinearScalarAdvectionEquation2D) + return (x, u) -> c_0 * (s * sign(x[2] - x_0) + 1.0) / 2.0 * u end - @doc raw""" Coupling converter function for a system of two LinearScalarAdvectionEquation2D. @@ -30,7 +30,6 @@ c(x) = x * u(x) ``` """ function coupling_converter_linear_2d(equations::LinearScalarAdvectionEquation2D) - return (x, u) -> x[2]*u + return (x, u) -> x[2] * u end - end # @muladd diff --git a/src/semidiscretization/semidiscretization_coupled.jl b/src/semidiscretization/semidiscretization_coupled.jl index babf4527a57..b24546394a5 100644 --- a/src/semidiscretization/semidiscretization_coupled.jl +++ b/src/semidiscretization/semidiscretization_coupled.jl @@ -367,13 +367,14 @@ BoundaryConditionCoupled(2, (:j, :i_backwards, :end), Float64) mutable struct BoundaryConditionCoupled{NDIMS, NDIMST2M1, uEltype <: Real, Indices} # NDIMST2M1 == NDIMS * 2 - 1 # Buffer for boundary values: [variable, nodes_i, nodes_j, cell_i, cell_j] - u_boundary :: Array{uEltype, NDIMST2M1} # NDIMS * 2 - 1 - other_semi_index :: Int - other_orientation :: Int - indices :: Indices - coupling_converter :: Function - - function BoundaryConditionCoupled(other_semi_index, indices, uEltype, coupling_converter) + u_boundary::Array{uEltype, NDIMST2M1} # NDIMS * 2 - 1 + other_semi_index::Int + other_orientation::Int + indices::Indices + coupling_converter::Function + + function BoundaryConditionCoupled(other_semi_index, indices, uEltype, + coupling_converter) NDIMS = length(indices) u_boundary = Array{uEltype, NDIMS * 2 - 1}(undef, ntuple(_ -> 0, NDIMS * 2 - 1)) @@ -386,7 +387,8 @@ mutable struct BoundaryConditionCoupled{NDIMS, NDIMST2M1, uEltype <: Real, Indic end new{NDIMS, NDIMS * 2 - 1, uEltype, typeof(indices)}(u_boundary, other_semi_index, - other_orientation, indices, coupling_converter) + other_orientation, indices, + coupling_converter) end end @@ -496,8 +498,12 @@ function copy_to_coupled_boundary!(boundary_condition::BoundaryConditionCoupled{ for i in eachnode(solver) for v in 1:size(u, 1) - x = cache.elements.node_coordinates[:, i_node, j_node, linear_indices[i_cell, j_cell]] - converted_u = 
boundary_condition.coupling_converter(x, u[:, i_node, j_node, linear_indices[i_cell, j_cell]]) + x = cache.elements.node_coordinates[:, i_node, j_node, + linear_indices[i_cell, j_cell]] + converted_u = boundary_condition.coupling_converter(x, + u[:, i_node, j_node, + linear_indices[i_cell, + j_cell]]) boundary_condition.u_boundary[v, i, cell] = converted_u[v] # boundary_condition.u_boundary[v, i, cell] = u[v, i_node, j_node, # linear_indices[i_cell, From c7da2cc809d507e17e39a27c781fcf6e0f78031a Mon Sep 17 00:00:00 2001 From: SimonCan Date: Wed, 5 Jul 2023 16:04:50 +0100 Subject: [PATCH 009/166] Added coupled converter elixir. --- .../elixir_advection_coupled_converter.jl | 76 ++++++++++--------- 1 file changed, 40 insertions(+), 36 deletions(-) diff --git a/examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl b/examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl index 8aada6bdb2b..18009ef4f92 100644 --- a/examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl +++ b/examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl @@ -36,11 +36,11 @@ advection_velocity = (0.2, -0.7) equations = LinearScalarAdvectionEquation2D(advection_velocity) # Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux -solver = DGSEM(polydeg=3, surface_flux=flux_lax_friedrichs) +solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) # First mesh is the left half of a [-1,1]^2 square coordinates_min1 = (-1.0, -1.0) # minimum coordinates (min(x), min(y)) -coordinates_max1 = ( 0.0, 1.0) # maximum coordinates (max(x), max(y)) +coordinates_max1 = (0.0, 1.0) # maximum coordinates (max(x), max(y)) # Define identical resolution as a variable such that it is easier to change from `trixi_include` cells_per_dimension = (8, 16) @@ -53,23 +53,25 @@ coupling_function1 = coupling_converter_heaviside_2d(-0.5, 1.0, 1.0, equations) # The user can define their own coupling functions. 
# coupling_function1 = (x, u) -> (sign(x[2] - 0.0)*0.1 + 1.0)/1.1 * u -boundary_conditions_x_neg1 = BoundaryConditionCoupled(2, (:end, :i_forward), Float64, coupling_function1) -boundary_conditions_x_pos1 = BoundaryConditionCoupled(2, (:begin, :i_forward), Float64, coupling_function1) +boundary_conditions_x_neg1 = BoundaryConditionCoupled(2, (:end, :i_forward), Float64, + coupling_function1) +boundary_conditions_x_pos1 = BoundaryConditionCoupled(2, (:begin, :i_forward), Float64, + coupling_function1) # A semidiscretization collects data structures and functions for the spatial discretization -semi1 = SemidiscretizationHyperbolic(mesh1, equations, initial_condition_convergence_test, solver, - boundary_conditions=( - # Connect left boundary with right boundary of right mesh - x_neg=boundary_conditions_x_neg1, - # Connect right boundary with left boundary of right mesh - x_pos=boundary_conditions_x_pos1, - y_neg=boundary_condition_periodic, - y_pos=boundary_condition_periodic)) - +semi1 = SemidiscretizationHyperbolic(mesh1, equations, initial_condition_convergence_test, + solver, + boundary_conditions = ( + # Connect left boundary with right boundary of right mesh + x_neg = boundary_conditions_x_neg1, + # Connect right boundary with left boundary of right mesh + x_pos = boundary_conditions_x_pos1, + y_neg = boundary_condition_periodic, + y_pos = boundary_condition_periodic)) # Second mesh is the right half of a [-1,1]^2 square coordinates_min2 = (0.0, -1.0) # minimum coordinates (min(x), min(y)) -coordinates_max2 = (1.0, 1.0) # maximum coordinates (max(x), max(y)) +coordinates_max2 = (1.0, 1.0) # maximum coordinates (max(x), max(y)) cells_per_dimension2 = cells_per_dimension @@ -78,17 +80,20 @@ mesh2 = StructuredMesh(cells_per_dimension2, coordinates_min2, coordinates_max2) coupling_function2 = coupling_converter_heaviside_2d(-0.5, 1.0, 1.0, equations) # coupling_function2 = (x, u) -> (sign(x[2] - 0.0)*0.1 + 1.0)/1.1 * u -boundary_conditions_x_neg2 = BoundaryConditionCoupled(1, (:end, :i_forward), Float64, coupling_function2) -boundary_conditions_x_pos2 = BoundaryConditionCoupled(1, (:begin, :i_forward), Float64, coupling_function2) - -semi2 = SemidiscretizationHyperbolic(mesh2, equations, initial_condition_convergence_test, solver, - boundary_conditions=( - # Connect left boundary with right boundary of left mesh - x_neg=boundary_conditions_x_neg2, - # Connect right boundary with left boundary of left mesh - x_pos=boundary_conditions_x_pos2, - y_neg=boundary_condition_periodic, - y_pos=boundary_condition_periodic)) +boundary_conditions_x_neg2 = BoundaryConditionCoupled(1, (:end, :i_forward), Float64, + coupling_function2) +boundary_conditions_x_pos2 = BoundaryConditionCoupled(1, (:begin, :i_forward), Float64, + coupling_function2) + +semi2 = SemidiscretizationHyperbolic(mesh2, equations, initial_condition_convergence_test, + solver, + boundary_conditions = ( + # Connect left boundary with right boundary of left mesh + x_neg = boundary_conditions_x_neg2, + # Connect right boundary with left boundary of left mesh + x_pos = boundary_conditions_x_pos2, + y_neg = boundary_condition_periodic, + y_pos = boundary_condition_periodic)) # Create a semidiscretization that bundles semi1 and semi2 semi = SemidiscretizationCoupled(semi1, semi2) @@ -104,29 +109,28 @@ ode = semidiscretize(semi, (0.0, 20.0)); summary_callback = SummaryCallback() # The AnalysisCallback allows to analyse the solution in regular intervals and prints the results -analysis_callback1 = AnalysisCallback(semi1, interval=100) 
-analysis_callback2 = AnalysisCallback(semi2, interval=100) +analysis_callback1 = AnalysisCallback(semi1, interval = 100) +analysis_callback2 = AnalysisCallback(semi2, interval = 100) analysis_callback = AnalysisCallbackCoupled(semi, analysis_callback1, analysis_callback2) # The SaveSolutionCallback allows to save the solution to a file in regular intervals -save_solution = SaveSolutionCallback(interval=1, - solution_variables=cons2prim) +save_solution = SaveSolutionCallback(interval = 1, + solution_variables = cons2prim) # The StepsizeCallback handles the re-calculation of the maximum Δt after each time step -stepsize_callback = StepsizeCallback(cfl=1.6) +stepsize_callback = StepsizeCallback(cfl = 1.6) # Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver -callbacks = CallbackSet(summary_callback, analysis_callback, save_solution, stepsize_callback) - +callbacks = CallbackSet(summary_callback, analysis_callback, save_solution, + stepsize_callback) ############################################################################### # run the simulation # OrdinaryDiffEq's `solve` method evolves the solution in time and executes the passed callbacks -sol = solve(ode, CarpenterKennedy2N54(williamson_condition=false), - dt=1.0, # solve needs some value here but it will be overwritten by the stepsize_callback - save_everystep=false, callback=callbacks); +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); # Print the timer summary summary_callback() - From 98bf2cf462eab663e3de55aca25ce9eaff93ed7a Mon Sep 17 00:00:00 2001 From: SimonCan Date: Wed, 5 Jul 2023 16:06:47 +0100 Subject: [PATCH 010/166] Corrected file name of coupled converters test. --- test/test_structured_2d.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/test_structured_2d.jl b/test/test_structured_2d.jl index d593f6e5a21..2182c197c8b 100644 --- a/test/test_structured_2d.jl +++ b/test/test_structured_2d.jl @@ -32,8 +32,8 @@ isdir(outdir) && rm(outdir, recursive=true) end end - @trixi_testset "elixir_advection_coupled_converters.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_coupled_converters.jl"), + @trixi_testset "elixir_advection_coupled_converter.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_coupled_converter.jl"), l2 = [0.97560175611287, 0.9707973849860191], linf = [1.5703274355039958, 1.6235401582169442], coverage_override = (maxiters=10^5,)) From d97302dc48df0561584094d42952a6b216550d6d Mon Sep 17 00:00:00 2001 From: SimonCan Date: Tue, 18 Jul 2023 16:36:05 +0100 Subject: [PATCH 011/166] Removed redundant doc string. --- src/coupling_converters/coupling_converters.jl | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/src/coupling_converters/coupling_converters.jl b/src/coupling_converters/coupling_converters.jl index 5856cb373ea..3c2e08e34ac 100644 --- a/src/coupling_converters/coupling_converters.jl +++ b/src/coupling_converters/coupling_converters.jl @@ -5,18 +5,6 @@ @muladd begin #! format: noindent -@doc raw""" - coupling_converters - -Define converter functions for two coupled systems. -These should be used together with SemidiscretizationCoupled. -Using converter functions we can couple two systems that do not -share any variables. 
-This is done by taking the last inner point of system i, apply -a converter function on the state vector u_i and obtain a state -vector u_j for the boundary of system j. -""" - @doc raw""" Identity coupling converter function. From 4a22bfea3274641cdacceea286fd768d4d876ee8 Mon Sep 17 00:00:00 2001 From: SimonCan Date: Tue, 18 Jul 2023 16:40:06 +0100 Subject: [PATCH 012/166] Added function signature in doc string. --- src/coupling_converters/coupling_converters.jl | 1 + 1 file changed, 1 insertion(+) diff --git a/src/coupling_converters/coupling_converters.jl b/src/coupling_converters/coupling_converters.jl index 3c2e08e34ac..b4ecb96bf62 100644 --- a/src/coupling_converters/coupling_converters.jl +++ b/src/coupling_converters/coupling_converters.jl @@ -6,6 +6,7 @@ #! format: noindent @doc raw""" + coupling_converter_identity(semi::AbstractSemidiscretization, tspan) Identity coupling converter function. The coupling is given as a linear function. From f1f6ee8970b13e2b09a09f31ea10507c2cf501b4 Mon Sep 17 00:00:00 2001 From: SimonCan Date: Tue, 18 Jul 2023 16:44:57 +0100 Subject: [PATCH 013/166] Removed coverage_override in coupled tests. --- test/test_structured_2d.jl | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/test/test_structured_2d.jl b/test/test_structured_2d.jl index 2182c197c8b..5875ef829ac 100644 --- a/test/test_structured_2d.jl +++ b/test/test_structured_2d.jl @@ -22,8 +22,7 @@ isdir(outdir) && rm(outdir, recursive=true) @trixi_testset "elixir_advection_coupled.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_coupled.jl"), l2 = [7.816742843181738e-6, 7.816742843196112e-6], - linf = [6.314906965543265e-5, 6.314906965410039e-5], - coverage_override = (maxiters=10^5,)) + linf = [6.314906965543265e-5, 6.314906965410039e-5]) @testset "analysis_callback(sol) for AnalysisCallbackCoupled" begin errors = analysis_callback(sol) @@ -35,8 +34,7 @@ isdir(outdir) && rm(outdir, recursive=true) @trixi_testset "elixir_advection_coupled_converter.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_coupled_converter.jl"), l2 = [0.97560175611287, 0.9707973849860191], - linf = [1.5703274355039958, 1.6235401582169442], - coverage_override = (maxiters=10^5,)) + linf = [1.5703274355039958, 1.6235401582169442]) end @trixi_testset "elixir_advection_extended.jl" begin From 7a7def09b11262b1f0da17dead4084f4081be10f Mon Sep 17 00:00:00 2001 From: SimonCan Date: Tue, 18 Jul 2023 16:46:25 +0100 Subject: [PATCH 014/166] Removed old commented code. --- src/semidiscretization/semidiscretization_coupled.jl | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/semidiscretization/semidiscretization_coupled.jl b/src/semidiscretization/semidiscretization_coupled.jl index b24546394a5..940a1125983 100644 --- a/src/semidiscretization/semidiscretization_coupled.jl +++ b/src/semidiscretization/semidiscretization_coupled.jl @@ -505,9 +505,6 @@ function copy_to_coupled_boundary!(boundary_condition::BoundaryConditionCoupled{ linear_indices[i_cell, j_cell]]) boundary_condition.u_boundary[v, i, cell] = converted_u[v] - # boundary_condition.u_boundary[v, i, cell] = u[v, i_node, j_node, - # linear_indices[i_cell, - # j_cell]] end i_node += i_node_step j_node += j_node_step From 9045c5d95674a41fd68e162ce41304bbed015d47 Mon Sep 17 00:00:00 2001 From: Simon Candelaresi <10759273+SimonCan@users.noreply.github.com> Date: Wed, 19 Jul 2023 12:21:56 +0100 Subject: [PATCH 015/166] Update make.jl Added interface coupling docs to the main menu. 
--- docs/make.jl | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/make.jl b/docs/make.jl index 5069e4dc49a..59827f1e99b 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -113,6 +113,7 @@ makedocs( "Testing" => "testing.md", "Performance" => "performance.md", "Parallelization" => "parallelization.md", + "Coupling" => "coupling.md" ], "Troubleshooting and FAQ" => "troubleshooting.md", "Reference" => [ From 31cf07f42a7b60f976d53e8777a1c8794f22ef58 Mon Sep 17 00:00:00 2001 From: Simon Candelaresi <10759273+SimonCan@users.noreply.github.com> Date: Wed, 19 Jul 2023 12:27:20 +0100 Subject: [PATCH 016/166] Update make.jl Moved converter coupling section. --- docs/make.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/make.jl b/docs/make.jl index 59827f1e99b..a95581f0350 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -104,6 +104,7 @@ makedocs( ], "Time integration" => "time_integration.md", "Callbacks" => "callbacks.md", + "Coupling" => "coupling.md" ], "Advanced topics & developers" => [ "Conventions" =>"conventions.md", @@ -113,7 +114,6 @@ makedocs( "Testing" => "testing.md", "Performance" => "performance.md", "Parallelization" => "parallelization.md", - "Coupling" => "coupling.md" ], "Troubleshooting and FAQ" => "troubleshooting.md", "Reference" => [ From 2f19315ea8361a17c7ba2d37e783f574b239c0e4 Mon Sep 17 00:00:00 2001 From: Simon Candelaresi <10759273+SimonCan@users.noreply.github.com> Date: Wed, 19 Jul 2023 12:36:22 +0100 Subject: [PATCH 017/166] Create coupling.md --- docs/src/coupling.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 docs/src/coupling.md diff --git a/docs/src/coupling.md b/docs/src/coupling.md new file mode 100644 index 00000000000..e0a9d916e88 --- /dev/null +++ b/docs/src/coupling.md @@ -0,0 +1 @@ +# [Coupling](@id coupling-id) From 26d8dd3871a163ace1e97168f453a8a6e85c7d57 Mon Sep 17 00:00:00 2001 From: Simon Candelaresi <10759273+SimonCan@users.noreply.github.com> Date: Wed, 19 Jul 2023 13:05:08 +0100 Subject: [PATCH 018/166] Update coupling.md Added some documentation on coupling converters. --- docs/src/coupling.md | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/docs/src/coupling.md b/docs/src/coupling.md index e0a9d916e88..cce3594e140 100644 --- a/docs/src/coupling.md +++ b/docs/src/coupling.md @@ -1 +1,33 @@ # [Coupling](@id coupling-id) +A complex simulation can consist of different spatial domains in which +different equations are being solved, different numerical methods being used +or the grid structure is different. +One example would be a fluid in a tank and an extended hot plate attached to it. +We would then like to solve the Navier-Stokes equations in the fluid domain +and the heat conduction equations in the plate. +The coupling would happen at the interface through the exchange of thermal energy. + +Another type of coupling is bulk or volume coupling. +There we have at least two systems that share all or parts of the domain. +We could, for instance, have a Maxwell system and a fluid system. +The coupling would then occur through the Lorentz force. + + +## Converter Coupling +We can have the case where the two systems do not share any variables, but +share some of the physics. +Here, the same physics is just represented in a different form and with +different variables. +This is the case for a fluid system on one side and a Vlasov system on the other. +To translate the fields from one description to the other one needs to use +converter functions. 
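In its simplest form such a converter is just a function of the node position and the donor state. The following sketch shows the converter later used in the coupled advection example of this patch series, which passes the state through unchanged in the upper half of the domain and damps it in the lower half.

```julia
# A converter receives the position x and the donor state u at the matching
# boundary node and returns the state imposed on the receiving system.
coupling_function = (x, u) -> (sign(x[2] - 0.0) * 0.1 + 1.0) / 1.1 * u
```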
+ +In the general case we have one system with `m` variables `u_i` and another +system with `n` variables `v_j`. +We then define two coupling functions, one that transforms `u_i` into `v_i` +and one that goes the other way. + +In their minimal form they take the position vector `x` and state vector `u` +and return the transformed variables. +Examples can be seen in `examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl` +and in `src/coupling_converters/coupling_converters_2d.jl`. From 05579d12722f440728f4e2fcba2ec5d43c33c76b Mon Sep 17 00:00:00 2001 From: SimonCan Date: Thu, 20 Jul 2023 11:24:19 +0100 Subject: [PATCH 019/166] Removed troublesome AnalysisCallbackCoupled from test. --- test/test_structured_2d.jl | 6 ------ 1 file changed, 6 deletions(-) diff --git a/test/test_structured_2d.jl b/test/test_structured_2d.jl index 5875ef829ac..5ffe73cbd34 100644 --- a/test/test_structured_2d.jl +++ b/test/test_structured_2d.jl @@ -23,12 +23,6 @@ isdir(outdir) && rm(outdir, recursive=true) @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_coupled.jl"), l2 = [7.816742843181738e-6, 7.816742843196112e-6], linf = [6.314906965543265e-5, 6.314906965410039e-5]) - - @testset "analysis_callback(sol) for AnalysisCallbackCoupled" begin - errors = analysis_callback(sol) - @test errors.l2 ≈ [7.816742843181738e-6, 7.816742843196112e-6] rtol=1.0e-4 - @test errors.linf ≈ [6.314906965543265e-5, 6.314906965410039e-5] rtol=1.0e-4 - end end @trixi_testset "elixir_advection_coupled_converter.jl" begin From 84c872e1fcdd0612b1a162875d14b1d616387d71 Mon Sep 17 00:00:00 2001 From: SimonCan Date: Thu, 20 Jul 2023 12:37:41 +0100 Subject: [PATCH 020/166] Chenged coupling converter function. --- .../structured_2d_dgsem/elixir_advection_coupled_converter.jl | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl b/examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl index 18009ef4f92..766fbff8a14 100644 --- a/examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl +++ b/examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl @@ -49,9 +49,8 @@ cells_per_dimension1 = cells_per_dimension mesh1 = StructuredMesh(cells_per_dimension1, coordinates_min1, coordinates_max1) -coupling_function1 = coupling_converter_heaviside_2d(-0.5, 1.0, 1.0, equations) # The user can define their own coupling functions. -# coupling_function1 = (x, u) -> (sign(x[2] - 0.0)*0.1 + 1.0)/1.1 * u +coupling_function1 = (x, u) -> (sign(x[2] - 0.0)*0.1 + 1.0)/1.1 * u boundary_conditions_x_neg1 = BoundaryConditionCoupled(2, (:end, :i_forward), Float64, coupling_function1) From 78f994874b28b70baf52f2b3a3e0942d69baa0ca Mon Sep 17 00:00:00 2001 From: SimonCan Date: Thu, 20 Jul 2023 12:46:26 +0100 Subject: [PATCH 021/166] Changed coupling converter function and updated tests. 
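The documentation above also describes the general case of two systems with different numbers of variables, coupled through one converter function per direction. A self-contained sketch of such a pair, with both systems and all names invented purely for illustration, could look like this.

```julia
# Illustrative pair of converters between a two-variable system (u = [a, b]) and a
# one-variable system (v = [c]); the systems and names are assumptions for
# illustration only and do not appear in the patch.
converter_u_to_v = (x, u) -> [u[1] + u[2]]              # system 1 -> boundary state of system 2
converter_v_to_u = (x, v) -> [0.5 * v[1], 0.5 * v[1]]   # system 2 -> boundary state of system 1

x_node = (0.0, 0.5)
@show converter_u_to_v(x_node, [1.0, 2.0])  # [3.0]
@show converter_v_to_u(x_node, [3.0])       # [1.5, 1.5]
```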
--- .../structured_2d_dgsem/elixir_advection_coupled_converter.jl | 3 +-- test/test_structured_2d.jl | 4 ++-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl b/examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl index 766fbff8a14..0648952faaf 100644 --- a/examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl +++ b/examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl @@ -76,8 +76,7 @@ cells_per_dimension2 = cells_per_dimension mesh2 = StructuredMesh(cells_per_dimension2, coordinates_min2, coordinates_max2) -coupling_function2 = coupling_converter_heaviside_2d(-0.5, 1.0, 1.0, equations) -# coupling_function2 = (x, u) -> (sign(x[2] - 0.0)*0.1 + 1.0)/1.1 * u +coupling_function2 = (x, u) -> (sign(x[2] - 0.0)*0.1 + 1.0)/1.1 * u boundary_conditions_x_neg2 = BoundaryConditionCoupled(1, (:end, :i_forward), Float64, coupling_function2) diff --git a/test/test_structured_2d.jl b/test/test_structured_2d.jl index 5ffe73cbd34..3cc55aa8da7 100644 --- a/test/test_structured_2d.jl +++ b/test/test_structured_2d.jl @@ -27,8 +27,8 @@ isdir(outdir) && rm(outdir, recursive=true) @trixi_testset "elixir_advection_coupled_converter.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_coupled_converter.jl"), - l2 = [0.97560175611287, 0.9707973849860191], - linf = [1.5703274355039958, 1.6235401582169442]) + l2 = [0.3495477674652473, 0.3472339065154432], + linf = [0.5569080960939969, 0.537610538307045]) end @trixi_testset "elixir_advection_extended.jl" begin From 2e350469ba58eaccb203cf3327354475929f17fc Mon Sep 17 00:00:00 2001 From: SimonCan Date: Mon, 24 Jul 2023 14:45:54 +0100 Subject: [PATCH 022/166] Sepcialized coupling function call. --- src/semidiscretization/semidiscretization_coupled.jl | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/semidiscretization/semidiscretization_coupled.jl b/src/semidiscretization/semidiscretization_coupled.jl index 940a1125983..00818e40b69 100644 --- a/src/semidiscretization/semidiscretization_coupled.jl +++ b/src/semidiscretization/semidiscretization_coupled.jl @@ -364,14 +364,14 @@ BoundaryConditionCoupled(2, (:j, :i_backwards, :end), Float64) !!! warning "Experimental code" This is an experimental feature and can change any time. 
""" -mutable struct BoundaryConditionCoupled{NDIMS, NDIMST2M1, uEltype <: Real, Indices} +mutable struct BoundaryConditionCoupled{NDIMS, NDIMST2M1, uEltype <: Real, Indices, CouplingFunction} # NDIMST2M1 == NDIMS * 2 - 1 # Buffer for boundary values: [variable, nodes_i, nodes_j, cell_i, cell_j] u_boundary::Array{uEltype, NDIMST2M1} # NDIMS * 2 - 1 other_semi_index::Int other_orientation::Int indices::Indices - coupling_converter::Function + coupling_converter::CouplingFunction function BoundaryConditionCoupled(other_semi_index, indices, uEltype, coupling_converter) @@ -386,9 +386,11 @@ mutable struct BoundaryConditionCoupled{NDIMS, NDIMST2M1, uEltype <: Real, Indic other_orientation = 3 end - new{NDIMS, NDIMS * 2 - 1, uEltype, typeof(indices)}(u_boundary, other_semi_index, - other_orientation, indices, - coupling_converter) + new{NDIMS, NDIMS * 2 - 1, uEltype, typeof(indices), typeof(coupling_converter)}(u_boundary, + other_semi_index, + other_orientation, + indices, + coupling_converter) end end From 6bc7c3be6fb72d9b8ac9695ee4b76014eecde5f7 Mon Sep 17 00:00:00 2001 From: SimonCan Date: Tue, 25 Jul 2023 14:56:15 +0100 Subject: [PATCH 023/166] Removed volume coupling from documentation to avoit confusion. --- docs/src/coupling.md | 5 ----- 1 file changed, 5 deletions(-) diff --git a/docs/src/coupling.md b/docs/src/coupling.md index cce3594e140..a10b90f7ee8 100644 --- a/docs/src/coupling.md +++ b/docs/src/coupling.md @@ -7,11 +7,6 @@ We would then like to solve the Navier-Stokes equations in the fluid domain and the heat conduction equations in the plate. The coupling would happen at the interface through the exchange of thermal energy. -Another type of coupling is bulk or volume coupling. -There we have at least two systems that share all or parts of the domain. -We could, for instance, have a Maxwell system and a fluid system. -The coupling would then occur through the Lorentz force. - ## Converter Coupling We can have the case where the two systems do not share any variables, but From b4decb7a070209590a28c3704133fd9e7930ef83 Mon Sep 17 00:00:00 2001 From: Simon Candelaresi <10759273+SimonCan@users.noreply.github.com> Date: Tue, 25 Jul 2023 14:57:51 +0100 Subject: [PATCH 024/166] Update src/coupling_converters/coupling_converters.jl Co-authored-by: Hendrik Ranocha --- src/coupling_converters/coupling_converters.jl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/coupling_converters/coupling_converters.jl b/src/coupling_converters/coupling_converters.jl index b4ecb96bf62..7e7d815d9a7 100644 --- a/src/coupling_converters/coupling_converters.jl +++ b/src/coupling_converters/coupling_converters.jl @@ -7,7 +7,8 @@ @doc raw""" coupling_converter_identity(semi::AbstractSemidiscretization, tspan) - Identity coupling converter function. + +Identity coupling converter function. The coupling is given as a linear function. ```math From 54d8180173953b9e4b0525d504c2da67488ac210 Mon Sep 17 00:00:00 2001 From: SimonCan Date: Tue, 25 Jul 2023 14:59:34 +0100 Subject: [PATCH 025/166] Removed redundant converter function for coupling. 
--- .../coupling_converters_2d.jl | 35 ------------------- 1 file changed, 35 deletions(-) delete mode 100644 src/coupling_converters/coupling_converters_2d.jl diff --git a/src/coupling_converters/coupling_converters_2d.jl b/src/coupling_converters/coupling_converters_2d.jl deleted file mode 100644 index d527fe96894..00000000000 --- a/src/coupling_converters/coupling_converters_2d.jl +++ /dev/null @@ -1,35 +0,0 @@ -# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). -# Since these FMAs can increase the performance of many numerical algorithms, -# we need to opt-in explicitly. -# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. -@muladd begin -#! format: noindent - -@doc raw""" - Coupling converter function for a system of two LinearScalarAdvectionEquation2D. - -The coupling is given as a Heaviside step. -```math -c(x) = {c_0, for x \ge x_0 \times s - 0, for x < x_0 \times s} -``` -Here, `s` is the sign of the step function, x_0 the save_position -of the step and c_0 the amplitude. -""" -function coupling_converter_heaviside_2d(x_0, c_0, s, - equations::LinearScalarAdvectionEquation2D) - return (x, u) -> c_0 * (s * sign(x[2] - x_0) + 1.0) / 2.0 * u -end - -@doc raw""" - Coupling converter function for a system of two LinearScalarAdvectionEquation2D. - -The coupling is given as a linear function. -```math -c(x) = x * u(x) -``` -""" -function coupling_converter_linear_2d(equations::LinearScalarAdvectionEquation2D) - return (x, u) -> x[2] * u -end -end # @muladd From 160c7bf895d4639affedee70049e05309a63b427 Mon Sep 17 00:00:00 2001 From: SimonCan Date: Tue, 25 Jul 2023 15:00:33 +0100 Subject: [PATCH 026/166] Removed redundant coupling converter file mentioned in some files. --- docs/src/coupling.md | 3 +-- src/coupling_converters/coupling_converters.jl | 1 - 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/docs/src/coupling.md b/docs/src/coupling.md index a10b90f7ee8..10194801cab 100644 --- a/docs/src/coupling.md +++ b/docs/src/coupling.md @@ -24,5 +24,4 @@ and one that goes the other way. In their minimal form they take the position vector `x` and state vector `u` and return the transformed variables. -Examples can be seen in `examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl` -and in `src/coupling_converters/coupling_converters_2d.jl`. +Examples can be seen in `examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl`. diff --git a/src/coupling_converters/coupling_converters.jl b/src/coupling_converters/coupling_converters.jl index b4ecb96bf62..b97801883d6 100644 --- a/src/coupling_converters/coupling_converters.jl +++ b/src/coupling_converters/coupling_converters.jl @@ -21,5 +21,4 @@ end #################################################################################################### # Include files with actual implementations for different systems of equations. -include("coupling_converters_2d.jl") end # @muladd From cab33c274eec963344faecef0b8c46f6d7e395ab Mon Sep 17 00:00:00 2001 From: SimonCan Date: Tue, 25 Jul 2023 15:25:40 +0100 Subject: [PATCH 027/166] Autoreformatted. 
--- src/semidiscretization/semidiscretization_coupled.jl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/semidiscretization/semidiscretization_coupled.jl b/src/semidiscretization/semidiscretization_coupled.jl index 00818e40b69..e16750fa32a 100644 --- a/src/semidiscretization/semidiscretization_coupled.jl +++ b/src/semidiscretization/semidiscretization_coupled.jl @@ -364,7 +364,8 @@ BoundaryConditionCoupled(2, (:j, :i_backwards, :end), Float64) !!! warning "Experimental code" This is an experimental feature and can change any time. """ -mutable struct BoundaryConditionCoupled{NDIMS, NDIMST2M1, uEltype <: Real, Indices, CouplingFunction} +mutable struct BoundaryConditionCoupled{NDIMS, NDIMST2M1, uEltype <: Real, Indices, + CouplingFunction} # NDIMST2M1 == NDIMS * 2 - 1 # Buffer for boundary values: [variable, nodes_i, nodes_j, cell_i, cell_j] u_boundary::Array{uEltype, NDIMST2M1} # NDIMS * 2 - 1 From 024106c3d2f6c44d40c38f078a52e628c1626153 Mon Sep 17 00:00:00 2001 From: SimonCan Date: Tue, 25 Jul 2023 17:05:45 +0100 Subject: [PATCH 028/166] Removed old coupled elixir and replaced it with one using converter functions. --- docs/src/coupling.md | 2 +- .../elixir_advection_coupled.jl | 88 +++++++----- .../elixir_advection_coupled_converter.jl | 134 ------------------ test/test_structured_2d.jl | 6 - 4 files changed, 51 insertions(+), 179 deletions(-) delete mode 100644 examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl diff --git a/docs/src/coupling.md b/docs/src/coupling.md index 10194801cab..f715bfbdc0e 100644 --- a/docs/src/coupling.md +++ b/docs/src/coupling.md @@ -24,4 +24,4 @@ and one that goes the other way. In their minimal form they take the position vector `x` and state vector `u` and return the transformed variables. -Examples can be seen in `examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl`. +Examples can be seen in `examples/structured_2d_dgsem/elixir_advection_coupled.jl`. diff --git a/examples/structured_2d_dgsem/elixir_advection_coupled.jl b/examples/structured_2d_dgsem/elixir_advection_coupled.jl index 1e3146906b9..0648952faaf 100644 --- a/examples/structured_2d_dgsem/elixir_advection_coupled.jl +++ b/examples/structured_2d_dgsem/elixir_advection_coupled.jl @@ -1,9 +1,10 @@ using OrdinaryDiffEq using Trixi - ############################################################################### -# Coupled semidiscretization of two linear advection systems, which are connected periodically +# Coupled semidiscretization of two linear advection systems using converter functions such that +# the upper half of the domain is coupled periodically, while the lower half is not coupled +# and any incoming wave is completely absorbed. # # In this elixir, we have a square domain that is divided into a left half and a right half. 
On each # half of the domain, a completely independent SemidiscretizationHyperbolic is created for the @@ -35,11 +36,11 @@ advection_velocity = (0.2, -0.7) equations = LinearScalarAdvectionEquation2D(advection_velocity) # Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux -solver = DGSEM(polydeg=3, surface_flux=flux_lax_friedrichs) +solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) # First mesh is the left half of a [-1,1]^2 square coordinates_min1 = (-1.0, -1.0) # minimum coordinates (min(x), min(y)) -coordinates_max1 = ( 0.0, 1.0) # maximum coordinates (max(x), max(y)) +coordinates_max1 = (0.0, 1.0) # maximum coordinates (max(x), max(y)) # Define identical resolution as a variable such that it is easier to change from `trixi_include` cells_per_dimension = (8, 16) @@ -48,38 +49,49 @@ cells_per_dimension1 = cells_per_dimension mesh1 = StructuredMesh(cells_per_dimension1, coordinates_min1, coordinates_max1) -boundary_conditions_x_neg1 = BoundaryConditionCoupled(2, (:end, :i_forward), Float64, coupling_converter_identity(equations)) -boundary_conditions_x_pos1 = BoundaryConditionCoupled(2, (:begin, :i_forward), Float64, coupling_converter_identity(equations)) - -semi1 = SemidiscretizationHyperbolic(mesh1, equations, initial_condition_convergence_test, solver, - boundary_conditions=( - # Connect left boundary with right boundary of right mesh - x_neg=boundary_conditions_x_neg1, - # Connect right boundary with left boundary of right mesh - x_pos=boundary_conditions_x_pos1, - y_neg=boundary_condition_periodic, - y_pos=boundary_condition_periodic)) - +# The user can define their own coupling functions. +coupling_function1 = (x, u) -> (sign(x[2] - 0.0)*0.1 + 1.0)/1.1 * u + +boundary_conditions_x_neg1 = BoundaryConditionCoupled(2, (:end, :i_forward), Float64, + coupling_function1) +boundary_conditions_x_pos1 = BoundaryConditionCoupled(2, (:begin, :i_forward), Float64, + coupling_function1) + +# A semidiscretization collects data structures and functions for the spatial discretization +semi1 = SemidiscretizationHyperbolic(mesh1, equations, initial_condition_convergence_test, + solver, + boundary_conditions = ( + # Connect left boundary with right boundary of right mesh + x_neg = boundary_conditions_x_neg1, + # Connect right boundary with left boundary of right mesh + x_pos = boundary_conditions_x_pos1, + y_neg = boundary_condition_periodic, + y_pos = boundary_condition_periodic)) # Second mesh is the right half of a [-1,1]^2 square coordinates_min2 = (0.0, -1.0) # minimum coordinates (min(x), min(y)) -coordinates_max2 = (1.0, 1.0) # maximum coordinates (max(x), max(y)) +coordinates_max2 = (1.0, 1.0) # maximum coordinates (max(x), max(y)) cells_per_dimension2 = cells_per_dimension mesh2 = StructuredMesh(cells_per_dimension2, coordinates_min2, coordinates_max2) -boundary_conditions_x_neg2 = BoundaryConditionCoupled(1, (:end, :i_forward), Float64, coupling_converter_identity(equations)) -boundary_conditions_x_pos2 = BoundaryConditionCoupled(1, (:begin, :i_forward), Float64, coupling_converter_identity(equations)) +coupling_function2 = (x, u) -> (sign(x[2] - 0.0)*0.1 + 1.0)/1.1 * u -semi2 = SemidiscretizationHyperbolic(mesh2, equations, initial_condition_convergence_test, solver, - boundary_conditions=( - # Connect left boundary with right boundary of left mesh - x_neg=boundary_conditions_x_neg2, - # Connect right boundary with left boundary of left mesh - x_pos=boundary_conditions_x_pos2, - y_neg=boundary_condition_periodic, - 
y_pos=boundary_condition_periodic)) +boundary_conditions_x_neg2 = BoundaryConditionCoupled(1, (:end, :i_forward), Float64, + coupling_function2) +boundary_conditions_x_pos2 = BoundaryConditionCoupled(1, (:begin, :i_forward), Float64, + coupling_function2) + +semi2 = SemidiscretizationHyperbolic(mesh2, equations, initial_condition_convergence_test, + solver, + boundary_conditions = ( + # Connect left boundary with right boundary of left mesh + x_neg = boundary_conditions_x_neg2, + # Connect right boundary with left boundary of left mesh + x_pos = boundary_conditions_x_pos2, + y_neg = boundary_condition_periodic, + y_pos = boundary_condition_periodic)) # Create a semidiscretization that bundles semi1 and semi2 semi = SemidiscretizationCoupled(semi1, semi2) @@ -88,35 +100,35 @@ semi = SemidiscretizationCoupled(semi1, semi2) # ODE solvers, callbacks etc. # Create ODE problem with time span from 0.0 to 2.0 -ode = semidiscretize(semi, (0.0, 2.0)); +ode = semidiscretize(semi, (0.0, 20.0)); # At the beginning of the main loop, the SummaryCallback prints a summary of the simulation setup # and resets the timers summary_callback = SummaryCallback() # The AnalysisCallback allows to analyse the solution in regular intervals and prints the results -analysis_callback1 = AnalysisCallback(semi1, interval=100) -analysis_callback2 = AnalysisCallback(semi2, interval=100) +analysis_callback1 = AnalysisCallback(semi1, interval = 100) +analysis_callback2 = AnalysisCallback(semi2, interval = 100) analysis_callback = AnalysisCallbackCoupled(semi, analysis_callback1, analysis_callback2) # The SaveSolutionCallback allows to save the solution to a file in regular intervals -save_solution = SaveSolutionCallback(interval=100, - solution_variables=cons2prim) +save_solution = SaveSolutionCallback(interval = 1, + solution_variables = cons2prim) # The StepsizeCallback handles the re-calculation of the maximum Δt after each time step -stepsize_callback = StepsizeCallback(cfl=1.6) +stepsize_callback = StepsizeCallback(cfl = 1.6) # Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver -callbacks = CallbackSet(summary_callback, analysis_callback, save_solution, stepsize_callback) - +callbacks = CallbackSet(summary_callback, analysis_callback, save_solution, + stepsize_callback) ############################################################################### # run the simulation # OrdinaryDiffEq's `solve` method evolves the solution in time and executes the passed callbacks -sol = solve(ode, CarpenterKennedy2N54(williamson_condition=false), - dt=1.0, # solve needs some value here but it will be overwritten by the stepsize_callback - save_everystep=false, callback=callbacks); +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); # Print the timer summary summary_callback() diff --git a/examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl b/examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl deleted file mode 100644 index 0648952faaf..00000000000 --- a/examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl +++ /dev/null @@ -1,134 +0,0 @@ -using OrdinaryDiffEq -using Trixi - -############################################################################### -# Coupled semidiscretization of two linear advection systems using converter functions such that -# the upper half of the domain is coupled 
periodically, while the lower half is not coupled -# and any incoming wave is completely absorbed. -# -# In this elixir, we have a square domain that is divided into a left half and a right half. On each -# half of the domain, a completely independent SemidiscretizationHyperbolic is created for the -# linear advection equations. The two systems are coupled in the x-direction and have periodic -# boundaries in the y-direction. For a high-level overview, see also the figure below: -# -# (-1, 1) ( 1, 1) -# ┌────────────────────┬────────────────────┐ -# │ ↑ periodic ↑ │ ↑ periodic ↑ │ -# │ │ │ -# │ │ │ -# │ ========= │ ========= │ -# │ system #1 │ system #2 │ -# │ ========= │ ========= │ -# │ │ │ -# │ │ │ -# │ │ │ -# │ │ │ -# │ coupled -->│<-- coupled │ -# │ │ │ -# │<-- coupled │ coupled -->│ -# │ │ │ -# │ │ │ -# │ ↓ periodic ↓ │ ↓ periodic ↓ │ -# └────────────────────┴────────────────────┘ -# (-1, -1) ( 1, -1) - -advection_velocity = (0.2, -0.7) -equations = LinearScalarAdvectionEquation2D(advection_velocity) - -# Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) - -# First mesh is the left half of a [-1,1]^2 square -coordinates_min1 = (-1.0, -1.0) # minimum coordinates (min(x), min(y)) -coordinates_max1 = (0.0, 1.0) # maximum coordinates (max(x), max(y)) - -# Define identical resolution as a variable such that it is easier to change from `trixi_include` -cells_per_dimension = (8, 16) - -cells_per_dimension1 = cells_per_dimension - -mesh1 = StructuredMesh(cells_per_dimension1, coordinates_min1, coordinates_max1) - -# The user can define their own coupling functions. -coupling_function1 = (x, u) -> (sign(x[2] - 0.0)*0.1 + 1.0)/1.1 * u - -boundary_conditions_x_neg1 = BoundaryConditionCoupled(2, (:end, :i_forward), Float64, - coupling_function1) -boundary_conditions_x_pos1 = BoundaryConditionCoupled(2, (:begin, :i_forward), Float64, - coupling_function1) - -# A semidiscretization collects data structures and functions for the spatial discretization -semi1 = SemidiscretizationHyperbolic(mesh1, equations, initial_condition_convergence_test, - solver, - boundary_conditions = ( - # Connect left boundary with right boundary of right mesh - x_neg = boundary_conditions_x_neg1, - # Connect right boundary with left boundary of right mesh - x_pos = boundary_conditions_x_pos1, - y_neg = boundary_condition_periodic, - y_pos = boundary_condition_periodic)) - -# Second mesh is the right half of a [-1,1]^2 square -coordinates_min2 = (0.0, -1.0) # minimum coordinates (min(x), min(y)) -coordinates_max2 = (1.0, 1.0) # maximum coordinates (max(x), max(y)) - -cells_per_dimension2 = cells_per_dimension - -mesh2 = StructuredMesh(cells_per_dimension2, coordinates_min2, coordinates_max2) - -coupling_function2 = (x, u) -> (sign(x[2] - 0.0)*0.1 + 1.0)/1.1 * u - -boundary_conditions_x_neg2 = BoundaryConditionCoupled(1, (:end, :i_forward), Float64, - coupling_function2) -boundary_conditions_x_pos2 = BoundaryConditionCoupled(1, (:begin, :i_forward), Float64, - coupling_function2) - -semi2 = SemidiscretizationHyperbolic(mesh2, equations, initial_condition_convergence_test, - solver, - boundary_conditions = ( - # Connect left boundary with right boundary of left mesh - x_neg = boundary_conditions_x_neg2, - # Connect right boundary with left boundary of left mesh - x_pos = boundary_conditions_x_pos2, - y_neg = boundary_condition_periodic, - y_pos = boundary_condition_periodic)) - -# Create a semidiscretization 
that bundles semi1 and semi2 -semi = SemidiscretizationCoupled(semi1, semi2) - -############################################################################### -# ODE solvers, callbacks etc. - -# Create ODE problem with time span from 0.0 to 2.0 -ode = semidiscretize(semi, (0.0, 20.0)); - -# At the beginning of the main loop, the SummaryCallback prints a summary of the simulation setup -# and resets the timers -summary_callback = SummaryCallback() - -# The AnalysisCallback allows to analyse the solution in regular intervals and prints the results -analysis_callback1 = AnalysisCallback(semi1, interval = 100) -analysis_callback2 = AnalysisCallback(semi2, interval = 100) -analysis_callback = AnalysisCallbackCoupled(semi, analysis_callback1, analysis_callback2) - -# The SaveSolutionCallback allows to save the solution to a file in regular intervals -save_solution = SaveSolutionCallback(interval = 1, - solution_variables = cons2prim) - -# The StepsizeCallback handles the re-calculation of the maximum Δt after each time step -stepsize_callback = StepsizeCallback(cfl = 1.6) - -# Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver -callbacks = CallbackSet(summary_callback, analysis_callback, save_solution, - stepsize_callback) - -############################################################################### -# run the simulation - -# OrdinaryDiffEq's `solve` method evolves the solution in time and executes the passed callbacks -sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), - dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback - save_everystep = false, callback = callbacks); - -# Print the timer summary -summary_callback() diff --git a/test/test_structured_2d.jl b/test/test_structured_2d.jl index 3cc55aa8da7..634d243cb8b 100644 --- a/test/test_structured_2d.jl +++ b/test/test_structured_2d.jl @@ -20,12 +20,6 @@ isdir(outdir) && rm(outdir, recursive=true) end @trixi_testset "elixir_advection_coupled.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_coupled.jl"), - l2 = [7.816742843181738e-6, 7.816742843196112e-6], - linf = [6.314906965543265e-5, 6.314906965410039e-5]) - end - - @trixi_testset "elixir_advection_coupled_converter.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_coupled_converter.jl"), l2 = [0.3495477674652473, 0.3472339065154432], linf = [0.5569080960939969, 0.537610538307045]) From c6d6da673db628870f4008209d040f4c4554fcf9 Mon Sep 17 00:00:00 2001 From: SimonCan Date: Tue, 1 Aug 2023 10:04:16 +0100 Subject: [PATCH 029/166] Updated errors for coupled tests. 
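The updated reference values reflect the converter-based coupling introduced in the
previous commit: both subdomains now scale the boundary data they receive depending on
the sign of the y-coordinate, and the final time was extended from 2.0 to 20.0. For
reference, a minimal, self-contained sketch of such a converter function; the sample
evaluations below are illustrative and not part of the elixir:

    # A coupling converter maps the interface position `x` and the state `u`
    # of the sending system to the value imposed on the receiving system.
    coupling_function = (x, u) -> (sign(x[2] - 0.0) * 0.1 + 1.0) / 1.1 * u

    coupling_function((0.0, 0.5), 1.0)   # upper half: returns 1.0 (passed through)
    coupling_function((0.0, -0.5), 1.0)  # lower half: returns 0.9 / 1.1 ≈ 0.818 (damped)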
--- test/test_structured_2d.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/test_structured_2d.jl b/test/test_structured_2d.jl index 75937ba82ad..6adce677ee4 100644 --- a/test/test_structured_2d.jl +++ b/test/test_structured_2d.jl @@ -23,8 +23,8 @@ isdir(outdir) && rm(outdir, recursive=true) @trixi_testset "elixir_advection_coupled.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_coupled.jl"), - l2 = [7.816742843181738e-6, 7.816742843196112e-6], - linf = [6.314906965543265e-5, 6.314906965410039e-5], + l2 = [0.3495477674652473, 0.3472339065154432], + linf = [0.5569080960939969, 0.537610538307045], coverage_override = (maxiters=10^5,)) @testset "analysis_callback(sol) for AnalysisCallbackCoupled" begin From 62d1408797a8553cb9b1c392f568e9cd50b0763b Mon Sep 17 00:00:00 2001 From: SimonCan Date: Tue, 1 Aug 2023 15:07:03 +0100 Subject: [PATCH 030/166] Corrected test results for coupled equations. --- test/test_structured_2d.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/test_structured_2d.jl b/test/test_structured_2d.jl index 6adce677ee4..0bf0a3923f2 100644 --- a/test/test_structured_2d.jl +++ b/test/test_structured_2d.jl @@ -29,8 +29,8 @@ isdir(outdir) && rm(outdir, recursive=true) @testset "analysis_callback(sol) for AnalysisCallbackCoupled" begin errors = analysis_callback(sol) - @test errors.l2 ≈ [7.816742843181738e-6, 7.816742843196112e-6] rtol=1.0e-4 - @test errors.linf ≈ [6.314906965543265e-5, 6.314906965410039e-5] rtol=1.0e-4 + @test errors.l2 ≈ [0.3495477674652473, 0.3472339065154432] rtol=1.0e-4 + @test errors.linf ≈ [0.5569080960939969, 0.537610538307045] rtol=1.0e-4 end end From 09fd6919c3a24643e61614af497927db85e410d8 Mon Sep 17 00:00:00 2001 From: SimonCan Date: Wed, 2 Aug 2023 12:10:15 +0100 Subject: [PATCH 031/166] Corrected comment. --- examples/structured_2d_dgsem/elixir_advection_coupled.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/structured_2d_dgsem/elixir_advection_coupled.jl b/examples/structured_2d_dgsem/elixir_advection_coupled.jl index 0648952faaf..b5eeb9543de 100644 --- a/examples/structured_2d_dgsem/elixir_advection_coupled.jl +++ b/examples/structured_2d_dgsem/elixir_advection_coupled.jl @@ -99,7 +99,7 @@ semi = SemidiscretizationCoupled(semi1, semi2) ############################################################################### # ODE solvers, callbacks etc. -# Create ODE problem with time span from 0.0 to 2.0 +# Create ODE problem with time span from 0.0 to 20.0 ode = semidiscretize(semi, (0.0, 20.0)); # At the beginning of the main loop, the SummaryCallback prints a summary of the simulation setup From df67f00d78bc389c6fc5c55422ffd271a2367498 Mon Sep 17 00:00:00 2001 From: SimonCan Date: Wed, 2 Aug 2023 12:10:48 +0100 Subject: [PATCH 032/166] Removed coupled test from special tests. 
--- test/test_special_elixirs.jl | 1 + 1 file changed, 1 insertion(+) diff --git a/test/test_special_elixirs.jl b/test/test_special_elixirs.jl index 23017059eaa..6cea1497ba1 100644 --- a/test/test_special_elixirs.jl +++ b/test/test_special_elixirs.jl @@ -32,6 +32,7 @@ coverage = occursin("--code-coverage", cmd) && !occursin("--code-coverage=none", @timed_testset "structured_2d_dgsem coupled" begin mean_convergence = convergence_test(@__MODULE__, joinpath(EXAMPLES_DIR, "structured_2d_dgsem", "elixir_advection_coupled.jl"), 3) + println("mean_convergence = ", mean_convergence) @test isapprox(mean_convergence[1][:l2], [4.0], rtol=0.05) @test isapprox(mean_convergence[2][:l2], [4.0], rtol=0.05) end From 35980a1bcb3d87d485f529af525537ebfce9a477 Mon Sep 17 00:00:00 2001 From: SimonCan Date: Wed, 2 Aug 2023 12:12:11 +0100 Subject: [PATCH 033/166] Removed coupled test from specials. --- test/test_special_elixirs.jl | 7 ------- 1 file changed, 7 deletions(-) diff --git a/test/test_special_elixirs.jl b/test/test_special_elixirs.jl index 6cea1497ba1..d97b99bf75a 100644 --- a/test/test_special_elixirs.jl +++ b/test/test_special_elixirs.jl @@ -30,13 +30,6 @@ coverage = occursin("--code-coverage", cmd) && !occursin("--code-coverage=none", @test isapprox(mean_convergence[:l2], [4.0], rtol=0.05) end - @timed_testset "structured_2d_dgsem coupled" begin - mean_convergence = convergence_test(@__MODULE__, joinpath(EXAMPLES_DIR, "structured_2d_dgsem", "elixir_advection_coupled.jl"), 3) - println("mean_convergence = ", mean_convergence) - @test isapprox(mean_convergence[1][:l2], [4.0], rtol=0.05) - @test isapprox(mean_convergence[2][:l2], [4.0], rtol=0.05) - end - @timed_testset "p4est_2d_dgsem" begin # Run convergence test on unrefined mesh no_refine = @cfunction((p4est, which_tree, quadrant) -> Cint(0), Cint, (Ptr{Trixi.p4est_t}, Ptr{Trixi.p4est_topidx_t}, Ptr{Trixi.p4est_quadrant_t})) From 7d5ac5fcaacc855cfb267884c5504467d175efec Mon Sep 17 00:00:00 2001 From: SimonCan Date: Wed, 2 Aug 2023 16:39:22 +0100 Subject: [PATCH 034/166] Added memory allocation for p4est boundary. --- .../semidiscretization_coupled.jl | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/src/semidiscretization/semidiscretization_coupled.jl b/src/semidiscretization/semidiscretization_coupled.jl index e16750fa32a..1f885352427 100644 --- a/src/semidiscretization/semidiscretization_coupled.jl +++ b/src/semidiscretization/semidiscretization_coupled.jl @@ -441,6 +441,24 @@ end function allocate_coupled_boundary_condition(boundary_condition::BoundaryConditionCoupled{2 }, direction, mesh, equations, dg::DGSEM) + @autoinfiltrate + if direction in (1, 2) + cell_size = size(mesh, 2) + else + cell_size = size(mesh, 1) + end + + uEltype = eltype(boundary_condition) + boundary_condition.u_boundary = Array{uEltype, 3}(undef, nvariables(equations), + nnodes(dg), + cell_size) +end + +# In 2D +function allocate_coupled_boundary_condition(boundary_condition::BoundaryConditionCoupled{2 + }, + direction, mesh::P4estMesh, equations, dg::DGSEM) + @autoinfiltrate if direction in (1, 2) cell_size = size(mesh, 2) else From 09ebb43aa73b5eeab1d962c986d37b2becbfdd44 Mon Sep 17 00:00:00 2001 From: SimonCan Date: Thu, 3 Aug 2023 17:05:57 +0100 Subject: [PATCH 035/166] Corrected boundary memory allocation for p4est meshes. 
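For a P4estMesh the cell count along a coupled boundary is no longer taken from
`size(mesh)` as in the structured-mesh method; instead, the trees touching that
boundary are counted via their corner node coordinates. A minimal sketch of the
counting idea, using made-up corner coordinates rather than an actual mesh:

    # First corner x-coordinate of each tree; trees whose corner lies on the
    # domain minimum belong to the negative-x coupling boundary.
    x_corners = [-1.0, 0.0, -1.0, 0.0]
    cell_size = sum(x_corners .== minimum(x_corners))  # == 2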
--- .../semidiscretization_coupled.jl | 22 +++++++++++++------ 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/src/semidiscretization/semidiscretization_coupled.jl b/src/semidiscretization/semidiscretization_coupled.jl index 1f885352427..cbc182fccca 100644 --- a/src/semidiscretization/semidiscretization_coupled.jl +++ b/src/semidiscretization/semidiscretization_coupled.jl @@ -437,13 +437,14 @@ function allocate_coupled_boundary_condition(boundary_condition, direction, mesh return nothing end -# In 2D +# In 2D for a structured mesh. function allocate_coupled_boundary_condition(boundary_condition::BoundaryConditionCoupled{2 }, direction, mesh, equations, dg::DGSEM) - @autoinfiltrate + # Negative and positive x. if direction in (1, 2) cell_size = size(mesh, 2) + # Negative and positive y. else cell_size = size(mesh, 1) end @@ -454,15 +455,22 @@ function allocate_coupled_boundary_condition(boundary_condition::BoundaryConditi cell_size) end -# In 2D +# In 2D for a p4est mesh. function allocate_coupled_boundary_condition(boundary_condition::BoundaryConditionCoupled{2 }, direction, mesh::P4estMesh, equations, dg::DGSEM) - @autoinfiltrate - if direction in (1, 2) - cell_size = size(mesh, 2) + # Negative x. + if direction == 1 + cell_size = sum(mesh.tree_node_coordinates[1, 1, 1, :] .== minimum(mesh.tree_node_coordinates[1, 1, 1, :])) + # Positive x. + elseif direction == 2 + cell_size = sum(mesh.tree_node_coordinates[1, 1, 1, :] .== maximum(mesh.tree_node_coordinates[1, 1, 1, :])) + # Negative y. + elseif direction == 3 + cell_size = sum(mesh.tree_node_coordinates[2, 1, 1, :] .== minimum(mesh.tree_node_coordinates[2, 1, 1, :])) + # Positive y. else - cell_size = size(mesh, 1) + cell_size = sum(mesh.tree_node_coordinates[2, 1, 1, :] .== maximum(mesh.tree_node_coordinates[2, 1, 1, :])) end uEltype = eltype(boundary_condition) From 40611402419e76bf3b08c3e9017054fa082d7b52 Mon Sep 17 00:00:00 2001 From: SimonCan Date: Fri, 4 Aug 2023 18:42:12 +0100 Subject: [PATCH 036/166] Added fields to p4est mesh struct. Added options for p4est in coupled boundary copy. 
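Storing `trees_per_dimension` (together with `coordinates_min` and `coordinates_max`)
on the mesh lets the coupled-boundary copy linearize tree indices in the same way the
structured-mesh code linearizes cell indices. A small sketch of that indexing with
illustrative tree counts:

    # With an 8x16 tree layout, (i, j) tree coordinates map to a single
    # column-major index, as used to pick the element adjacent to the boundary.
    trees_per_dimension = (8, 16)
    linear_indices = LinearIndices(trees_per_dimension)
    linear_indices[8, 1]  # == 8
    linear_indices[1, 2]  # == 9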
--- src/meshes/p4est_mesh.jl | 14 ++++++-- .../semidiscretization_coupled.jl | 32 +++++++++++++++---- 2 files changed, 36 insertions(+), 10 deletions(-) diff --git a/src/meshes/p4est_mesh.jl b/src/meshes/p4est_mesh.jl index 60db285e04f..87ef823185e 100644 --- a/src/meshes/p4est_mesh.jl +++ b/src/meshes/p4est_mesh.jl @@ -24,10 +24,14 @@ mutable struct P4estMesh{NDIMS, RealT <: Real, IsParallel, P, Ghost, NDIMSP2, NN current_filename::String unsaved_changes::Bool p4est_partition_allow_for_coarsening::Bool + coordinates_min::Tuple{RealT, RealT} + coordinates_max::Tuple{RealT, RealT} + trees_per_dimension::Tuple{Int, Int} function P4estMesh{NDIMS}(p4est, tree_node_coordinates, nodes, boundary_names, current_filename, unsaved_changes, - p4est_partition_allow_for_coarsening) where {NDIMS} + p4est_partition_allow_for_coarsening, + coordinates_min, coordinates_max, trees_per_dimension) where {NDIMS} if NDIMS == 2 @assert p4est isa Ptr{p4est_t} elseif NDIMS == 3 @@ -57,7 +61,10 @@ mutable struct P4estMesh{NDIMS, RealT <: Real, IsParallel, P, Ghost, NDIMSP2, NN boundary_names, current_filename, unsaved_changes, - p4est_partition_allow_for_coarsening) + p4est_partition_allow_for_coarsening, + coordinates_min, + coordinates_max, + trees_per_dimension) # Destroy `p4est` structs when the mesh is garbage collected finalizer(destroy_mesh, mesh) @@ -215,7 +222,8 @@ function P4estMesh(trees_per_dimension; polydeg, return P4estMesh{NDIMS}(p4est, tree_node_coordinates, nodes, boundary_names, "", unsaved_changes, - p4est_partition_allow_for_coarsening) + p4est_partition_allow_for_coarsening, + coordinates_min, coordinates_max, trees_per_dimension) end # 2D version diff --git a/src/semidiscretization/semidiscretization_coupled.jl b/src/semidiscretization/semidiscretization_coupled.jl index cbc182fccca..2058c1c5aab 100644 --- a/src/semidiscretization/semidiscretization_coupled.jl +++ b/src/semidiscretization/semidiscretization_coupled.jl @@ -501,12 +501,25 @@ function copy_to_coupled_boundary!(boundary_condition::BoundaryConditionCoupled{ u = wrap_array(get_system_u_ode(u_ode, other_semi_index, semi), mesh, equations, solver, cache) - linear_indices = LinearIndices(size(mesh)) + @autoinfiltrate + if mesh isa P4estMesh + linear_indices = LinearIndices(mesh.trees_per_dimension) + else + linear_indices = LinearIndices(size(mesh)) + end - if other_orientation == 1 - cells = axes(mesh, 2) - else # other_orientation == 2 - cells = axes(mesh, 1) + if mesh isa P4estMesh + if other_orientation == 1 + cells = mesh.trees_per_dimension[2] + else # other_orientation == 2 + cells = mesh.trees_per_dimension[1] + end + else + if other_orientation == 1 + cells = axes(mesh, 2) + else # other_orientation == 2 + cells = axes(mesh, 1) + end end # Copy solution data to the coupled boundary using "delayed indexing" with @@ -515,8 +528,13 @@ function copy_to_coupled_boundary!(boundary_condition::BoundaryConditionCoupled{ i_node_start, i_node_step = index_to_start_step_2d(indices[1], node_index_range) j_node_start, j_node_step = index_to_start_step_2d(indices[2], node_index_range) - i_cell_start, i_cell_step = index_to_start_step_2d(indices[1], axes(mesh, 1)) - j_cell_start, j_cell_step = index_to_start_step_2d(indices[2], axes(mesh, 2)) + if mesh isa P4estMesh + i_cell_start, i_cell_step = index_to_start_step_2d(indices[1], mesh.trees_per_dimension[1]) + j_cell_start, j_cell_step = index_to_start_step_2d(indices[2], mesh.trees_per_dimension[2]) + else + i_cell_start, i_cell_step = index_to_start_step_2d(indices[1], axes(mesh, 1)) + 
j_cell_start, j_cell_step = index_to_start_step_2d(indices[2], axes(mesh, 2)) + end i_cell = i_cell_start j_cell = j_cell_start From 629d7499fa810736e8d4de2b50be8d450546ac9d Mon Sep 17 00:00:00 2001 From: SimonCan Date: Tue, 8 Aug 2023 10:58:17 +0100 Subject: [PATCH 037/166] Added ew struct members of p4est to the io. --- src/meshes/mesh_io.jl | 27 +++++++++++++++++++++------ 1 file changed, 21 insertions(+), 6 deletions(-) diff --git a/src/meshes/mesh_io.jl b/src/meshes/mesh_io.jl index b9895e7d454..fe13c329409 100644 --- a/src/meshes/mesh_io.jl +++ b/src/meshes/mesh_io.jl @@ -175,6 +175,9 @@ function save_mesh_file(mesh::P4estMesh, output_directory, timestep, # to increase the runtime performance # but HDF5 can only handle plain arrays file["boundary_names"] = mesh.boundary_names .|> String + file["coordinates_min"] = Vector(mesh.coordinates_min) + file["coordinates_max"] = Vector(mesh.coordinates_max) + file["trees_per_dimension"] = Vector(mesh.trees_per_dimension) end return filename @@ -215,6 +218,9 @@ function save_mesh_file(mesh::P4estMesh, output_directory, timestep, mpi_paralle # to increase the runtime performance # but HDF5 can only handle plain arrays file["boundary_names"] = mesh.boundary_names .|> String + file["coordinates_min"] = mesh.coordinates_min + file["coordinates_max"] = mesh.coordinates_max + file["trees_per_dimension"] = mesh.trees_per_dimension end return filename @@ -304,11 +310,15 @@ function load_mesh_serial(mesh_file::AbstractString; n_cells_max, RealT) unsaved_changes = false) elseif mesh_type == "P4estMesh" p4est_filename, tree_node_coordinates, - nodes, boundary_names_ = h5open(mesh_file, "r") do file + nodes, boundary_names_, coordinates_min, + coordinates_max, trees_per_dimension = h5open(mesh_file, "r") do file return read(attributes(file)["p4est_file"]), read(file["tree_node_coordinates"]), read(file["nodes"]), - read(file["boundary_names"]) + read(file["boundary_names"]), + read(file["coordinates_min"]), + read(file["coordinates_max"]), + read(file["trees_per_dimension"]) end boundary_names = boundary_names_ .|> Symbol @@ -320,7 +330,8 @@ function load_mesh_serial(mesh_file::AbstractString; n_cells_max, RealT) p4est = load_p4est(p4est_file, Val(ndims)) mesh = P4estMesh{ndims}(p4est, tree_node_coordinates, - nodes, boundary_names, "", false, true) + nodes, boundary_names, "", false, true, + coordinates_min, coordinates_max, trees_per_dimension) else error("Unknown mesh type!") end @@ -384,12 +395,16 @@ function load_mesh_parallel(mesh_file::AbstractString; n_cells_max, RealT) elseif mesh_type == "P4estMesh" if mpi_isroot() p4est_filename, tree_node_coordinates, - nodes, boundary_names_ = h5open(mesh_file, "r") do file + nodes, boundary_names_, coordinates_min, + coordinates_max, trees_per_dimension = h5open(mesh_file, "r") do file return read(attributes(file)["p4est_file"]), read(file["tree_node_coordinates"]), read(file["nodes"]), - read(file["boundary_names"]) - end + read(file["boundary_names"]), + read(file["coordinates_min"]), + read(file["coordinates_max"]), + read(file["trees_per_dimension"]) + end boundary_names = boundary_names_ .|> Symbol From 24e1418b620898bfa7b866ba401281a7061f0f1e Mon Sep 17 00:00:00 2001 From: SimonCan Date: Tue, 8 Aug 2023 10:58:42 +0100 Subject: [PATCH 038/166] Corrected array initialization. 
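The fields added for the P4estMesh are now typed as `SVector{NDIMS, ...}` instead of
fixed 2-tuples, which keeps them dimension-generic and lets them be constructed from
the plain vectors read back from the mesh file. A hedged sketch of that construction
with illustrative values (assumes StaticArrays, which Trixi already depends on):

    using StaticArrays

    # `read(file["coordinates_min"])` returns a plain Vector; an SVector-typed
    # field can be constructed from it directly.
    coordinates_min = SVector{2, Float64}([-1.0, -1.0])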
--- src/meshes/p4est_mesh.jl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/meshes/p4est_mesh.jl b/src/meshes/p4est_mesh.jl index 87ef823185e..6c0aae81f6c 100644 --- a/src/meshes/p4est_mesh.jl +++ b/src/meshes/p4est_mesh.jl @@ -24,9 +24,9 @@ mutable struct P4estMesh{NDIMS, RealT <: Real, IsParallel, P, Ghost, NDIMSP2, NN current_filename::String unsaved_changes::Bool p4est_partition_allow_for_coarsening::Bool - coordinates_min::Tuple{RealT, RealT} - coordinates_max::Tuple{RealT, RealT} - trees_per_dimension::Tuple{Int, Int} + coordinates_min::SVector{NDIMS, RealT} + coordinates_max::SVector{NDIMS, RealT} + trees_per_dimension::SVector{NDIMS, Int} function P4estMesh{NDIMS}(p4est, tree_node_coordinates, nodes, boundary_names, current_filename, unsaved_changes, From 701111d66ad36e92212939d1f22c221225ccf3d6 Mon Sep 17 00:00:00 2001 From: SimonCan Date: Tue, 8 Aug 2023 11:23:17 +0100 Subject: [PATCH 039/166] Corrected LinearizedIndices for p4est. --- src/semidiscretization/semidiscretization_coupled.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/semidiscretization/semidiscretization_coupled.jl b/src/semidiscretization/semidiscretization_coupled.jl index 2058c1c5aab..96accad81f1 100644 --- a/src/semidiscretization/semidiscretization_coupled.jl +++ b/src/semidiscretization/semidiscretization_coupled.jl @@ -501,9 +501,8 @@ function copy_to_coupled_boundary!(boundary_condition::BoundaryConditionCoupled{ u = wrap_array(get_system_u_ode(u_ode, other_semi_index, semi), mesh, equations, solver, cache) - @autoinfiltrate if mesh isa P4estMesh - linear_indices = LinearIndices(mesh.trees_per_dimension) + linear_indices = LinearIndices((mesh.trees_per_dimension[1], mesh.trees_per_dimension[2])) else linear_indices = LinearIndices(size(mesh)) end @@ -539,6 +538,7 @@ function copy_to_coupled_boundary!(boundary_condition::BoundaryConditionCoupled{ i_cell = i_cell_start j_cell = j_cell_start + # @autoinfiltrate for cell in cells i_node = i_node_start j_node = j_node_start From 5c08cf4a4a7407d3ffa90182baafaf8c286b8c33 Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Fri, 24 Nov 2023 10:45:07 +0100 Subject: [PATCH 040/166] Switch (back) to Einfeldt wave-speed estimate from paper (#1751) * Switch (back) to wave-speed estimate from paper * upadte test vals --- src/equations/ideal_glm_mhd_1d.jl | 2 +- test/test_tree_1d_mhd.jl | 15 ++++++++------- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/src/equations/ideal_glm_mhd_1d.jl b/src/equations/ideal_glm_mhd_1d.jl index a465571989b..eca06c8c203 100644 --- a/src/equations/ideal_glm_mhd_1d.jl +++ b/src/equations/ideal_glm_mhd_1d.jl @@ -285,7 +285,7 @@ function flux_hllc(u_ll, u_rr, orientation::Integer, f_ll = flux(u_ll, orientation, equations) f_rr = flux(u_rr, orientation, equations) - SsL, SsR = min_max_speed_naive(u_ll, u_rr, orientation, equations) + SsL, SsR = min_max_speed_einfeldt(u_ll, u_rr, orientation, equations) sMu_L = SsL - v1_ll sMu_R = SsR - v1_rr if SsL >= 0 diff --git a/test/test_tree_1d_mhd.jl b/test/test_tree_1d_mhd.jl index 447572eee88..8895fe30e8b 100644 --- a/test/test_tree_1d_mhd.jl +++ b/test/test_tree_1d_mhd.jl @@ -210,15 +210,16 @@ end @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_mhd_torrilhon_shock_tube.jl"), surface_flux=flux_hllc, l2=[ - 0.4574266553239646, 0.4794143154876439, 0.3407079689595056, - 0.44797768430829343, 0.9206916204424165, - 1.3216517820475193e-16, 0.2889748702415378, - 0.25529778018020927, + 0.45738965718253993, 0.479402222862685, 
0.34069729746967664, + 0.44795514335568865, 0.9206813325913135, + 1.3216517820475193e-16, 0.2889672868491632, + 0.2552794220777942, ], linf=[ - 1.217943947570543, 0.8868438459815245, 0.878215340656725, - 0.9710882819266371, 1.6742759645320984, - 2.220446049250313e-16, 0.704710220504591, 0.6562122176458641, + 1.2181099854251536, 0.8869319941747589, 0.8763562906332134, + 0.9712221036087284, 1.6734231113527818, + 2.220446049250313e-16, 0.7035011427822779, + 0.6562884129650286, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) From d7e1f746462e806a317f005d4745c9c7db739d95 Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Tue, 28 Nov 2023 10:28:45 +0100 Subject: [PATCH 041/166] Store capacity serial tree mesh (#1748) * Store capacity serial tree mesh * relevant changes * fmt * shorten * move capacity * Update src/meshes/mesh_io.jl Co-authored-by: Michael Schlottke-Lakemper --------- Co-authored-by: Michael Schlottke-Lakemper Co-authored-by: Hendrik Ranocha --- src/meshes/mesh_io.jl | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/meshes/mesh_io.jl b/src/meshes/mesh_io.jl index 92e38ce1bf3..337e33e6969 100644 --- a/src/meshes/mesh_io.jl +++ b/src/meshes/mesh_io.jl @@ -30,6 +30,7 @@ function save_mesh_file(mesh::TreeMesh, output_directory, timestep, attributes(file)["mesh_type"] = get_name(mesh) attributes(file)["ndims"] = ndims(mesh) attributes(file)["n_cells"] = n_cells + attributes(file)["capacity"] = mesh.tree.capacity attributes(file)["n_leaf_cells"] = count_leaf_cells(mesh.tree) attributes(file)["minimum_level"] = minimum_level(mesh.tree) attributes(file)["maximum_level"] = maximum_level(mesh.tree) @@ -249,10 +250,10 @@ function load_mesh_serial(mesh_file::AbstractString; n_cells_max, RealT) end if mesh_type == "TreeMesh" - n_cells = h5open(mesh_file, "r") do file - return read(attributes(file)["n_cells"]) + capacity = h5open(mesh_file, "r") do file + return read(attributes(file)["capacity"]) end - mesh = TreeMesh(SerialTree{ndims}, max(n_cells, n_cells_max)) + mesh = TreeMesh(SerialTree{ndims}, max(n_cells_max, capacity)) load_mesh!(mesh, mesh_file) elseif mesh_type == "StructuredMesh" size_, mapping_as_string = h5open(mesh_file, "r") do file From 17d507071622a121afeaee810618ed0dc02bb5dd Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Fri, 1 Dec 2023 13:38:44 +0100 Subject: [PATCH 042/166] Use total pressure for 1D HLLC MHD (#1756) * Use total pressure * Update src/equations/ideal_glm_mhd_1d.jl Co-authored-by: Andrew Winters --------- Co-authored-by: Andrew Winters --- src/equations/ideal_glm_mhd_1d.jl | 20 +++++++++------ test/test_tree_1d_mhd.jl | 41 +++++++++++++++++++++++++------ 2 files changed, 45 insertions(+), 16 deletions(-) diff --git a/src/equations/ideal_glm_mhd_1d.jl b/src/equations/ideal_glm_mhd_1d.jl index eca06c8c203..5a523daf3f6 100644 --- a/src/equations/ideal_glm_mhd_1d.jl +++ b/src/equations/ideal_glm_mhd_1d.jl @@ -272,6 +272,10 @@ function flux_hllc(u_ll, u_rr, orientation::Integer, rho_ll, v1_ll, v2_ll, v3_ll, p_ll, B1_ll, B2_ll, B3_ll = cons2prim(u_ll, equations) rho_rr, v1_rr, v2_rr, v3_rr, p_rr, B1_rr, B2_rr, B3_rr = cons2prim(u_rr, equations) + # Total pressure, i.e., thermal + magnetic pressures (eq. 
(12)) + p_tot_ll = p_ll + 0.5 * (B1_ll^2 + B2_ll^2 + B3_ll^2) + p_tot_rr = p_rr + 0.5 * (B1_rr^2 + B2_rr^2 + B3_rr^2) + # Conserved variables rho_v1_ll = u_ll[2] rho_v2_ll = u_ll[3] @@ -309,11 +313,11 @@ function flux_hllc(u_ll, u_rr, orientation::Integer, else # Compute the "HLLC-speed", eq. (14) from paper mentioned above #= - SStar = (rho_rr * v1_rr * sMu_R - rho_ll * v1_ll * sMu_L + p_ll - p_rr - B1_ll^2 + B1_rr^2 ) / + SStar = (rho_rr * v1_rr * sMu_R - rho_ll * v1_ll * sMu_L + p_tot_ll - p_tot_rr - B1_ll^2 + B1_rr^2 ) / (rho_rr * sMu_R - rho_ll * sMu_L) =# # Simplification for 1D: B1 is constant - SStar = (rho_rr * v1_rr * sMu_R - rho_ll * v1_ll * sMu_L + p_ll - p_rr) / + SStar = (rho_rr * v1_rr * sMu_R - rho_ll * v1_ll * sMu_L + p_tot_ll - p_tot_rr) / (rho_rr * sMu_R - rho_ll * sMu_L) Sdiff = SsR - SsL @@ -347,12 +351,12 @@ function flux_hllc(u_ll, u_rr, orientation::Integer, mom_3_Star = densStar * v3_ll - (B1Star * B3Star - B1_ll * B3_ll) / SdiffStar # (22) - #pstar = rho_ll * sMu_L * (SStar - v1_ll) + p_ll - B1_ll^2 + B1Star^2 # (17) + #p_tot_Star = rho_ll * sMu_L * (SStar - v1_ll) + p_tot_ll - B1_ll^2 + B1Star^2 # (17) # 1D B1 = constant => B1_ll = B1_rr = B1Star - pstar = rho_ll * sMu_L * (SStar - v1_ll) + p_ll # (17) + p_tot_Star = rho_ll * sMu_L * (SStar - v1_ll) + p_tot_ll # (17) enerStar = u_ll[5] * sMu_L / SdiffStar + - (pstar * SStar - p_ll * v1_ll - (B1Star * + (p_tot_Star * SStar - p_tot_ll * v1_ll - (B1Star * (B1Star * v1Star + B2Star * v2Star + B3Star * v3Star) - B1_ll * (B1_ll * v1_ll + B2_ll * v2_ll + B3_ll * v3_ll))) / SdiffStar # (23) @@ -377,12 +381,12 @@ function flux_hllc(u_ll, u_rr, orientation::Integer, mom_3_Star = densStar * v3_rr - (B1Star * B3Star - B1_rr * B3_rr) / SdiffStar # (22) - #pstar = rho_rr * sMu_R * (SStar - v1_rr) + p_rr - B1_rr^2 + B1Star^2 # (17) + #p_tot_Star = rho_rr * sMu_R * (SStar - v1_rr) + p_tot_rr - B1_rr^2 + B1Star^2 # (17) # 1D B1 = constant => B1_ll = B1_rr = B1Star - pstar = rho_rr * sMu_R * (SStar - v1_rr) + p_rr # (17) + p_tot_Star = rho_rr * sMu_R * (SStar - v1_rr) + p_tot_rr # (17) enerStar = u_rr[5] * sMu_R / SdiffStar + - (pstar * SStar - p_rr * v1_rr - (B1Star * + (p_tot_Star * SStar - p_tot_rr * v1_rr - (B1Star * (B1Star * v1Star + B2Star * v2Star + B3Star * v3Star) - B1_rr * (B1_rr * v1_rr + B2_rr * v2_rr + B3_rr * v3_rr))) / SdiffStar # (23) diff --git a/test/test_tree_1d_mhd.jl b/test/test_tree_1d_mhd.jl index 8895fe30e8b..2150ddfd074 100644 --- a/test/test_tree_1d_mhd.jl +++ b/test/test_tree_1d_mhd.jl @@ -109,6 +109,31 @@ end end end +@trixi_testset "elixir_mhd_alfven_wave.jl with flux_hllc" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_mhd_alfven_wave.jl"), + l2=[ + 1.036850596986597e-5, 1.965192583650368e-6, + 3.5882124656715505e-5, 3.5882124656638764e-5, + 5.270975504780837e-6, 1.1963224165731992e-16, + 3.595811808912869e-5, 3.5958118089159453e-5, + ], + linf=[ + 2.887280521446378e-5, 7.310580790352001e-6, + 0.00012390046377899755, 0.00012390046377787345, + 1.5102711136583125e-5, 2.220446049250313e-16, + 0.0001261935452181312, 0.0001261935452182006, + ], + surface_flux=flux_hllc) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + @trixi_testset "elixir_mhd_ec.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_mhd_ec.jl"), l2=[ @@ -210,16 +235,16 @@ end @test_trixi_include(joinpath(EXAMPLES_DIR, 
"elixir_mhd_torrilhon_shock_tube.jl"), surface_flux=flux_hllc, l2=[ - 0.45738965718253993, 0.479402222862685, 0.34069729746967664, - 0.44795514335568865, 0.9206813325913135, - 1.3216517820475193e-16, 0.2889672868491632, - 0.2552794220777942, + 0.4573799618744708, 0.4792633358230866, 0.34064852506872795, + 0.4479668434955162, 0.9203891782415092, + 1.3216517820475193e-16, 0.28887826520860815, + 0.255281629265771, ], linf=[ - 1.2181099854251536, 0.8869319941747589, 0.8763562906332134, - 0.9712221036087284, 1.6734231113527818, - 2.220446049250313e-16, 0.7035011427822779, - 0.6562884129650286, + 1.2382842201671505, 0.8929169308132259, 0.871298623806198, + 0.9822415614542821, 1.6726170732132717, + 2.220446049250313e-16, 0.7016155888023747, + 0.6556091522071984, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) From 74aafa08e41ce1ffdfeae4c42d46a28e381e0375 Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Mon, 4 Dec 2023 07:40:08 +0100 Subject: [PATCH 043/166] Update test vals of failing tests for upstream packages (#1760) * Update test vals * fmt --------- Co-authored-by: Hendrik Ranocha --- .../elixir_eulermulti_shock_bubble.jl | 6 +- ...ck_bubble_shockcapturing_subcell_minmax.jl | 6 +- ...ubble_shockcapturing_subcell_positivity.jl | 6 +- ext/TrixiMakieExt.jl | 7 +- src/auxiliary/precompile.jl | 58 +++++++--------- src/callbacks_step/averaging.jl | 3 +- .../euler_acoustics_coupling.jl | 7 +- src/callbacks_step/glm_speed_dg.jl | 7 +- src/callbacks_step/save_solution.jl | 6 +- src/callbacks_step/save_solution_dg.jl | 6 +- .../compressible_euler_multicomponent_1d.jl | 22 +++--- .../compressible_euler_multicomponent_2d.jl | 22 +++--- .../compressible_navier_stokes_1d.jl | 59 +++++----------- .../compressible_navier_stokes_2d.jl | 67 +++++-------------- .../compressible_navier_stokes_3d.jl | 59 +++++----------- .../ideal_glm_mhd_multicomponent_1d.jl | 22 +++--- .../ideal_glm_mhd_multicomponent_2d.jl | 22 +++--- src/equations/numerical_fluxes.jl | 5 +- src/meshes/unstructured_mesh.jl | 5 +- .../semidiscretization_coupled.jl | 8 +-- .../semidiscretization_hyperbolic.jl | 29 ++++---- ...semidiscretization_hyperbolic_parabolic.jl | 47 +++++++------ src/solvers/dgmulti/flux_differencing.jl | 12 ++-- .../dgmulti/flux_differencing_gauss_sbp.jl | 5 +- src/solvers/dgmulti/sbp.jl | 34 +++++----- src/solvers/dgmulti/types.jl | 53 +++++++-------- src/solvers/dgsem/basis_lobatto_legendre.jl | 6 +- src/solvers/dgsem_p4est/containers.jl | 3 +- .../dgsem_p4est/containers_parallel.jl | 3 +- .../dgsem_p4est/containers_parallel_2d.jl | 4 +- .../dgsem_p4est/containers_parallel_3d.jl | 4 +- src/solvers/dgsem_p4est/dg_2d_parabolic.jl | 3 +- src/solvers/dgsem_p4est/dg_parallel.jl | 3 +- src/solvers/dgsem_structured/containers.jl | 4 +- src/solvers/dgsem_tree/dg_1d_parabolic.jl | 12 ++-- src/solvers/dgsem_tree/dg_2d.jl | 4 +- src/solvers/dgsem_tree/dg_2d_parabolic.jl | 12 ++-- src/solvers/dgsem_tree/dg_3d.jl | 12 ++-- src/solvers/dgsem_tree/dg_3d_parabolic.jl | 12 ++-- src/solvers/dgsem_tree/indicators_1d.jl | 4 +- src/solvers/dgsem_tree/indicators_2d.jl | 4 +- src/solvers/dgsem_tree/subcell_limiters_2d.jl | 7 +- src/visualization/recipes_plots.jl | 7 +- test/test_tree_3d_fdsbp.jl | 4 +- 44 files changed, 263 insertions(+), 428 deletions(-) diff --git a/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble.jl b/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble.jl index f5ef51c108a..c6ed07dcda1 100644 --- a/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble.jl +++ 
b/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble.jl @@ -17,10 +17,8 @@ A shock-bubble testcase for multicomponent Euler equations [arXiv: 1904.00972](https://arxiv.org/abs/1904.00972) """ function initial_condition_shock_bubble(x, t, - equations::CompressibleEulerMulticomponentEquations2D{ - 5, - 2 - }) + equations::CompressibleEulerMulticomponentEquations2D{5, + 2}) # bubble test case, see Gouasmi et al. https://arxiv.org/pdf/1904.00972 # other reference: https://www.researchgate.net/profile/Pep_Mulet/publication/222675930_A_flux-split_algorithm_applied_to_conservative_models_for_multicomponent_compressible_flows/links/568da54508aeaa1481ae7af0.pdf # typical domain is rectangular, we change it to a square, as Trixi can only do squares diff --git a/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_minmax.jl b/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_minmax.jl index 3159a2066ad..4b606502ebe 100644 --- a/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_minmax.jl +++ b/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_minmax.jl @@ -17,10 +17,8 @@ A shock-bubble testcase for multicomponent Euler equations [arXiv: 1904.00972](https://arxiv.org/abs/1904.00972) """ function initial_condition_shock_bubble(x, t, - equations::CompressibleEulerMulticomponentEquations2D{ - 5, - 2 - }) + equations::CompressibleEulerMulticomponentEquations2D{5, + 2}) # bubble test case, see Gouasmi et al. https://arxiv.org/pdf/1904.00972 # other reference: https://www.researchgate.net/profile/Pep_Mulet/publication/222675930_A_flux-split_algorithm_applied_to_conservative_models_for_multicomponent_compressible_flows/links/568da54508aeaa1481ae7af0.pdf # typical domain is rectangular, we change it to a square, as Trixi can only do squares diff --git a/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_positivity.jl b/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_positivity.jl index 7856c9bafbd..78ff47e255f 100644 --- a/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_positivity.jl +++ b/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_positivity.jl @@ -17,10 +17,8 @@ A shock-bubble testcase for multicomponent Euler equations [arXiv: 1904.00972](https://arxiv.org/abs/1904.00972) """ function initial_condition_shock_bubble(x, t, - equations::CompressibleEulerMulticomponentEquations2D{ - 5, - 2 - }) + equations::CompressibleEulerMulticomponentEquations2D{5, + 2}) # bubble test case, see Gouasmi et al. https://arxiv.org/pdf/1904.00972 # other reference: https://www.researchgate.net/profile/Pep_Mulet/publication/222675930_A_flux-split_algorithm_applied_to_conservative_models_for_multicomponent_compressible_flows/links/568da54508aeaa1481ae7af0.pdf # typical domain is rectangular, we change it to a square, as Trixi can only do squares diff --git a/ext/TrixiMakieExt.jl b/ext/TrixiMakieExt.jl index 8cd7576a6e5..301a7656da9 100644 --- a/ext/TrixiMakieExt.jl +++ b/ext/TrixiMakieExt.jl @@ -29,9 +29,7 @@ import Trixi: iplot, iplot! # First some utilities # Given a reference plotting triangulation, this function generates a plotting triangulation for # the entire global mesh. The output can be plotted using `Makie.mesh`. 
-function global_plotting_triangulation_makie(pds::PlotDataSeries{ - <:PlotData2DTriangulated - }; +function global_plotting_triangulation_makie(pds::PlotDataSeries{<:PlotData2DTriangulated}; set_z_coordinate_zero = false) @unpack variable_id = pds pd = pds.plot_data @@ -61,8 +59,7 @@ end # Returns a list of `Makie.Point`s which can be used to plot the mesh, or a solution "wireframe" # (e.g., a plot of the mesh lines but with the z-coordinate equal to the value of the solution). -function convert_PlotData2D_to_mesh_Points(pds::PlotDataSeries{<:PlotData2DTriangulated - }; +function convert_PlotData2D_to_mesh_Points(pds::PlotDataSeries{<:PlotData2DTriangulated}; set_z_coordinate_zero = false) @unpack variable_id = pds pd = pds.plot_data diff --git a/src/auxiliary/precompile.jl b/src/auxiliary/precompile.jl index 7ed0e26b5ef..9cec502f6cb 100644 --- a/src/auxiliary/precompile.jl +++ b/src/auxiliary/precompile.jl @@ -186,8 +186,7 @@ function _precompile_manual_() Matrix{RealT}, # DerivativeMatrix #StaticArrays.SArray{Tuple{nnodes_,nnodes_},RealT,2,nnodes_^2}, - Matrix{RealT} - } + Matrix{RealT}} end function mortar_type_dgsem(RealT, nnodes_) @@ -197,8 +196,7 @@ function _precompile_manual_() Matrix{RealT}, # ReverseMatrix # StaticArrays.SArray{Tuple{nnodes_,nnodes_},RealT,2,nnodes_^2}, - Matrix{RealT} - } + Matrix{RealT}} end function analyzer_type_dgsem(RealT, nnodes_) @@ -208,8 +206,7 @@ function _precompile_manual_() # VectorT StaticArrays.SVector{nnodes_analysis, RealT}, # Vandermonde - Array{RealT, 2} - } + Array{RealT, 2}} end function adaptor_type_dgsem(RealT, nnodes_) @@ -242,8 +239,8 @@ function _precompile_manual_() @assert Base.precompile(Tuple{Core.kwftype(typeof(Trixi.Type)), NamedTuple{(:initial_refinement_level, :n_cells_max), Tuple{Int, Int}}, Type{TreeMesh}, - Tuple{RealT, RealT, RealT}, Tuple{RealT, RealT, RealT - }}) + Tuple{RealT, RealT, RealT}, + Tuple{RealT, RealT, RealT}}) end for TreeType in (SerialTree, ParallelTree), NDIMS in 1:3 @assert Base.precompile(Tuple{typeof(Trixi.initialize!), @@ -308,8 +305,8 @@ function _precompile_manual_() Base.precompile(Tuple{Type{LobattoLegendreBasis}, Int}) for RealT in (Float64,) Base.precompile(Tuple{Type{LobattoLegendreBasis}, RealT, Int}) - @assert Base.precompile(Tuple{typeof(Trixi.calc_dhat), Vector{RealT}, Vector{RealT} - }) + @assert Base.precompile(Tuple{typeof(Trixi.calc_dhat), Vector{RealT}, + Vector{RealT}}) @assert Base.precompile(Tuple{typeof(Trixi.calc_dsplit), Vector{RealT}, Vector{RealT}}) @assert Base.precompile(Tuple{typeof(Trixi.polynomial_derivative_matrix), @@ -332,10 +329,10 @@ function _precompile_manual_() @assert Base.precompile(Tuple{typeof(Trixi.calc_forward_lower), Int}) @assert Base.precompile(Tuple{typeof(Trixi.calc_reverse_upper), Int, Val{:gauss}}) @assert Base.precompile(Tuple{typeof(Trixi.calc_reverse_lower), Int, Val{:gauss}}) - @assert Base.precompile(Tuple{typeof(Trixi.calc_reverse_upper), Int, Val{:gauss_lobatto - }}) - @assert Base.precompile(Tuple{typeof(Trixi.calc_reverse_lower), Int, Val{:gauss_lobatto - }}) + @assert Base.precompile(Tuple{typeof(Trixi.calc_reverse_upper), Int, + Val{:gauss_lobatto}}) + @assert Base.precompile(Tuple{typeof(Trixi.calc_reverse_lower), Int, + Val{:gauss_lobatto}}) # Constructors: mortars, analyzers, adaptors for RealT in (Float64,), polydeg in 1:7 @@ -362,14 +359,12 @@ function _precompile_manual_() NamedTuple{(:interval, :save_final_restart), Tuple{Int, Bool}}, Type{SaveRestartCallback}}) @assert Base.precompile(Tuple{Core.kwftype(typeof(Trixi.Type)), - 
NamedTuple{ - (:interval, :save_initial_solution, + NamedTuple{(:interval, :save_initial_solution, :save_final_solution, :solution_variables), Tuple{Int, Bool, Bool, typeof(cons2cons)}}, Type{SaveSolutionCallback}}) @assert Base.precompile(Tuple{Core.kwftype(typeof(Trixi.Type)), - NamedTuple{ - (:interval, :save_initial_solution, + NamedTuple{(:interval, :save_initial_solution, :save_final_solution, :solution_variables), Tuple{Int, Bool, Bool, typeof(cons2prim)}}, Type{SaveSolutionCallback}}) @@ -385,8 +380,7 @@ function _precompile_manual_() # end # end @assert Base.precompile(Tuple{typeof(SummaryCallback)}) - @assert Base.precompile(Tuple{ - DiscreteCallback{typeof(Trixi.summary_callback), + @assert Base.precompile(Tuple{DiscreteCallback{typeof(Trixi.summary_callback), typeof(Trixi.summary_callback), typeof(Trixi.initialize_summary_callback), typeof(SciMLBase.FINALIZE_DEFAULT)}}) @@ -419,8 +413,8 @@ function _precompile_manual_() Trixi.ElementContainer2D{RealT, uEltype}}) @assert Base.precompile(Tuple{typeof(Trixi.init_mortars), Array{Int, 1}, TreeMesh{2, Trixi.SerialTree{2}}, - Trixi.ElementContainer2D{RealT, uEltype}, mortar_type - }) + Trixi.ElementContainer2D{RealT, uEltype}, + mortar_type}) @assert Base.precompile(Tuple{typeof(Trixi.save_mesh_file), TreeMesh{2, Trixi.SerialTree{2}}, String}) @@ -433,8 +427,8 @@ function _precompile_manual_() Trixi.ElementContainer2D{RealT, uEltype}}) @assert Base.precompile(Tuple{typeof(Trixi.init_mortars), Array{Int, 1}, TreeMesh{2, Trixi.ParallelTree{2}}, - Trixi.ElementContainer2D{RealT, uEltype}, mortar_type - }) + Trixi.ElementContainer2D{RealT, uEltype}, + mortar_type}) @assert Base.precompile(Tuple{typeof(Trixi.init_mpi_interfaces), Array{Int, 1}, TreeMesh{2, Trixi.ParallelTree{2}}, Trixi.ElementContainer2D{RealT, uEltype}}) @@ -450,8 +444,8 @@ function _precompile_manual_() Trixi.ElementContainer3D{RealT, uEltype}}) @assert Base.precompile(Tuple{typeof(Trixi.init_mortars), Array{Int, 1}, TreeMesh{3, Trixi.SerialTree{3}}, - Trixi.ElementContainer3D{RealT, uEltype}, mortar_type - }) + Trixi.ElementContainer3D{RealT, uEltype}, + mortar_type}) @assert Base.precompile(Tuple{typeof(Trixi.save_mesh_file), TreeMesh{3, Trixi.SerialTree{3}}, String}) end @@ -548,16 +542,10 @@ function _precompile_manual_() restart_callback_type}) for solution_variables in (cons2cons, cons2prim) - save_solution_callback_type = DiscreteCallback{ - SaveSolutionCallback{ - typeof(solution_variables) - }, - SaveSolutionCallback{ - typeof(solution_variables) - }, + save_solution_callback_type = DiscreteCallback{SaveSolutionCallback{typeof(solution_variables)}, + SaveSolutionCallback{typeof(solution_variables)}, typeof(Trixi.initialize!), - typeof(SciMLBase.FINALIZE_DEFAULT) - } + typeof(SciMLBase.FINALIZE_DEFAULT)} @assert Base.precompile(Tuple{typeof(show), Base.TTY, save_solution_callback_type}) @assert Base.precompile(Tuple{typeof(show), IOContext{Base.TTY}, diff --git a/src/callbacks_step/averaging.jl b/src/callbacks_step/averaging.jl index 8d2dcfeaefe..efa71af9b91 100644 --- a/src/callbacks_step/averaging.jl +++ b/src/callbacks_step/averaging.jl @@ -52,8 +52,7 @@ function Base.show(io::IO, ::MIME"text/plain", end function AveragingCallback(semi::SemidiscretizationHyperbolic{<:Any, - <:CompressibleEulerEquations2D - }, + <:CompressibleEulerEquations2D}, tspan; output_directory = "out", filename = "averaging.h5") mesh, equations, solver, cache = mesh_equations_solver_cache(semi) mean_values = initialize_mean_values(mesh, equations, solver, cache) diff --git 
a/src/callbacks_step/euler_acoustics_coupling.jl b/src/callbacks_step/euler_acoustics_coupling.jl index ea33175d0c5..52dc55befdc 100644 --- a/src/callbacks_step/euler_acoustics_coupling.jl +++ b/src/callbacks_step/euler_acoustics_coupling.jl @@ -34,8 +34,8 @@ the [`AveragingCallback`](@ref). A direct-hybrid method for aeroacoustic analysis [DOI: 10.18154/RWTH-2017-04082](https://doi.org/10.18154/RWTH-2017-04082) """ -mutable struct EulerAcousticsCouplingCallback{RealT <: Real, MeanValues, IntegratorEuler - } +mutable struct EulerAcousticsCouplingCallback{RealT <: Real, MeanValues, + IntegratorEuler} stepsize_callback_acoustics::StepsizeCallback{RealT} stepsize_callback_euler::StepsizeCallback{RealT} mean_values::MeanValues @@ -85,8 +85,7 @@ The mean values for the acoustic perturbation equations are read from `averaging """ function EulerAcousticsCouplingCallback(ode_euler, averaging_callback::DiscreteCallback{<:Any, - <:AveragingCallback - }, + <:AveragingCallback}, alg, cfl_acoustics::Real, cfl_euler::Real; kwargs...) @unpack mean_values = averaging_callback.affect! diff --git a/src/callbacks_step/glm_speed_dg.jl b/src/callbacks_step/glm_speed_dg.jl index 0686c547a34..302aae356ab 100644 --- a/src/callbacks_step/glm_speed_dg.jl +++ b/src/callbacks_step/glm_speed_dg.jl @@ -7,8 +7,8 @@ function calc_dt_for_cleaning_speed(cfl::Real, mesh, equations::Union{AbstractIdealGlmMhdEquations, - AbstractIdealGlmMhdMulticomponentEquations - }, dg::DG, cache) + AbstractIdealGlmMhdMulticomponentEquations}, + dg::DG, cache) # compute time step for GLM linear advection equation with c_h=1 for the DG discretization on # Cartesian meshes max_scaled_speed_for_c_h = maximum(cache.elements.inverse_jacobian) * @@ -20,8 +20,7 @@ end function calc_dt_for_cleaning_speed(cfl::Real, mesh, equations::Union{AbstractIdealGlmMhdEquations, - AbstractIdealGlmMhdMulticomponentEquations - }, + AbstractIdealGlmMhdMulticomponentEquations}, dg::DGMulti, cache) rd = dg.basis md = mesh.md diff --git a/src/callbacks_step/save_solution.jl b/src/callbacks_step/save_solution.jl index 0092360cb20..c106fe69bcd 100644 --- a/src/callbacks_step/save_solution.jl +++ b/src/callbacks_step/save_solution.jl @@ -39,8 +39,7 @@ end function Base.show(io::IO, cb::DiscreteCallback{<:Any, - <:PeriodicCallbackAffect{<:SaveSolutionCallback - }}) + <:PeriodicCallbackAffect{<:SaveSolutionCallback}}) @nospecialize cb # reduce precompilation time save_solution_callback = cb.affect!.affect! 
@@ -71,8 +70,7 @@ end function Base.show(io::IO, ::MIME"text/plain", cb::DiscreteCallback{<:Any, - <:PeriodicCallbackAffect{<:SaveSolutionCallback - }}) + <:PeriodicCallbackAffect{<:SaveSolutionCallback}}) @nospecialize cb # reduce precompilation time if get(io, :compact, false) diff --git a/src/callbacks_step/save_solution_dg.jl b/src/callbacks_step/save_solution_dg.jl index 7c015999035..350aee7336a 100644 --- a/src/callbacks_step/save_solution_dg.jl +++ b/src/callbacks_step/save_solution_dg.jl @@ -33,8 +33,7 @@ function save_solution_file(u, time, dt, timestep, # compute the solution variables via broadcasting, and reinterpret the # result as a plain array of floating point numbers data = Array(reinterpret(eltype(u), - solution_variables.(reinterpret(SVector{ - nvariables(equations), + solution_variables.(reinterpret(SVector{nvariables(equations), eltype(u)}, u), Ref(equations)))) @@ -116,8 +115,7 @@ function save_solution_file(u, time, dt, timestep, # compute the solution variables via broadcasting, and reinterpret the # result as a plain array of floating point numbers data = Array(reinterpret(eltype(u), - solution_variables.(reinterpret(SVector{ - nvariables(equations), + solution_variables.(reinterpret(SVector{nvariables(equations), eltype(u)}, u), Ref(equations)))) diff --git a/src/equations/compressible_euler_multicomponent_1d.jl b/src/equations/compressible_euler_multicomponent_1d.jl index 23ac222b976..8ddb0dcd08f 100644 --- a/src/equations/compressible_euler_multicomponent_1d.jl +++ b/src/equations/compressible_euler_multicomponent_1d.jl @@ -54,19 +54,15 @@ struct CompressibleEulerMulticomponentEquations1D{NVARS, NCOMP, RealT <: Real} < cv::SVector{NCOMP, RealT} cp::SVector{NCOMP, RealT} - function CompressibleEulerMulticomponentEquations1D{NVARS, NCOMP, RealT}(gammas::SVector{ - NCOMP, - RealT - }, - gas_constants::SVector{ - NCOMP, - RealT - }) where { - NVARS, - NCOMP, - RealT <: - Real - } + function CompressibleEulerMulticomponentEquations1D{NVARS, NCOMP, RealT}(gammas::SVector{NCOMP, + RealT}, + gas_constants::SVector{NCOMP, + RealT}) where { + NVARS, + NCOMP, + RealT <: + Real + } NCOMP >= 1 || throw(DimensionMismatch("`gammas` and `gas_constants` have to be filled with at least one value")) diff --git a/src/equations/compressible_euler_multicomponent_2d.jl b/src/equations/compressible_euler_multicomponent_2d.jl index ecd3bc80c0a..940d88b1aa5 100644 --- a/src/equations/compressible_euler_multicomponent_2d.jl +++ b/src/equations/compressible_euler_multicomponent_2d.jl @@ -58,19 +58,15 @@ struct CompressibleEulerMulticomponentEquations2D{NVARS, NCOMP, RealT <: Real} < cv::SVector{NCOMP, RealT} cp::SVector{NCOMP, RealT} - function CompressibleEulerMulticomponentEquations2D{NVARS, NCOMP, RealT}(gammas::SVector{ - NCOMP, - RealT - }, - gas_constants::SVector{ - NCOMP, - RealT - }) where { - NVARS, - NCOMP, - RealT <: - Real - } + function CompressibleEulerMulticomponentEquations2D{NVARS, NCOMP, RealT}(gammas::SVector{NCOMP, + RealT}, + gas_constants::SVector{NCOMP, + RealT}) where { + NVARS, + NCOMP, + RealT <: + Real + } NCOMP >= 1 || throw(DimensionMismatch("`gammas` and `gas_constants` have to be filled with at least one value")) diff --git a/src/equations/compressible_navier_stokes_1d.jl b/src/equations/compressible_navier_stokes_1d.jl index 73436c99b7c..d2c46ecc7d8 100644 --- a/src/equations/compressible_navier_stokes_1d.jl +++ b/src/equations/compressible_navier_stokes_1d.jl @@ -81,8 +81,7 @@ w_2 = \frac{\rho v1}{p},\, w_3 = -\frac{\rho}{p} ``` """ struct 
CompressibleNavierStokesDiffusion1D{GradientVariables, RealT <: Real, - E <: AbstractCompressibleEulerEquations{1} - } <: + E <: AbstractCompressibleEulerEquations{1}} <: AbstractCompressibleNavierStokesDiffusion{1, 3, GradientVariables} # TODO: parabolic # 1) For now save gamma and inv(gamma-1) again, but could potentially reuse them from the Euler equations @@ -130,14 +129,10 @@ end # we specialize this function to compute gradients of primitive variables instead of # conservative variables. -function gradient_variable_transformation(::CompressibleNavierStokesDiffusion1D{ - GradientVariablesPrimitive - }) +function gradient_variable_transformation(::CompressibleNavierStokesDiffusion1D{GradientVariablesPrimitive}) cons2prim end -function gradient_variable_transformation(::CompressibleNavierStokesDiffusion1D{ - GradientVariablesEntropy - }) +function gradient_variable_transformation(::CompressibleNavierStokesDiffusion1D{GradientVariablesEntropy}) cons2entropy end @@ -202,17 +197,13 @@ end # For CNS, it is simplest to formulate the viscous terms in primitive variables, so we transform the transformed # variables into primitive variables. @inline function convert_transformed_to_primitive(u_transformed, - equations::CompressibleNavierStokesDiffusion1D{ - GradientVariablesPrimitive - }) + equations::CompressibleNavierStokesDiffusion1D{GradientVariablesPrimitive}) return u_transformed end # TODO: parabolic. Make this more efficient! @inline function convert_transformed_to_primitive(u_transformed, - equations::CompressibleNavierStokesDiffusion1D{ - GradientVariablesEntropy - }) + equations::CompressibleNavierStokesDiffusion1D{GradientVariablesEntropy}) # note: this uses CompressibleNavierStokesDiffusion1D versions of cons2prim and entropy2cons return cons2prim(entropy2cons(u_transformed, equations), equations) end @@ -223,17 +214,13 @@ end # Note, the first component of `gradient_entropy_vars` contains gradient(rho) which is unused. # TODO: parabolic; entropy stable viscous terms @inline function convert_derivative_to_primitive(u, gradient, - ::CompressibleNavierStokesDiffusion1D{ - GradientVariablesPrimitive - }) + ::CompressibleNavierStokesDiffusion1D{GradientVariablesPrimitive}) return gradient end # the first argument is always the "transformed" variables. @inline function convert_derivative_to_primitive(w, gradient_entropy_vars, - equations::CompressibleNavierStokesDiffusion1D{ - GradientVariablesEntropy - }) + equations::CompressibleNavierStokesDiffusion1D{GradientVariablesEntropy}) # TODO: parabolic. This is inefficient to pass in transformed variables but then transform them back. 
# We can fix this if we directly compute v1, v2, T from the entropy variables @@ -272,9 +259,7 @@ end x, t, operator_type::Gradient, - equations::CompressibleNavierStokesDiffusion1D{ - GradientVariablesPrimitive - }) + equations::CompressibleNavierStokesDiffusion1D{GradientVariablesPrimitive}) v1 = boundary_condition.boundary_condition_velocity.boundary_value_function(x, t, equations) return SVector(u_inner[1], v1, u_inner[3]) @@ -288,9 +273,7 @@ end x, t, operator_type::Divergence, - equations::CompressibleNavierStokesDiffusion1D{ - GradientVariablesPrimitive - }) + equations::CompressibleNavierStokesDiffusion1D{GradientVariablesPrimitive}) # rho, v1, v2, _ = u_inner normal_heat_flux = boundary_condition.boundary_condition_heat_flux.boundary_value_normal_flux_function(x, t, @@ -310,9 +293,7 @@ end x, t, operator_type::Gradient, - equations::CompressibleNavierStokesDiffusion1D{ - GradientVariablesPrimitive - }) + equations::CompressibleNavierStokesDiffusion1D{GradientVariablesPrimitive}) v1 = boundary_condition.boundary_condition_velocity.boundary_value_function(x, t, equations) T = boundary_condition.boundary_condition_heat_flux.boundary_value_function(x, t, @@ -328,9 +309,7 @@ end x, t, operator_type::Divergence, - equations::CompressibleNavierStokesDiffusion1D{ - GradientVariablesPrimitive - }) + equations::CompressibleNavierStokesDiffusion1D{GradientVariablesPrimitive}) return flux_inner end @@ -350,9 +329,7 @@ end x, t, operator_type::Gradient, - equations::CompressibleNavierStokesDiffusion1D{ - GradientVariablesEntropy - }) + equations::CompressibleNavierStokesDiffusion1D{GradientVariablesEntropy}) v1 = boundary_condition.boundary_condition_velocity.boundary_value_function(x, t, equations) negative_rho_inv_p = w_inner[3] # w_3 = -rho / p @@ -368,9 +345,7 @@ end x, t, operator_type::Divergence, - equations::CompressibleNavierStokesDiffusion1D{ - GradientVariablesEntropy - }) + equations::CompressibleNavierStokesDiffusion1D{GradientVariablesEntropy}) normal_heat_flux = boundary_condition.boundary_condition_heat_flux.boundary_value_normal_flux_function(x, t, equations) @@ -389,9 +364,7 @@ end x, t, operator_type::Gradient, - equations::CompressibleNavierStokesDiffusion1D{ - GradientVariablesEntropy - }) + equations::CompressibleNavierStokesDiffusion1D{GradientVariablesEntropy}) v1 = boundary_condition.boundary_condition_velocity.boundary_value_function(x, t, equations) T = boundary_condition.boundary_condition_heat_flux.boundary_value_function(x, t, @@ -410,9 +383,7 @@ end x, t, operator_type::Divergence, - equations::CompressibleNavierStokesDiffusion1D{ - GradientVariablesEntropy - }) + equations::CompressibleNavierStokesDiffusion1D{GradientVariablesEntropy}) return SVector(flux_inner[1], flux_inner[2], flux_inner[3]) end end # @muladd diff --git a/src/equations/compressible_navier_stokes_2d.jl b/src/equations/compressible_navier_stokes_2d.jl index ad0db001872..5df7c01ca5c 100644 --- a/src/equations/compressible_navier_stokes_2d.jl +++ b/src/equations/compressible_navier_stokes_2d.jl @@ -81,8 +81,7 @@ w_2 = \frac{\rho v_1}{p},\, w_3 = \frac{\rho v_2}{p},\, w_4 = -\frac{\rho}{p} ``` """ struct CompressibleNavierStokesDiffusion2D{GradientVariables, RealT <: Real, - E <: AbstractCompressibleEulerEquations{2} - } <: + E <: AbstractCompressibleEulerEquations{2}} <: AbstractCompressibleNavierStokesDiffusion{2, 4, GradientVariables} # TODO: parabolic # 1) For now save gamma and inv(gamma-1) again, but could potentially reuse them from the Euler equations @@ -130,14 +129,10 @@ end # we 
specialize this function to compute gradients of primitive variables instead of # conservative variables. -function gradient_variable_transformation(::CompressibleNavierStokesDiffusion2D{ - GradientVariablesPrimitive - }) +function gradient_variable_transformation(::CompressibleNavierStokesDiffusion2D{GradientVariablesPrimitive}) cons2prim end -function gradient_variable_transformation(::CompressibleNavierStokesDiffusion2D{ - GradientVariablesEntropy - }) +function gradient_variable_transformation(::CompressibleNavierStokesDiffusion2D{GradientVariablesEntropy}) cons2entropy end @@ -224,17 +219,13 @@ end # For CNS, it is simplest to formulate the viscous terms in primitive variables, so we transform the transformed # variables into primitive variables. @inline function convert_transformed_to_primitive(u_transformed, - equations::CompressibleNavierStokesDiffusion2D{ - GradientVariablesPrimitive - }) + equations::CompressibleNavierStokesDiffusion2D{GradientVariablesPrimitive}) return u_transformed end # TODO: parabolic. Make this more efficient! @inline function convert_transformed_to_primitive(u_transformed, - equations::CompressibleNavierStokesDiffusion2D{ - GradientVariablesEntropy - }) + equations::CompressibleNavierStokesDiffusion2D{GradientVariablesEntropy}) # note: this uses CompressibleNavierStokesDiffusion2D versions of cons2prim and entropy2cons return cons2prim(entropy2cons(u_transformed, equations), equations) end @@ -245,17 +236,13 @@ end # Note, the first component of `gradient_entropy_vars` contains gradient(rho) which is unused. # TODO: parabolic; entropy stable viscous terms @inline function convert_derivative_to_primitive(u, gradient, - ::CompressibleNavierStokesDiffusion2D{ - GradientVariablesPrimitive - }) + ::CompressibleNavierStokesDiffusion2D{GradientVariablesPrimitive}) return gradient end # the first argument is always the "transformed" variables. @inline function convert_derivative_to_primitive(w, gradient_entropy_vars, - equations::CompressibleNavierStokesDiffusion2D{ - GradientVariablesEntropy - }) + equations::CompressibleNavierStokesDiffusion2D{GradientVariablesEntropy}) # TODO: parabolic. This is inefficient to pass in transformed variables but then transform them back. 
# We can fix this if we directly compute v1, v2, T from the entropy variables @@ -310,9 +297,7 @@ end x, t, operator_type::Gradient, - equations::CompressibleNavierStokesDiffusion2D{ - GradientVariablesPrimitive - }) + equations::CompressibleNavierStokesDiffusion2D{GradientVariablesPrimitive}) v1, v2 = boundary_condition.boundary_condition_velocity.boundary_value_function(x, t, equations) @@ -326,9 +311,7 @@ end x, t, operator_type::Divergence, - equations::CompressibleNavierStokesDiffusion2D{ - GradientVariablesPrimitive - }) + equations::CompressibleNavierStokesDiffusion2D{GradientVariablesPrimitive}) # rho, v1, v2, _ = u_inner normal_heat_flux = boundary_condition.boundary_condition_heat_flux.boundary_value_normal_flux_function(x, t, @@ -348,9 +331,7 @@ end x, t, operator_type::Gradient, - equations::CompressibleNavierStokesDiffusion2D{ - GradientVariablesPrimitive - }) + equations::CompressibleNavierStokesDiffusion2D{GradientVariablesPrimitive}) v1, v2 = boundary_condition.boundary_condition_velocity.boundary_value_function(x, t, equations) @@ -366,9 +347,7 @@ end x, t, operator_type::Divergence, - equations::CompressibleNavierStokesDiffusion2D{ - GradientVariablesPrimitive - }) + equations::CompressibleNavierStokesDiffusion2D{GradientVariablesPrimitive}) return flux_inner end @@ -387,9 +366,7 @@ end x, t, operator_type::Gradient, - equations::CompressibleNavierStokesDiffusion2D{ - GradientVariablesEntropy - }) + equations::CompressibleNavierStokesDiffusion2D{GradientVariablesEntropy}) v1, v2 = boundary_condition.boundary_condition_velocity.boundary_value_function(x, t, equations) @@ -406,9 +383,7 @@ end x, t, operator_type::Divergence, - equations::CompressibleNavierStokesDiffusion2D{ - GradientVariablesEntropy - }) + equations::CompressibleNavierStokesDiffusion2D{GradientVariablesEntropy}) normal_heat_flux = boundary_condition.boundary_condition_heat_flux.boundary_value_normal_flux_function(x, t, equations) @@ -427,9 +402,7 @@ end x, t, operator_type::Gradient, - equations::CompressibleNavierStokesDiffusion2D{ - GradientVariablesEntropy - }) + equations::CompressibleNavierStokesDiffusion2D{GradientVariablesEntropy}) v1, v2 = boundary_condition.boundary_condition_velocity.boundary_value_function(x, t, equations) @@ -448,9 +421,7 @@ end x, t, operator_type::Divergence, - equations::CompressibleNavierStokesDiffusion2D{ - GradientVariablesEntropy - }) + equations::CompressibleNavierStokesDiffusion2D{GradientVariablesEntropy}) return SVector(flux_inner[1], flux_inner[2], flux_inner[3], flux_inner[4]) end @@ -461,9 +432,7 @@ end normal::AbstractVector, x, t, operator_type::Gradient, - equations::CompressibleNavierStokesDiffusion2D{ - GradientVariablesPrimitive - }) + equations::CompressibleNavierStokesDiffusion2D{GradientVariablesPrimitive}) # BCs are usually specified as conservative variables so we convert them to primitive variables # because the gradients are assumed to be with respect to the primitive variables u_boundary = boundary_condition.boundary_value_function(x, t, equations) @@ -476,9 +445,7 @@ end normal::AbstractVector, x, t, operator_type::Divergence, - equations::CompressibleNavierStokesDiffusion2D{ - GradientVariablesPrimitive - }) + equations::CompressibleNavierStokesDiffusion2D{GradientVariablesPrimitive}) # for Dirichlet boundary conditions, we do not impose any conditions on the viscous fluxes return flux_inner end diff --git a/src/equations/compressible_navier_stokes_3d.jl b/src/equations/compressible_navier_stokes_3d.jl index c6a55983b53..e5567ae5789 100644 --- 
a/src/equations/compressible_navier_stokes_3d.jl +++ b/src/equations/compressible_navier_stokes_3d.jl @@ -81,8 +81,7 @@ w_2 = \frac{\rho v_1}{p},\, w_3 = \frac{\rho v_2}{p},\, w_4 = \frac{\rho v_3}{p} ``` """ struct CompressibleNavierStokesDiffusion3D{GradientVariables, RealT <: Real, - E <: AbstractCompressibleEulerEquations{3} - } <: + E <: AbstractCompressibleEulerEquations{3}} <: AbstractCompressibleNavierStokesDiffusion{3, 5, GradientVariables} # TODO: parabolic # 1) For now save gamma and inv(gamma-1) again, but could potentially reuse them from the Euler equations @@ -130,14 +129,10 @@ end # we specialize this function to compute gradients of primitive variables instead of # conservative variables. -function gradient_variable_transformation(::CompressibleNavierStokesDiffusion3D{ - GradientVariablesPrimitive - }) +function gradient_variable_transformation(::CompressibleNavierStokesDiffusion3D{GradientVariablesPrimitive}) cons2prim end -function gradient_variable_transformation(::CompressibleNavierStokesDiffusion3D{ - GradientVariablesEntropy - }) +function gradient_variable_transformation(::CompressibleNavierStokesDiffusion3D{GradientVariablesEntropy}) cons2entropy end @@ -250,17 +245,13 @@ end # For CNS, it is simplest to formulate the viscous terms in primitive variables, so we transform the transformed # variables into primitive variables. @inline function convert_transformed_to_primitive(u_transformed, - equations::CompressibleNavierStokesDiffusion3D{ - GradientVariablesPrimitive - }) + equations::CompressibleNavierStokesDiffusion3D{GradientVariablesPrimitive}) return u_transformed end # TODO: parabolic. Make this more efficient! @inline function convert_transformed_to_primitive(u_transformed, - equations::CompressibleNavierStokesDiffusion3D{ - GradientVariablesEntropy - }) + equations::CompressibleNavierStokesDiffusion3D{GradientVariablesEntropy}) # note: this uses CompressibleNavierStokesDiffusion3D versions of cons2prim and entropy2cons return cons2prim(entropy2cons(u_transformed, equations), equations) end @@ -271,17 +262,13 @@ end # Note, the first component of `gradient_entropy_vars` contains gradient(rho) which is unused. # TODO: parabolic; entropy stable viscous terms @inline function convert_derivative_to_primitive(u, gradient, - ::CompressibleNavierStokesDiffusion3D{ - GradientVariablesPrimitive - }) + ::CompressibleNavierStokesDiffusion3D{GradientVariablesPrimitive}) return gradient end # the first argument is always the "transformed" variables. @inline function convert_derivative_to_primitive(w, gradient_entropy_vars, - equations::CompressibleNavierStokesDiffusion3D{ - GradientVariablesEntropy - }) + equations::CompressibleNavierStokesDiffusion3D{GradientVariablesEntropy}) # TODO: parabolic. This is inefficient to pass in transformed variables but then transform them back. 
# We can fix this if we directly compute v1, v2, v3, T from the entropy variables @@ -342,9 +329,7 @@ end x, t, operator_type::Gradient, - equations::CompressibleNavierStokesDiffusion3D{ - GradientVariablesPrimitive - }) + equations::CompressibleNavierStokesDiffusion3D{GradientVariablesPrimitive}) v1, v2, v3 = boundary_condition.boundary_condition_velocity.boundary_value_function(x, t, equations) @@ -358,9 +343,7 @@ end x, t, operator_type::Divergence, - equations::CompressibleNavierStokesDiffusion3D{ - GradientVariablesPrimitive - }) + equations::CompressibleNavierStokesDiffusion3D{GradientVariablesPrimitive}) # rho, v1, v2, v3, _ = u_inner normal_heat_flux = boundary_condition.boundary_condition_heat_flux.boundary_value_normal_flux_function(x, t, @@ -381,9 +364,7 @@ end x, t, operator_type::Gradient, - equations::CompressibleNavierStokesDiffusion3D{ - GradientVariablesPrimitive - }) + equations::CompressibleNavierStokesDiffusion3D{GradientVariablesPrimitive}) v1, v2, v3 = boundary_condition.boundary_condition_velocity.boundary_value_function(x, t, equations) @@ -399,9 +380,7 @@ end x, t, operator_type::Divergence, - equations::CompressibleNavierStokesDiffusion3D{ - GradientVariablesPrimitive - }) + equations::CompressibleNavierStokesDiffusion3D{GradientVariablesPrimitive}) return flux_inner end @@ -420,9 +399,7 @@ end x, t, operator_type::Gradient, - equations::CompressibleNavierStokesDiffusion3D{ - GradientVariablesEntropy - }) + equations::CompressibleNavierStokesDiffusion3D{GradientVariablesEntropy}) v1, v2, v3 = boundary_condition.boundary_condition_velocity.boundary_value_function(x, t, equations) @@ -439,9 +416,7 @@ end x, t, operator_type::Divergence, - equations::CompressibleNavierStokesDiffusion3D{ - GradientVariablesEntropy - }) + equations::CompressibleNavierStokesDiffusion3D{GradientVariablesEntropy}) normal_heat_flux = boundary_condition.boundary_condition_heat_flux.boundary_value_normal_flux_function(x, t, equations) @@ -461,9 +436,7 @@ end x, t, operator_type::Gradient, - equations::CompressibleNavierStokesDiffusion3D{ - GradientVariablesEntropy - }) + equations::CompressibleNavierStokesDiffusion3D{GradientVariablesEntropy}) v1, v2, v3 = boundary_condition.boundary_condition_velocity.boundary_value_function(x, t, equations) @@ -482,9 +455,7 @@ end x, t, operator_type::Divergence, - equations::CompressibleNavierStokesDiffusion3D{ - GradientVariablesEntropy - }) + equations::CompressibleNavierStokesDiffusion3D{GradientVariablesEntropy}) return SVector(flux_inner[1], flux_inner[2], flux_inner[3], flux_inner[4], flux_inner[5]) end diff --git a/src/equations/ideal_glm_mhd_multicomponent_1d.jl b/src/equations/ideal_glm_mhd_multicomponent_1d.jl index 0efa6426448..dad7c27e86c 100644 --- a/src/equations/ideal_glm_mhd_multicomponent_1d.jl +++ b/src/equations/ideal_glm_mhd_multicomponent_1d.jl @@ -17,19 +17,15 @@ mutable struct IdealGlmMhdMulticomponentEquations1D{NVARS, NCOMP, RealT <: Real} cv::SVector{NCOMP, RealT} cp::SVector{NCOMP, RealT} - function IdealGlmMhdMulticomponentEquations1D{NVARS, NCOMP, RealT}(gammas::SVector{ - NCOMP, - RealT - }, - gas_constants::SVector{ - NCOMP, - RealT - }) where { - NVARS, - NCOMP, - RealT <: - Real - } + function IdealGlmMhdMulticomponentEquations1D{NVARS, NCOMP, RealT}(gammas::SVector{NCOMP, + RealT}, + gas_constants::SVector{NCOMP, + RealT}) where { + NVARS, + NCOMP, + RealT <: + Real + } NCOMP >= 1 || throw(DimensionMismatch("`gammas` and `gas_constants` have to be filled with at least one value")) diff --git 
a/src/equations/ideal_glm_mhd_multicomponent_2d.jl b/src/equations/ideal_glm_mhd_multicomponent_2d.jl index 9b0eeb411e8..a3a50c0485f 100644 --- a/src/equations/ideal_glm_mhd_multicomponent_2d.jl +++ b/src/equations/ideal_glm_mhd_multicomponent_2d.jl @@ -18,19 +18,15 @@ mutable struct IdealGlmMhdMulticomponentEquations2D{NVARS, NCOMP, RealT <: Real} cp::SVector{NCOMP, RealT} c_h::RealT # GLM cleaning speed - function IdealGlmMhdMulticomponentEquations2D{NVARS, NCOMP, RealT}(gammas::SVector{ - NCOMP, - RealT - }, - gas_constants::SVector{ - NCOMP, - RealT - }) where { - NVARS, - NCOMP, - RealT <: - Real - } + function IdealGlmMhdMulticomponentEquations2D{NVARS, NCOMP, RealT}(gammas::SVector{NCOMP, + RealT}, + gas_constants::SVector{NCOMP, + RealT}) where { + NVARS, + NCOMP, + RealT <: + Real + } NCOMP >= 1 || throw(DimensionMismatch("`gammas` and `gas_constants` have to be filled with at least one value")) diff --git a/src/equations/numerical_fluxes.jl b/src/equations/numerical_fluxes.jl index 43be04f745d..44d523b6e89 100644 --- a/src/equations/numerical_fluxes.jl +++ b/src/equations/numerical_fluxes.jl @@ -198,10 +198,7 @@ function max_abs_speed_naive end end const FluxLaxFriedrichs{MaxAbsSpeed} = FluxPlusDissipation{typeof(flux_central), - DissipationLocalLaxFriedrichs{ - MaxAbsSpeed - } - } + DissipationLocalLaxFriedrichs{MaxAbsSpeed}} """ FluxLaxFriedrichs(max_abs_speed=max_abs_speed_naive) diff --git a/src/meshes/unstructured_mesh.jl b/src/meshes/unstructured_mesh.jl index c370c0f25f8..fae52f834b3 100644 --- a/src/meshes/unstructured_mesh.jl +++ b/src/meshes/unstructured_mesh.jl @@ -15,8 +15,9 @@ An unstructured (possibly curved) quadrilateral mesh. All mesh information, neighbour coupling, and boundary curve information is read in from a mesh file `filename`. """ -mutable struct UnstructuredMesh2D{RealT <: Real, CurvedSurfaceT <: CurvedSurface{RealT} - } <: AbstractMesh{2} +mutable struct UnstructuredMesh2D{RealT <: Real, + CurvedSurfaceT <: CurvedSurface{RealT}} <: + AbstractMesh{2} filename :: String n_corners :: Int n_surfaces :: Int # total number of surfaces diff --git a/src/semidiscretization/semidiscretization_coupled.jl b/src/semidiscretization/semidiscretization_coupled.jl index 49763b12e5d..0941ae6a8ca 100644 --- a/src/semidiscretization/semidiscretization_coupled.jl +++ b/src/semidiscretization/semidiscretization_coupled.jl @@ -41,8 +41,9 @@ function SemidiscretizationCoupled(semis...) 
performance_counter = PerformanceCounter() - SemidiscretizationCoupled{typeof(semis), typeof(u_indices), typeof(performance_counter) - }(semis, u_indices, performance_counter) + SemidiscretizationCoupled{typeof(semis), typeof(u_indices), + typeof(performance_counter)}(semis, u_indices, + performance_counter) end function Base.show(io::IO, semi::SemidiscretizationCoupled) @@ -432,8 +433,7 @@ function allocate_coupled_boundary_condition(boundary_condition, direction, mesh end # In 2D -function allocate_coupled_boundary_condition(boundary_condition::BoundaryConditionCoupled{2 - }, +function allocate_coupled_boundary_condition(boundary_condition::BoundaryConditionCoupled{2}, direction, mesh, equations, dg::DGSEM) if direction in (1, 2) cell_size = size(mesh, 2) diff --git a/src/semidiscretization/semidiscretization_hyperbolic.jl b/src/semidiscretization/semidiscretization_hyperbolic.jl index 9d465bfcc5f..7ebd758de37 100644 --- a/src/semidiscretization/semidiscretization_hyperbolic.jl +++ b/src/semidiscretization/semidiscretization_hyperbolic.jl @@ -29,17 +29,18 @@ struct SemidiscretizationHyperbolic{Mesh, Equations, InitialCondition, performance_counter::PerformanceCounter function SemidiscretizationHyperbolic{Mesh, Equations, InitialCondition, - BoundaryConditions, SourceTerms, Solver, Cache - }(mesh::Mesh, equations::Equations, - initial_condition::InitialCondition, - boundary_conditions::BoundaryConditions, - source_terms::SourceTerms, - solver::Solver, - cache::Cache) where {Mesh, Equations, - InitialCondition, - BoundaryConditions, - SourceTerms, Solver, - Cache} + BoundaryConditions, SourceTerms, Solver, + Cache}(mesh::Mesh, equations::Equations, + initial_condition::InitialCondition, + boundary_conditions::BoundaryConditions, + source_terms::SourceTerms, + solver::Solver, + cache::Cache) where {Mesh, Equations, + InitialCondition, + BoundaryConditions, + SourceTerms, + Solver, + Cache} @assert ndims(mesh) == ndims(equations) performance_counter = PerformanceCounter() @@ -268,8 +269,7 @@ end function print_boundary_conditions(io, semi::SemiHypMeshBCSolver{<:Any, - <:UnstructuredSortedBoundaryTypes - }) + <:UnstructuredSortedBoundaryTypes}) @unpack boundary_conditions = semi @unpack boundary_dictionary = boundary_conditions summary_line(io, "boundary conditions", length(boundary_dictionary)) @@ -289,8 +289,7 @@ function print_boundary_conditions(io, semi::SemiHypMeshBCSolver{<:Any, <:NamedT end function print_boundary_conditions(io, - semi::SemiHypMeshBCSolver{ - <:Union{TreeMesh, + semi::SemiHypMeshBCSolver{<:Union{TreeMesh, StructuredMesh}, <:Union{Tuple, NamedTuple, AbstractArray}}) diff --git a/src/semidiscretization/semidiscretization_hyperbolic_parabolic.jl b/src/semidiscretization/semidiscretization_hyperbolic_parabolic.jl index 35340d65b1e..0f44941390a 100644 --- a/src/semidiscretization/semidiscretization_hyperbolic_parabolic.jl +++ b/src/semidiscretization/semidiscretization_hyperbolic_parabolic.jl @@ -45,30 +45,29 @@ struct SemidiscretizationHyperbolicParabolic{Mesh, Equations, EquationsParabolic BoundaryConditionsParabolic, SourceTerms, Solver, SolverParabolic, Cache, - CacheParabolic - }(mesh::Mesh, - equations::Equations, - equations_parabolic::EquationsParabolic, - initial_condition::InitialCondition, - boundary_conditions::BoundaryConditions, - boundary_conditions_parabolic::BoundaryConditionsParabolic, - source_terms::SourceTerms, - solver::Solver, - solver_parabolic::SolverParabolic, - cache::Cache, - cache_parabolic::CacheParabolic) where { - Mesh, - Equations, - 
EquationsParabolic, - InitialCondition, - BoundaryConditions, - BoundaryConditionsParabolic, - SourceTerms, - Solver, - SolverParabolic, - Cache, - CacheParabolic - } + CacheParabolic}(mesh::Mesh, + equations::Equations, + equations_parabolic::EquationsParabolic, + initial_condition::InitialCondition, + boundary_conditions::BoundaryConditions, + boundary_conditions_parabolic::BoundaryConditionsParabolic, + source_terms::SourceTerms, + solver::Solver, + solver_parabolic::SolverParabolic, + cache::Cache, + cache_parabolic::CacheParabolic) where { + Mesh, + Equations, + EquationsParabolic, + InitialCondition, + BoundaryConditions, + BoundaryConditionsParabolic, + SourceTerms, + Solver, + SolverParabolic, + Cache, + CacheParabolic + } @assert ndims(mesh) == ndims(equations) # Todo: assert nvariables(equations)==nvariables(equations_parabolic) diff --git a/src/solvers/dgmulti/flux_differencing.jl b/src/solvers/dgmulti/flux_differencing.jl index 884a8fac43b..36aa50dff4e 100644 --- a/src/solvers/dgmulti/flux_differencing.jl +++ b/src/solvers/dgmulti/flux_differencing.jl @@ -88,8 +88,8 @@ end # Version for sparse operators and symmetric fluxes @inline function hadamard_sum!(du, - A::LinearAlgebra.Adjoint{<:Any, <:AbstractSparseMatrixCSC - }, + A::LinearAlgebra.Adjoint{<:Any, + <:AbstractSparseMatrixCSC}, flux_is_symmetric::True, volume_flux, orientation_or_normal_direction, u, equations) A_base = parent(A) # the adjoint of a SparseMatrixCSC is basically a SparseMatrixCSR @@ -122,8 +122,8 @@ end # Version for sparse operators and symmetric fluxes with curved meshes @inline function hadamard_sum!(du, - A::LinearAlgebra.Adjoint{<:Any, <:AbstractSparseMatrixCSC - }, + A::LinearAlgebra.Adjoint{<:Any, + <:AbstractSparseMatrixCSC}, flux_is_symmetric::True, volume_flux, normal_directions::AbstractVector{<:AbstractVector}, u, equations) @@ -161,8 +161,8 @@ end # TODO: DGMulti. Fix for curved meshes. # Version for sparse operators and non-symmetric fluxes @inline function hadamard_sum!(du, - A::LinearAlgebra.Adjoint{<:Any, <:AbstractSparseMatrixCSC - }, + A::LinearAlgebra.Adjoint{<:Any, + <:AbstractSparseMatrixCSC}, flux_is_symmetric::False, volume_flux, normal_direction::AbstractVector, u, equations) A_base = parent(A) # the adjoint of a SparseMatrixCSC is basically a SparseMatrixCSR diff --git a/src/solvers/dgmulti/flux_differencing_gauss_sbp.jl b/src/solvers/dgmulti/flux_differencing_gauss_sbp.jl index 2c5505cc4e9..9059caf87f6 100644 --- a/src/solvers/dgmulti/flux_differencing_gauss_sbp.jl +++ b/src/solvers/dgmulti/flux_differencing_gauss_sbp.jl @@ -380,9 +380,8 @@ function create_cache(mesh::DGMultiMesh, equations, # specialized operators to perform tensor product interpolation to faces for Gauss nodes interp_matrix_gauss_to_face = TensorProductGaussFaceOperator(Interpolation(), dg) - projection_matrix_gauss_to_face = TensorProductGaussFaceOperator(Projection{ - Static.False() - }(), dg) + projection_matrix_gauss_to_face = TensorProductGaussFaceOperator(Projection{Static.False()}(), + dg) # `LIFT` matrix for Gauss nodes - this is equivalent to `projection_matrix_gauss_to_face` scaled by `diagm(rd.wf)`, # where `rd.wf` are Gauss node face quadrature weights. 
diff --git a/src/solvers/dgmulti/sbp.jl b/src/solvers/dgmulti/sbp.jl index d434d3146ce..232555e18b5 100644 --- a/src/solvers/dgmulti/sbp.jl +++ b/src/solvers/dgmulti/sbp.jl @@ -40,26 +40,28 @@ end const DGMultiPeriodicFDSBP{NDIMS, ApproxType, ElemType} = DGMulti{NDIMS, ElemType, ApproxType, SurfaceIntegral, - VolumeIntegral - } where {NDIMS, ElemType, - ApproxType <: - SummationByPartsOperators.AbstractPeriodicDerivativeOperator, - SurfaceIntegral, - VolumeIntegral} + VolumeIntegral} where { + NDIMS, + ElemType, + ApproxType <: + SummationByPartsOperators.AbstractPeriodicDerivativeOperator, + SurfaceIntegral, + VolumeIntegral + } const DGMultiFluxDiffPeriodicFDSBP{NDIMS, ApproxType, ElemType} = DGMulti{NDIMS, ElemType, ApproxType, SurfaceIntegral, - VolumeIntegral - } where {NDIMS, - ElemType, - ApproxType <: - SummationByPartsOperators.AbstractPeriodicDerivativeOperator, - SurfaceIntegral <: - SurfaceIntegralWeakForm, - VolumeIntegral <: - VolumeIntegralFluxDifferencing - } + VolumeIntegral} where { + NDIMS, + ElemType, + ApproxType <: + SummationByPartsOperators.AbstractPeriodicDerivativeOperator, + SurfaceIntegral <: + SurfaceIntegralWeakForm, + VolumeIntegral <: + VolumeIntegralFluxDifferencing + } """ DGMultiMesh(dg::DGMulti) diff --git a/src/solvers/dgmulti/types.jl b/src/solvers/dgmulti/types.jl index ae1eed7fd52..813bc67061e 100644 --- a/src/solvers/dgmulti/types.jl +++ b/src/solvers/dgmulti/types.jl @@ -4,49 +4,46 @@ # `DGMulti` refers to both multiple DG types (polynomial/SBP, simplices/quads/hexes) as well as # the use of multi-dimensional operators in the solver. -const DGMulti{NDIMS, ElemType, ApproxType, SurfaceIntegral, VolumeIntegral} = DG{ - <:RefElemData{ - NDIMS, +const DGMulti{NDIMS, ElemType, ApproxType, SurfaceIntegral, VolumeIntegral} = DG{<:RefElemData{NDIMS, ElemType, - ApproxType - }, + ApproxType}, Mortar, SurfaceIntegral, - VolumeIntegral - } where { - Mortar - } + VolumeIntegral} where { + Mortar + } # Type aliases. The first parameter is `ApproxType` since it is more commonly used for dispatch. const DGMultiWeakForm{ApproxType, ElemType} = DGMulti{NDIMS, ElemType, ApproxType, <:SurfaceIntegralWeakForm, - <:VolumeIntegralWeakForm - } where {NDIMS} + <:VolumeIntegralWeakForm} where {NDIMS + } const DGMultiFluxDiff{ApproxType, ElemType} = DGMulti{NDIMS, ElemType, ApproxType, <:SurfaceIntegralWeakForm, - <:Union{ - VolumeIntegralFluxDifferencing, - VolumeIntegralShockCapturingHG - }} where {NDIMS} + <:Union{VolumeIntegralFluxDifferencing, + VolumeIntegralShockCapturingHG}} where { + NDIMS + } const DGMultiFluxDiffSBP{ApproxType, ElemType} = DGMulti{NDIMS, ElemType, ApproxType, <:SurfaceIntegralWeakForm, - <:Union{ - VolumeIntegralFluxDifferencing, - VolumeIntegralShockCapturingHG - } - } where {NDIMS, - ApproxType <: Union{SBP, - AbstractDerivativeOperator - }} + <:Union{VolumeIntegralFluxDifferencing, + VolumeIntegralShockCapturingHG}} where { + NDIMS, + ApproxType <: + Union{SBP, + AbstractDerivativeOperator} + } const DGMultiSBP{ApproxType, ElemType} = DGMulti{NDIMS, ElemType, ApproxType, - SurfaceIntegral, VolumeIntegral - } where {NDIMS, ElemType, - ApproxType <: Union{SBP, - AbstractDerivativeOperator}, - SurfaceIntegral, VolumeIntegral} + SurfaceIntegral, + VolumeIntegral} where {NDIMS, ElemType, + ApproxType <: + Union{SBP, + AbstractDerivativeOperator}, + SurfaceIntegral, + VolumeIntegral} # By default, Julia/LLVM does not use fused multiply-add operations (FMAs). 
# Since these FMAs can increase the performance of many numerical algorithms, diff --git a/src/solvers/dgsem/basis_lobatto_legendre.jl b/src/solvers/dgsem/basis_lobatto_legendre.jl index 1b4e5446e44..6a92fd1c066 100644 --- a/src/solvers/dgsem/basis_lobatto_legendre.jl +++ b/src/solvers/dgsem/basis_lobatto_legendre.jl @@ -188,9 +188,9 @@ function MortarL2(basis::LobattoLegendreBasis) reverse_upper = Matrix{RealT}(reverse_upper_) reverse_lower = Matrix{RealT}(reverse_lower_) - LobattoLegendreMortarL2{RealT, nnodes_, typeof(forward_upper), typeof(reverse_upper) - }(forward_upper, forward_lower, - reverse_upper, reverse_lower) + LobattoLegendreMortarL2{RealT, nnodes_, typeof(forward_upper), + typeof(reverse_upper)}(forward_upper, forward_lower, + reverse_upper, reverse_lower) end function Base.show(io::IO, mortar::LobattoLegendreMortarL2) diff --git a/src/solvers/dgsem_p4est/containers.jl b/src/solvers/dgsem_p4est/containers.jl index 5fe68e06710..f9830d0011c 100644 --- a/src/solvers/dgsem_p4est/containers.jl +++ b/src/solvers/dgsem_p4est/containers.jl @@ -454,8 +454,7 @@ mutable struct InitSurfacesIterFaceUserData{Interfaces, Mortars, Boundaries, Mes end function InitSurfacesIterFaceUserData(interfaces, mortars, boundaries, mesh) - return InitSurfacesIterFaceUserData{ - typeof(interfaces), typeof(mortars), + return InitSurfacesIterFaceUserData{typeof(interfaces), typeof(mortars), typeof(boundaries), typeof(mesh)}(interfaces, 1, mortars, 1, boundaries, 1, diff --git a/src/solvers/dgsem_p4est/containers_parallel.jl b/src/solvers/dgsem_p4est/containers_parallel.jl index e7ee1f81478..7c7bd868457 100644 --- a/src/solvers/dgsem_p4est/containers_parallel.jl +++ b/src/solvers/dgsem_p4est/containers_parallel.jl @@ -266,8 +266,7 @@ end function ParallelInitSurfacesIterFaceUserData(interfaces, mortars, boundaries, mpi_interfaces, mpi_mortars, mesh) - return ParallelInitSurfacesIterFaceUserData{ - typeof(interfaces), typeof(mortars), + return ParallelInitSurfacesIterFaceUserData{typeof(interfaces), typeof(mortars), typeof(boundaries), typeof(mpi_interfaces), typeof(mpi_mortars), typeof(mesh)}(interfaces, diff --git a/src/solvers/dgsem_p4est/containers_parallel_2d.jl b/src/solvers/dgsem_p4est/containers_parallel_2d.jl index 8c39e4a69c8..d531d33821b 100644 --- a/src/solvers/dgsem_p4est/containers_parallel_2d.jl +++ b/src/solvers/dgsem_p4est/containers_parallel_2d.jl @@ -6,9 +6,7 @@ #! format: noindent # Initialize node_indices of MPI interface container -@inline function init_mpi_interface_node_indices!(mpi_interfaces::P4estMPIInterfaceContainer{ - 2 - }, +@inline function init_mpi_interface_node_indices!(mpi_interfaces::P4estMPIInterfaceContainer{2}, faces, local_side, orientation, mpi_interface_id) # Align interface in positive coordinate direction of primary element. diff --git a/src/solvers/dgsem_p4est/containers_parallel_3d.jl b/src/solvers/dgsem_p4est/containers_parallel_3d.jl index be4e2bfbfc9..56f0a543b97 100644 --- a/src/solvers/dgsem_p4est/containers_parallel_3d.jl +++ b/src/solvers/dgsem_p4est/containers_parallel_3d.jl @@ -6,9 +6,7 @@ #! format: noindent # Initialize node_indices of MPI interface container -@inline function init_mpi_interface_node_indices!(mpi_interfaces::P4estMPIInterfaceContainer{ - 3 - }, +@inline function init_mpi_interface_node_indices!(mpi_interfaces::P4estMPIInterfaceContainer{3}, faces, local_side, orientation, mpi_interface_id) # Align interface at the primary element (primary element has surface indices (:i_forward, :j_forward)). 
diff --git a/src/solvers/dgsem_p4est/dg_2d_parabolic.jl b/src/solvers/dgsem_p4est/dg_2d_parabolic.jl index 9dd10df16ae..a7f3345168f 100644 --- a/src/solvers/dgsem_p4est/dg_2d_parabolic.jl +++ b/src/solvers/dgsem_p4est/dg_2d_parabolic.jl @@ -803,8 +803,7 @@ end function calc_boundary_flux_gradients!(cache, t, boundary_condition::Union{BoundaryConditionPeriodic, - BoundaryConditionDoNothing - }, + BoundaryConditionDoNothing}, mesh::P4estMesh, equations, surface_integral, dg::DG) @assert isempty(eachboundary(dg, cache)) end diff --git a/src/solvers/dgsem_p4est/dg_parallel.jl b/src/solvers/dgsem_p4est/dg_parallel.jl index 324bc7f3cd6..712ede2bfce 100644 --- a/src/solvers/dgsem_p4est/dg_parallel.jl +++ b/src/solvers/dgsem_p4est/dg_parallel.jl @@ -342,8 +342,7 @@ function InitNeighborRankConnectivityIterFaceUserData(mpi_interfaces, mpi_mortar global_mortar_ids = fill(-1, nmpimortars(mpi_mortars)) neighbor_ranks_mortar = Vector{Vector{Int}}(undef, nmpimortars(mpi_mortars)) - return InitNeighborRankConnectivityIterFaceUserData{ - typeof(mpi_interfaces), + return InitNeighborRankConnectivityIterFaceUserData{typeof(mpi_interfaces), typeof(mpi_mortars), typeof(mesh)}(mpi_interfaces, 1, global_interface_ids, diff --git a/src/solvers/dgsem_structured/containers.jl b/src/solvers/dgsem_structured/containers.jl index 41eabf7c6bf..8adf005b782 100644 --- a/src/solvers/dgsem_structured/containers.jl +++ b/src/solvers/dgsem_structured/containers.jl @@ -5,8 +5,8 @@ @muladd begin #! format: noindent -struct ElementContainer{NDIMS, RealT <: Real, uEltype <: Real, NDIMSP1, NDIMSP2, NDIMSP3 - } +struct ElementContainer{NDIMS, RealT <: Real, uEltype <: Real, NDIMSP1, NDIMSP2, + NDIMSP3} # Physical coordinates at each node node_coordinates::Array{RealT, NDIMSP2} # [orientation, node_i, node_j, node_k, element] # ID of neighbor element in negative direction in orientation diff --git a/src/solvers/dgsem_tree/dg_1d_parabolic.jl b/src/solvers/dgsem_tree/dg_1d_parabolic.jl index 90007b05b3d..0017f9ca88e 100644 --- a/src/solvers/dgsem_tree/dg_1d_parabolic.jl +++ b/src/solvers/dgsem_tree/dg_1d_parabolic.jl @@ -290,10 +290,8 @@ function calc_boundary_flux_gradients!(cache, t, 2, firsts[2], lasts[2]) end -function calc_boundary_flux_by_direction_gradient!(surface_flux_values::AbstractArray{ - <:Any, - 3 - }, +function calc_boundary_flux_by_direction_gradient!(surface_flux_values::AbstractArray{<:Any, + 3}, t, boundary_condition, equations_parabolic::AbstractEquationsParabolic, @@ -358,10 +356,8 @@ function calc_boundary_flux_divergence!(cache, t, dg, cache, 2, firsts[2], lasts[2]) end -function calc_boundary_flux_by_direction_divergence!(surface_flux_values::AbstractArray{ - <:Any, - 3 - }, +function calc_boundary_flux_by_direction_divergence!(surface_flux_values::AbstractArray{<:Any, + 3}, t, boundary_condition, equations_parabolic::AbstractEquationsParabolic, diff --git a/src/solvers/dgsem_tree/dg_2d.jl b/src/solvers/dgsem_tree/dg_2d.jl index 7ecf4c00032..547ed352ef3 100644 --- a/src/solvers/dgsem_tree/dg_2d.jl +++ b/src/solvers/dgsem_tree/dg_2d.jl @@ -391,8 +391,8 @@ end @inline function fv_kernel!(du, u, mesh::Union{TreeMesh{2}, StructuredMesh{2}, - UnstructuredMesh2D, P4estMesh{2}, T8codeMesh{2} - }, + UnstructuredMesh2D, P4estMesh{2}, + T8codeMesh{2}}, nonconservative_terms, equations, volume_flux_fv, dg::DGSEM, cache, element, alpha = true) @unpack fstar1_L_threaded, fstar1_R_threaded, fstar2_L_threaded, fstar2_R_threaded = cache diff --git a/src/solvers/dgsem_tree/dg_2d_parabolic.jl 
b/src/solvers/dgsem_tree/dg_2d_parabolic.jl index 06abff5e85b..3083ae30680 100644 --- a/src/solvers/dgsem_tree/dg_2d_parabolic.jl +++ b/src/solvers/dgsem_tree/dg_2d_parabolic.jl @@ -384,10 +384,8 @@ function calc_boundary_flux_gradients!(cache, t, cache, 4, firsts[4], lasts[4]) end -function calc_boundary_flux_by_direction_gradient!(surface_flux_values::AbstractArray{ - <:Any, - 4 - }, +function calc_boundary_flux_by_direction_gradient!(surface_flux_values::AbstractArray{<:Any, + 4}, t, boundary_condition, equations_parabolic::AbstractEquationsParabolic, @@ -464,10 +462,8 @@ function calc_boundary_flux_divergence!(cache, t, dg, cache, 4, firsts[4], lasts[4]) end -function calc_boundary_flux_by_direction_divergence!(surface_flux_values::AbstractArray{ - <:Any, - 4 - }, +function calc_boundary_flux_by_direction_divergence!(surface_flux_values::AbstractArray{<:Any, + 4}, t, boundary_condition, equations_parabolic::AbstractEquationsParabolic, diff --git a/src/solvers/dgsem_tree/dg_3d.jl b/src/solvers/dgsem_tree/dg_3d.jl index 3364187e93c..0955dc38655 100644 --- a/src/solvers/dgsem_tree/dg_3d.jl +++ b/src/solvers/dgsem_tree/dg_3d.jl @@ -209,8 +209,8 @@ function rhs!(du, u, t, end function calc_volume_integral!(du, u, - mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3} - }, + mesh::Union{TreeMesh{3}, StructuredMesh{3}, + P4estMesh{3}}, nonconservative_terms, equations, volume_integral::VolumeIntegralWeakForm, dg::DGSEM, cache) @@ -264,8 +264,8 @@ See also https://github.com/trixi-framework/Trixi.jl/issues/1671#issuecomment-17 end function calc_volume_integral!(du, u, - mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3} - }, + mesh::Union{TreeMesh{3}, StructuredMesh{3}, + P4estMesh{3}}, nonconservative_terms, equations, volume_integral::VolumeIntegralFluxDifferencing, dg::DGSEM, cache) @@ -378,8 +378,8 @@ end # TODO: Taal dimension agnostic function calc_volume_integral!(du, u, - mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3} - }, + mesh::Union{TreeMesh{3}, StructuredMesh{3}, + P4estMesh{3}}, nonconservative_terms, equations, volume_integral::VolumeIntegralShockCapturingHG, dg::DGSEM, cache) diff --git a/src/solvers/dgsem_tree/dg_3d_parabolic.jl b/src/solvers/dgsem_tree/dg_3d_parabolic.jl index 2561c5fe5b0..9ad28c6aa8e 100644 --- a/src/solvers/dgsem_tree/dg_3d_parabolic.jl +++ b/src/solvers/dgsem_tree/dg_3d_parabolic.jl @@ -454,10 +454,8 @@ function calc_boundary_flux_gradients!(cache, t, 6, firsts[6], lasts[6]) end -function calc_boundary_flux_by_direction_gradient!(surface_flux_values::AbstractArray{ - <:Any, - 5 - }, +function calc_boundary_flux_by_direction_gradient!(surface_flux_values::AbstractArray{<:Any, + 5}, t, boundary_condition, equations_parabolic::AbstractEquationsParabolic, @@ -546,10 +544,8 @@ function calc_boundary_flux_divergence!(cache, t, dg, cache, 6, firsts[6], lasts[6]) end -function calc_boundary_flux_by_direction_divergence!(surface_flux_values::AbstractArray{ - <:Any, - 5 - }, +function calc_boundary_flux_by_direction_divergence!(surface_flux_values::AbstractArray{<:Any, + 5}, t, boundary_condition, equations_parabolic::AbstractEquationsParabolic, diff --git a/src/solvers/dgsem_tree/indicators_1d.jl b/src/solvers/dgsem_tree/indicators_1d.jl index 4006932352e..dff87bfe06c 100644 --- a/src/solvers/dgsem_tree/indicators_1d.jl +++ b/src/solvers/dgsem_tree/indicators_1d.jl @@ -29,8 +29,8 @@ end # full FV element. 
# # TODO: TrixiShallowWater: move new indicator type -function (indicator_hg::IndicatorHennemannGassnerShallowWater)(u::AbstractArray{<:Any, 3 - }, +function (indicator_hg::IndicatorHennemannGassnerShallowWater)(u::AbstractArray{<:Any, + 3}, mesh, equations::ShallowWaterEquations1D, dg::DGSEM, cache; diff --git a/src/solvers/dgsem_tree/indicators_2d.jl b/src/solvers/dgsem_tree/indicators_2d.jl index 8333bb515d3..fa8ed481eb9 100644 --- a/src/solvers/dgsem_tree/indicators_2d.jl +++ b/src/solvers/dgsem_tree/indicators_2d.jl @@ -33,8 +33,8 @@ end # full FV element. # # TODO: TrixiShallowWater: move new indicator type -function (indicator_hg::IndicatorHennemannGassnerShallowWater)(u::AbstractArray{<:Any, 4 - }, +function (indicator_hg::IndicatorHennemannGassnerShallowWater)(u::AbstractArray{<:Any, + 4}, mesh, equations::ShallowWaterEquations2D, dg::DGSEM, cache; diff --git a/src/solvers/dgsem_tree/subcell_limiters_2d.jl b/src/solvers/dgsem_tree/subcell_limiters_2d.jl index bc69e55f264..384f4178bc9 100644 --- a/src/solvers/dgsem_tree/subcell_limiters_2d.jl +++ b/src/solvers/dgsem_tree/subcell_limiters_2d.jl @@ -8,10 +8,9 @@ # this method is used when the limiter is constructed as for shock-capturing volume integrals function create_cache(limiter::Type{SubcellLimiterIDP}, equations::AbstractEquations{2}, basis::LobattoLegendreBasis, bound_keys) - subcell_limiter_coefficients = Trixi.ContainerSubcellLimiterIDP2D{real(basis) - }(0, - nnodes(basis), - bound_keys) + subcell_limiter_coefficients = Trixi.ContainerSubcellLimiterIDP2D{real(basis)}(0, + nnodes(basis), + bound_keys) # Memory for bounds checking routine with `BoundsCheckCallback`. # The first entry of each vector contains the maximum deviation since the last export. diff --git a/src/visualization/recipes_plots.jl b/src/visualization/recipes_plots.jl index d15f7e542e1..0e9b5a66a8d 100644 --- a/src/visualization/recipes_plots.jl +++ b/src/visualization/recipes_plots.jl @@ -57,11 +57,8 @@ end # Visualize the mesh in a 2D plot # # Note: This is an experimental feature and may be changed in future releases without notice. -RecipesBase.@recipe function f(pm::PlotMesh{ - <:PlotData2DCartesian{<:Any, - <:AbstractVector{ - <:AbstractVector - }}}) +RecipesBase.@recipe function f(pm::PlotMesh{<:PlotData2DCartesian{<:Any, + <:AbstractVector{<:AbstractVector}}}) @unpack plot_data = pm @unpack x, y, mesh_vertices_x, mesh_vertices_y = plot_data diff --git a/test/test_tree_3d_fdsbp.jl b/test/test_tree_3d_fdsbp.jl index 16508df300e..e0e2bfe4b88 100644 --- a/test/test_tree_3d_fdsbp.jl +++ b/test/test_tree_3d_fdsbp.jl @@ -32,8 +32,8 @@ end xmax = 1.0, N = 10) @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_extended.jl"), - l2=[1.3819894522373702e-8], - linf=[3.381866298113323e-8], + l2=[5.228248923012878e-9], + linf=[9.24430243465224e-9], D_SBP=D, initial_refinement_level=0, tspan=(0.0, 5.0)) From 80869fc89f2839101e2d21fd0ecd1a80c702ac22 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Dec 2023 07:40:50 +0100 Subject: [PATCH 044/166] Bump crate-ci/typos from 1.16.21 to 1.16.23 (#1764) Bumps [crate-ci/typos](https://github.com/crate-ci/typos) from 1.16.21 to 1.16.23. 
- [Release notes](https://github.com/crate-ci/typos/releases) - [Changelog](https://github.com/crate-ci/typos/blob/master/CHANGELOG.md) - [Commits](https://github.com/crate-ci/typos/compare/v1.16.21...v1.16.23) --- updated-dependencies: - dependency-name: crate-ci/typos dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Hendrik Ranocha --- .github/workflows/SpellCheck.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/SpellCheck.yml b/.github/workflows/SpellCheck.yml index fb71fe45af9..366cb1183a0 100644 --- a/.github/workflows/SpellCheck.yml +++ b/.github/workflows/SpellCheck.yml @@ -10,4 +10,4 @@ jobs: - name: Checkout Actions Repository uses: actions/checkout@v4 - name: Check spelling - uses: crate-ci/typos@v1.16.21 + uses: crate-ci/typos@v1.16.23 From 75db52133087088e9dd1be4b5c515c35cbc8d967 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Mon, 4 Dec 2023 07:42:02 +0100 Subject: [PATCH 045/166] set version to v0.6.3 --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 4e2a89dc133..77140fce78b 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "] -version = "0.6.3-pre" +version = "0.6.3" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From 45e7dc24f975f201c06fec6c7272efb5fb31b8c5 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Mon, 4 Dec 2023 07:42:20 +0100 Subject: [PATCH 046/166] set development version to v0.6.4-pre --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 77140fce78b..ace71a50d8b 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "] -version = "0.6.3" +version = "0.6.4-pre" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From 7a7ce48e5c5f174332b77a8943290e19c9805102 Mon Sep 17 00:00:00 2001 From: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> Date: Wed, 6 Dec 2023 11:13:01 +0100 Subject: [PATCH 047/166] Make error message for unknown boundary name more helpful (#1762) Co-authored-by: Daniel Doehring --- src/solvers/dgsem_unstructured/sort_boundary_conditions.jl | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/solvers/dgsem_unstructured/sort_boundary_conditions.jl b/src/solvers/dgsem_unstructured/sort_boundary_conditions.jl index cad5542aae3..b5388cadc8b 100644 --- a/src/solvers/dgsem_unstructured/sort_boundary_conditions.jl +++ b/src/solvers/dgsem_unstructured/sort_boundary_conditions.jl @@ -56,7 +56,8 @@ function initialize!(boundary_types_container::UnstructuredSortedBoundaryTypes{N for key in keys(boundary_dictionary) if !(key in all_names) println(stderr, - "ERROR: Key $(repr(key)) is not a valid boundary name") + "ERROR: Key $(repr(key)) is not a valid boundary name. 
" * + "Valid names are $all_names.") MPI.Abort(mpi_comm(), 1) end end @@ -67,7 +68,8 @@ function initialize!(boundary_types_container::UnstructuredSortedBoundaryTypes{N else for key in keys(boundary_dictionary) if !(key in unique_names) - error("Key $(repr(key)) is not a valid boundary name") + error("Key $(repr(key)) is not a valid boundary name. " * + "Valid names are $unique_names.") end end end From 3d899bc3a301bdc40fc57afb95ea9e88ab10c459 Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Wed, 6 Dec 2023 21:51:25 +0100 Subject: [PATCH 048/166] Make error norm calculation optional (#1755) * Make error norm calculation optional * test no errors * no errors * comment more clear * shorter version? * add text * update docstring * cut error passing * Update src/callbacks_step/analysis.jl Co-authored-by: Hendrik Ranocha * print in if clause * shorten * Update src/callbacks_step/analysis.jl Co-authored-by: Hendrik Ranocha * Add docstring for default_analysis_errors * Update src/equations/equations.jl --------- Co-authored-by: Hendrik Ranocha --- src/callbacks_step/analysis.jl | 54 ++++++++++++++++++++-------------- src/equations/equations.jl | 7 +++++ test/test_tree_1d_advection.jl | 14 +++++++++ 3 files changed, 53 insertions(+), 22 deletions(-) diff --git a/src/callbacks_step/analysis.jl b/src/callbacks_step/analysis.jl index e5b4a01a885..ba232032951 100644 --- a/src/callbacks_step/analysis.jl +++ b/src/callbacks_step/analysis.jl @@ -23,6 +23,13 @@ Additional errors can be computed, e.g. by passing `extra_analysis_errors = (:l2_error_primitive, :linf_error_primitive)` or `extra_analysis_errors = (:conservation_error,)`. +If you want to omit the computation (to safe compute-time) of the [`default_analysis_errors`](@ref), specify +`analysis_errors = Symbol[]`. +Note: `default_analysis_errors` are `:l2_error` and `:linf_error` for all equations. +If you want to compute `extra_analysis_errors` such as `:conservation_error` solely, i.e., +without `:l2_error, :linf_error` you need to specify +`analysis_errors = [:conservation_error]` instead of `extra_analysis_errors = [:conservation_error]`. + Further scalar functions `func` in `extra_analysis_integrals` are applied to the numerical solution and integrated over the computational domain. Some examples for this are [`entropy`](@ref), [`energy_kinetic`](@ref), [`energy_internal`](@ref), and [`energy_total`](@ref). 
@@ -332,7 +339,8 @@ function (analysis_callback::AnalysisCallback)(u_ode, du_ode, integrator, semi) @notimeit timer() integrator.f(du_ode, u_ode, semi, t) u = wrap_array(u_ode, mesh, equations, solver, cache) du = wrap_array(du_ode, mesh, equations, solver, cache) - l2_error, linf_error = analysis_callback(io, du, u, u_ode, t, semi) + # Compute l2_error, linf_error + analysis_callback(io, du, u, u_ode, t, semi) mpi_println("─"^100) mpi_println() @@ -354,8 +362,7 @@ function (analysis_callback::AnalysisCallback)(u_ode, du_ode, integrator, semi) analysis_callback.start_time_last_analysis = time_ns() analysis_callback.ncalls_rhs_last_analysis = ncalls(semi.performance_counter) - # Return errors for EOC analysis - return l2_error, linf_error + return nothing end # This method is just called internally from `(analysis_callback::AnalysisCallback)(integrator)` @@ -377,28 +384,31 @@ function (analysis_callback::AnalysisCallback)(io, du, u, u_ode, t, semi) println() end - # Calculate L2/Linf errors, which are also returned - l2_error, linf_error = calc_error_norms(u_ode, t, analyzer, semi, cache_analysis) + if :l2_error in analysis_errors || :linf_error in analysis_errors + # Calculate L2/Linf errors + l2_error, linf_error = calc_error_norms(u_ode, t, analyzer, semi, + cache_analysis) - if mpi_isroot() - # L2 error - if :l2_error in analysis_errors - print(" L2 error: ") - for v in eachvariable(equations) - @printf(" % 10.8e", l2_error[v]) - @printf(io, " % 10.8e", l2_error[v]) + if mpi_isroot() + # L2 error + if :l2_error in analysis_errors + print(" L2 error: ") + for v in eachvariable(equations) + @printf(" % 10.8e", l2_error[v]) + @printf(io, " % 10.8e", l2_error[v]) + end + println() end - println() - end - # Linf error - if :linf_error in analysis_errors - print(" Linf error: ") - for v in eachvariable(equations) - @printf(" % 10.8e", linf_error[v]) - @printf(io, " % 10.8e", linf_error[v]) + # Linf error + if :linf_error in analysis_errors + print(" Linf error: ") + for v in eachvariable(equations) + @printf(" % 10.8e", linf_error[v]) + @printf(io, " % 10.8e", linf_error[v]) + end + println() end - println() end end @@ -477,7 +487,7 @@ function (analysis_callback::AnalysisCallback)(io, du, u, u_ode, t, semi) # additional integrals analyze_integrals(analysis_integrals, io, du, u, t, semi) - return l2_error, linf_error + return nothing end # Print level information only if AMR is enabled diff --git a/src/equations/equations.jl b/src/equations/equations.jl index ba2ad1cd1cd..582d672b756 100644 --- a/src/equations/equations.jl +++ b/src/equations/equations.jl @@ -260,7 +260,14 @@ combined with certain solvers (e.g., subcell limiting). function n_nonconservative_terms end have_constant_speed(::AbstractEquations) = False() +""" + default_analysis_errors(equations) + +Default analysis errors (`:l2_error` and `:linf_error`) used by the +[`AnalysisCallback`](@ref). 
+""" default_analysis_errors(::AbstractEquations) = (:l2_error, :linf_error) + """ default_analysis_integrals(equations) diff --git a/test/test_tree_1d_advection.jl b/test/test_tree_1d_advection.jl index 7cfd78e0ade..a580a3b5600 100644 --- a/test/test_tree_1d_advection.jl +++ b/test/test_tree_1d_advection.jl @@ -54,6 +54,20 @@ end end end +@trixi_testset "elixir_advection_basic.jl (No errors)" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_basic.jl"), + analysis_callback=AnalysisCallback(semi, interval = 42, + analysis_errors = Symbol[])) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + @trixi_testset "elixir_advection_finite_volume.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_finite_volume.jl"), l2=[0.011662300515980219], From 034b1587807b5098e64043b4711604b481ba2186 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Wed, 13 Dec 2023 09:15:37 +0100 Subject: [PATCH 049/166] bump lower compat bound of TimerOutputs.jl to v0.5.7 (#1772) * bump lower compat bound of TimerOutputs.jl to v0.5.7 * format --- Project.toml | 2 +- .../semidiscretization_euler_acoustics.jl | 10 +++++----- .../semidiscretization_euler_gravity.jl | 10 +++++----- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Project.toml b/Project.toml index ace71a50d8b..65074c5a1ca 100644 --- a/Project.toml +++ b/Project.toml @@ -85,7 +85,7 @@ StrideArrays = "0.1.18" StructArrays = "0.6" SummationByPartsOperators = "0.5.41" T8code = "0.4.3" -TimerOutputs = "0.5" +TimerOutputs = "0.5.7" Triangulate = "2.0" TriplotBase = "0.1" TriplotRecipes = "0.1" diff --git a/src/semidiscretization/semidiscretization_euler_acoustics.jl b/src/semidiscretization/semidiscretization_euler_acoustics.jl index e49fe81177a..173523ff892 100644 --- a/src/semidiscretization/semidiscretization_euler_acoustics.jl +++ b/src/semidiscretization/semidiscretization_euler_acoustics.jl @@ -51,11 +51,11 @@ function SemidiscretizationEulerAcoustics(semi_acoustics::SemiAcoustics, semi_euler::SemiEuler; source_region = x -> true, weights = x -> 1.0) where - {Mesh, - SemiAcoustics <: - SemidiscretizationHyperbolic{Mesh, <:AbstractAcousticPerturbationEquations}, - SemiEuler <: - SemidiscretizationHyperbolic{Mesh, <:AbstractCompressibleEulerEquations}} + {Mesh, + SemiAcoustics <: + SemidiscretizationHyperbolic{Mesh, <:AbstractAcousticPerturbationEquations}, + SemiEuler <: + SemidiscretizationHyperbolic{Mesh, <:AbstractCompressibleEulerEquations}} cache = create_cache(SemidiscretizationEulerAcoustics, source_region, weights, mesh_equations_solver_cache(semi_acoustics)...) 
diff --git a/src/semidiscretization/semidiscretization_euler_gravity.jl b/src/semidiscretization/semidiscretization_euler_gravity.jl index a9a60a4ff04..4201344df80 100644 --- a/src/semidiscretization/semidiscretization_euler_gravity.jl +++ b/src/semidiscretization/semidiscretization_euler_gravity.jl @@ -117,11 +117,11 @@ Construct a semidiscretization of the compressible Euler equations with self-gra function SemidiscretizationEulerGravity(semi_euler::SemiEuler, semi_gravity::SemiGravity, parameters) where - {Mesh, - SemiEuler <: - SemidiscretizationHyperbolic{Mesh, <:AbstractCompressibleEulerEquations}, - SemiGravity <: - SemidiscretizationHyperbolic{Mesh, <:AbstractHyperbolicDiffusionEquations}} + {Mesh, + SemiEuler <: + SemidiscretizationHyperbolic{Mesh, <:AbstractCompressibleEulerEquations}, + SemiGravity <: + SemidiscretizationHyperbolic{Mesh, <:AbstractHyperbolicDiffusionEquations}} u_ode = compute_coefficients(zero(real(semi_gravity)), semi_gravity) du_ode = similar(u_ode) u_tmp1_ode = similar(u_ode) From 6acc09eaa815ad8cbcc0adaf5d160b3299daf071 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Wed, 13 Dec 2023 14:05:14 +0100 Subject: [PATCH 050/166] set version to v0.6.4 --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 65074c5a1ca..8be83c7924f 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "] -version = "0.6.4-pre" +version = "0.6.4" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From 766c7981fdfcbfcbb034888bb627a8f4668259eb Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Wed, 13 Dec 2023 14:05:26 +0100 Subject: [PATCH 051/166] set development version to v0.6.5-pre --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 8be83c7924f..539dafc3034 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. 
Winters ", "Jesse Chan "] -version = "0.6.4" +version = "0.6.5-pre" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From 14796ea607ad49f49e2258dac87ff30a6530595d Mon Sep 17 00:00:00 2001 From: Andrew Winters Date: Wed, 13 Dec 2023 16:40:21 +0100 Subject: [PATCH 052/166] Central SBP finite difference solver for `UnstructuredMesh2D` (#1773) * containers and kernels for FDSBP solver on UnstructuredMesh2D * add elixirs and corresponding tests * apply formatter to new and edited files * add advection equation test to up coverage * Apply suggestions from code review Co-authored-by: Hendrik Ranocha * update variable name to N --------- Co-authored-by: Hendrik Ranocha --- .../elixir_advection_basic.jl | 69 ++++++ .../elixir_euler_free_stream.jl | 77 ++++++ .../elixir_euler_source_terms.jl | 65 ++++++ src/solvers/dg.jl | 8 +- .../dgsem_unstructured/containers_2d.jl | 15 +- src/solvers/fdsbp_tree/fdsbp_2d.jl | 2 +- .../fdsbp_unstructured/containers_2d.jl | 124 ++++++++++ src/solvers/fdsbp_unstructured/fdsbp.jl | 14 ++ src/solvers/fdsbp_unstructured/fdsbp_2d.jl | 219 ++++++++++++++++++ test/test_unstructured_2d.jl | 61 +++++ 10 files changed, 643 insertions(+), 11 deletions(-) create mode 100644 examples/unstructured_2d_fdsbp/elixir_advection_basic.jl create mode 100644 examples/unstructured_2d_fdsbp/elixir_euler_free_stream.jl create mode 100644 examples/unstructured_2d_fdsbp/elixir_euler_source_terms.jl create mode 100644 src/solvers/fdsbp_unstructured/containers_2d.jl create mode 100644 src/solvers/fdsbp_unstructured/fdsbp.jl create mode 100644 src/solvers/fdsbp_unstructured/fdsbp_2d.jl diff --git a/examples/unstructured_2d_fdsbp/elixir_advection_basic.jl b/examples/unstructured_2d_fdsbp/elixir_advection_basic.jl new file mode 100644 index 00000000000..c181203e7a4 --- /dev/null +++ b/examples/unstructured_2d_fdsbp/elixir_advection_basic.jl @@ -0,0 +1,69 @@ + +using Downloads: download +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the linear advection equation + +advection_velocity = (0.2, -0.7) +equations = LinearScalarAdvectionEquation2D(advection_velocity) + +############################################################################### +# Get the FDSBP approximation operator + +D_SBP = derivative_operator(SummationByPartsOperators.MattssonAlmquistVanDerWeide2018Accurate(), + derivative_order = 1, accuracy_order = 4, + xmin = -1.0, xmax = 1.0, N = 15) +solver = FDSBP(D_SBP, + surface_integral = SurfaceIntegralStrongForm(flux_lax_friedrichs), + volume_integral = VolumeIntegralStrongForm()) + +############################################################################### +# Get the curved quad mesh from a file (downloads the file if not available locally) + +default_mesh_file = joinpath(@__DIR__, "mesh_periodic_square_with_twist.mesh") +isfile(default_mesh_file) || + download("https://gist.githubusercontent.com/andrewwinters5000/12ce661d7c354c3d94c74b964b0f1c96/raw/8275b9a60c6e7ebbdea5fc4b4f091c47af3d5273/mesh_periodic_square_with_twist.mesh", + default_mesh_file) +mesh_file = default_mesh_file + +mesh = UnstructuredMesh2D(mesh_file, periodicity = true) + +############################################################################### +# create the semidiscretization object + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition_convergence_test, + solver) + +############################################################################### +# ODE solvers, callbacks etc. 
+ +# Create ODE problem with time span from 0.0 to 1.0 +ode = semidiscretize(semi, (0.0, 1.0)) + +# At the beginning of the main loop, the SummaryCallback prints a summary of the simulation setup +# and resets the timers +summary_callback = SummaryCallback() + +# The AnalysisCallback allows to analyse the solution in regular intervals and prints the results +analysis_callback = AnalysisCallback(semi, interval = 100) + +# The SaveSolutionCallback allows to save the solution to a file in regular intervals +save_solution = SaveSolutionCallback(interval = 100, + solution_variables = cons2prim) + +# The StepsizeCallback handles the re-calculation of the maximum Δt after each time step +stepsize_callback = StepsizeCallback(cfl = 1.6) + +# Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver +callbacks = CallbackSet(summary_callback, analysis_callback, save_solution, + stepsize_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); +summary_callback() # print the timer summary diff --git a/examples/unstructured_2d_fdsbp/elixir_euler_free_stream.jl b/examples/unstructured_2d_fdsbp/elixir_euler_free_stream.jl new file mode 100644 index 00000000000..7ada50c0c65 --- /dev/null +++ b/examples/unstructured_2d_fdsbp/elixir_euler_free_stream.jl @@ -0,0 +1,77 @@ + +using Downloads: download +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the compressible Euler equations + +equations = CompressibleEulerEquations2D(1.4) + +# Free-stream initial condition +initial_condition = initial_condition_constant + +# Boundary conditions for free-stream testing +boundary_condition_free_stream = BoundaryConditionDirichlet(initial_condition) +boundary_conditions = Dict(:Body => boundary_condition_free_stream, + :Button1 => boundary_condition_free_stream, + :Button2 => boundary_condition_free_stream, + :Eye1 => boundary_condition_free_stream, + :Eye2 => boundary_condition_free_stream, + :Smile => boundary_condition_free_stream, + :Bowtie => boundary_condition_free_stream) + +############################################################################### +# Get the FDSBP approximation space + +D_SBP = derivative_operator(SummationByPartsOperators.MattssonAlmquistVanDerWeide2018Accurate(), + derivative_order = 1, accuracy_order = 4, + xmin = -1.0, xmax = 1.0, N = 12) +solver = FDSBP(D_SBP, + surface_integral = SurfaceIntegralStrongForm(flux_hll), + volume_integral = VolumeIntegralStrongForm()) + +############################################################################### +# Get the curved quad mesh from a file (downloads the file if not available locally) + +default_mesh_file = joinpath(@__DIR__, "mesh_gingerbread_man.mesh") +isfile(default_mesh_file) || + download("https://gist.githubusercontent.com/andrewwinters5000/2c6440b5f8a57db131061ad7aa78ee2b/raw/1f89fdf2c874ff678c78afb6fe8dc784bdfd421f/mesh_gingerbread_man.mesh", + default_mesh_file) +mesh_file = default_mesh_file + +mesh = UnstructuredMesh2D(mesh_file) + +############################################################################### +# create the semi discretization object + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + 
boundary_conditions = boundary_conditions) + +############################################################################### +# ODE solvers, callbacks etc. + +tspan = (0.0, 5.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +save_solution = SaveSolutionCallback(interval = 100, + save_initial_solution = true, + save_final_solution = true) + +callbacks = CallbackSet(summary_callback, analysis_callback, + alive_callback, save_solution) + +############################################################################### +# run the simulation + +# set small tolerances for the free-stream preservation test +sol = solve(ode, SSPRK43(), abstol = 1.0e-12, reltol = 1.0e-12, + save_everystep = false, callback = callbacks) +summary_callback() # print the timer summary diff --git a/examples/unstructured_2d_fdsbp/elixir_euler_source_terms.jl b/examples/unstructured_2d_fdsbp/elixir_euler_source_terms.jl new file mode 100644 index 00000000000..edcd221bf59 --- /dev/null +++ b/examples/unstructured_2d_fdsbp/elixir_euler_source_terms.jl @@ -0,0 +1,65 @@ + +using Downloads: download +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the compressible Euler equations + +equations = CompressibleEulerEquations2D(1.4) + +initial_condition = initial_condition_convergence_test + +############################################################################### +# Get the FDSBP approximation operator + +D_SBP = derivative_operator(SummationByPartsOperators.MattssonNordström2004(), + derivative_order = 1, accuracy_order = 4, + xmin = -1.0, xmax = 1.0, N = 10) +solver = FDSBP(D_SBP, + surface_integral = SurfaceIntegralStrongForm(flux_lax_friedrichs), + volume_integral = VolumeIntegralStrongForm()) + +############################################################################### +# Get the curved quad mesh from a file (downloads the file if not available locally) + +default_mesh_file = joinpath(@__DIR__, "mesh_periodic_square_with_twist.mesh") +isfile(default_mesh_file) || + download("https://gist.githubusercontent.com/andrewwinters5000/12ce661d7c354c3d94c74b964b0f1c96/raw/8275b9a60c6e7ebbdea5fc4b4f091c47af3d5273/mesh_periodic_square_with_twist.mesh", + default_mesh_file) +mesh_file = default_mesh_file + +mesh = UnstructuredMesh2D(mesh_file, periodicity = true) + +############################################################################### +# create the semi discretization object + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + source_terms = source_terms_convergence_test) + +############################################################################### +# ODE solvers, callbacks etc. 
+ +tspan = (0.0, 1.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +save_solution = SaveSolutionCallback(interval = 100, + save_initial_solution = true, + save_final_solution = true) + +callbacks = CallbackSet(summary_callback, analysis_callback, + alive_callback, save_solution) + +############################################################################### +# run the simulation + +sol = solve(ode, SSPRK43(), abstol = 1.0e-9, reltol = 1.0e-9, + save_everystep = false, callback = callbacks) +summary_callback() # print the timer summary diff --git a/src/solvers/dg.jl b/src/solvers/dg.jl index 9e5ebc7f9b5..9b61df62cc3 100644 --- a/src/solvers/dg.jl +++ b/src/solvers/dg.jl @@ -41,8 +41,8 @@ standard textbooks. Applications [doi: 10.1007/978-0-387-72067-8](https://doi.org/10.1007/978-0-387-72067-8) -`VolumeIntegralWeakForm()` is only implemented for conserved terms as -non-conservative terms should always be discretized in conjunction with a flux-splitting scheme, +`VolumeIntegralWeakForm()` is only implemented for conserved terms as +non-conservative terms should always be discretized in conjunction with a flux-splitting scheme, see [`VolumeIntegralFluxDifferencing`](@ref). This treatment is required to achieve, e.g., entropy-stability or well-balancedness. """ @@ -415,7 +415,8 @@ function Base.show(io::IO, mime::MIME"text/plain", dg::DG) summary_line(io, "surface integral", dg.surface_integral |> typeof |> nameof) show(increment_indent(io), mime, dg.surface_integral) summary_line(io, "volume integral", dg.volume_integral |> typeof |> nameof) - if !(dg.volume_integral isa VolumeIntegralWeakForm) + if !(dg.volume_integral isa VolumeIntegralWeakForm) && + !(dg.volume_integral isa VolumeIntegralStrongForm) show(increment_indent(io), mime, dg.volume_integral) end summary_footer(io) @@ -598,6 +599,7 @@ include("dgsem/dgsem.jl") # and boundary conditions weakly. Thus, these methods can re-use a lot of # functionality implemented for DGSEM. include("fdsbp_tree/fdsbp.jl") +include("fdsbp_unstructured/fdsbp.jl") function allocate_coefficients(mesh::AbstractMesh, equations, dg::DG, cache) # We must allocate a `Vector` in order to be able to `resize!` it (AMR). diff --git a/src/solvers/dgsem_unstructured/containers_2d.jl b/src/solvers/dgsem_unstructured/containers_2d.jl index 13eeaeabffb..f51dd09801b 100644 --- a/src/solvers/dgsem_unstructured/containers_2d.jl +++ b/src/solvers/dgsem_unstructured/containers_2d.jl @@ -45,7 +45,7 @@ end eachelement(elements::UnstructuredElementContainer2D) Return an iterator over the indices that specify the location in relevant data structures -for the elements in `elements`. +for the elements in `elements`. In particular, not the elements themselves are returned. 
""" @inline function eachelement(elements::UnstructuredElementContainer2D) @@ -84,24 +84,25 @@ function init_elements!(elements::UnstructuredElementContainer2D, mesh, basis) # loop through elements and call the correct constructor based on whether the element is curved for element in eachelement(elements) if mesh.element_is_curved[element] - init_element!(elements, element, basis.nodes, + init_element!(elements, element, basis, view(mesh.surface_curves, :, element)) else # straight sided element for i in 1:4, j in 1:2 # pull the (x,y) values of these corners out of the global corners array four_corners[i, j] = mesh.corners[j, mesh.element_node_ids[i, element]] end - init_element!(elements, element, basis.nodes, four_corners) + init_element!(elements, element, basis, four_corners) end end end # initialize all the values in the container of a general element (either straight sided or curved) -function init_element!(elements, element, nodes, corners_or_surface_curves) - calc_node_coordinates!(elements.node_coordinates, element, nodes, +function init_element!(elements, element, basis::LobattoLegendreBasis, + corners_or_surface_curves) + calc_node_coordinates!(elements.node_coordinates, element, get_nodes(basis), corners_or_surface_curves) - calc_metric_terms!(elements.jacobian_matrix, element, nodes, + calc_metric_terms!(elements.jacobian_matrix, element, get_nodes(basis), corners_or_surface_curves) calc_inverse_jacobian!(elements.inverse_jacobian, element, elements.jacobian_matrix) @@ -109,7 +110,7 @@ function init_element!(elements, element, nodes, corners_or_surface_curves) calc_contravariant_vectors!(elements.contravariant_vectors, element, elements.jacobian_matrix) - calc_normal_directions!(elements.normal_directions, element, nodes, + calc_normal_directions!(elements.normal_directions, element, get_nodes(basis), corners_or_surface_curves) return elements diff --git a/src/solvers/fdsbp_tree/fdsbp_2d.jl b/src/solvers/fdsbp_tree/fdsbp_2d.jl index beff605629a..09d18cecd75 100644 --- a/src/solvers/fdsbp_tree/fdsbp_2d.jl +++ b/src/solvers/fdsbp_tree/fdsbp_2d.jl @@ -9,7 +9,7 @@ #! format: noindent # 2D caches -function create_cache(mesh::TreeMesh{2}, equations, +function create_cache(mesh::Union{TreeMesh{2}, UnstructuredMesh2D}, equations, volume_integral::VolumeIntegralStrongForm, dg, uEltype) prototype = Array{SVector{nvariables(equations), uEltype}, ndims(mesh)}(undef, ntuple(_ -> nnodes(dg), diff --git a/src/solvers/fdsbp_unstructured/containers_2d.jl b/src/solvers/fdsbp_unstructured/containers_2d.jl new file mode 100644 index 00000000000..3857c2d8a20 --- /dev/null +++ b/src/solvers/fdsbp_unstructured/containers_2d.jl @@ -0,0 +1,124 @@ +# !!! warning "Experimental implementation (curvilinear FDSBP)" +# This is an experimental feature and may change in future releases. + +# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). +# Since these FMAs can increase the performance of many numerical algorithms, +# we need to opt-in explicitly. +# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. +@muladd begin +#! format: noindent + +# initialize all the values in the container of a general FD block (either straight sided or curved) +# OBS! 
Requires the SBP derivative matrix in order to compute metric terms that are free-stream preserving +function init_element!(elements, element, basis::AbstractDerivativeOperator, + corners_or_surface_curves) + calc_node_coordinates!(elements.node_coordinates, element, get_nodes(basis), + corners_or_surface_curves) + + calc_metric_terms!(elements.jacobian_matrix, element, basis, + elements.node_coordinates) + + calc_inverse_jacobian!(elements.inverse_jacobian, element, elements.jacobian_matrix) + + calc_contravariant_vectors!(elements.contravariant_vectors, element, + elements.jacobian_matrix) + + calc_normal_directions!(elements.normal_directions, element, + elements.jacobian_matrix) + + return elements +end + +# construct the metric terms for a FDSBP element "block". Directly use the derivative matrix +# applied to the node coordinates. +# TODO: FD; How to make this work for the upwind solver because basis has three available derivative matrices +function calc_metric_terms!(jacobian_matrix, element, D_SBP::AbstractDerivativeOperator, + node_coordinates) + + # storage format: + # jacobian_matrix[1,1,:,:,:] <- X_xi + # jacobian_matrix[1,2,:,:,:] <- X_eta + # jacobian_matrix[2,1,:,:,:] <- Y_xi + # jacobian_matrix[2,2,:,:,:] <- Y_eta + + # Compute the xi derivatives by applying D on the left + # This is basically the same as + # jacobian_matrix[1, 1, :, :, element] = Matrix(D_SBP) * node_coordinates[1, :, :, element] + # but uses only matrix-vector products instead of a matrix-matrix product. + for j in eachnode(D_SBP) + mul!(view(jacobian_matrix, 1, 1, :, j, element), D_SBP, + view(node_coordinates, 1, :, j, element)) + end + # jacobian_matrix[2, 1, :, :, element] = Matrix(D_SBP) * node_coordinates[2, :, :, element] + for j in eachnode(D_SBP) + mul!(view(jacobian_matrix, 2, 1, :, j, element), D_SBP, + view(node_coordinates, 2, :, j, element)) + end + + # Compute the eta derivatives by applying transpose of D on the right + # jacobian_matrix[1, 2, :, :, element] = node_coordinates[1, :, :, element] * Matrix(D_SBP)' + for i in eachnode(D_SBP) + mul!(view(jacobian_matrix, 1, 2, i, :, element), D_SBP, + view(node_coordinates, 1, i, :, element)) + end + # jacobian_matrix[2, 2, :, :, element] = node_coordinates[2, :, :, element] * Matrix(D_SBP)' + for i in eachnode(D_SBP) + mul!(view(jacobian_matrix, 2, 2, i, :, element), D_SBP, + view(node_coordinates, 2, i, :, element)) + end + + return jacobian_matrix +end + +# construct the normal direction vectors (but not actually normalized) for a curved sided FDSBP element "block" +# normalization occurs on the fly during the surface flux computation +# OBS! This assumes that the boundary points are included. 
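+# In terms of these metric terms, the unnormalized outward normal on a face of constant xi is
+# ±sign(J) * (Y_eta, -X_eta) and on a face of constant eta it is ±sign(J) * (-Y_xi, X_xi),
+# where J = X_xi * Y_eta - X_eta * Y_xi; the signs below are chosen such that the normals
+# point out of the element on all four sides.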
+function calc_normal_directions!(normal_directions, element, jacobian_matrix) + + # normal directions on the boundary for the left (local side 4) and right (local side 2) + N = size(jacobian_matrix, 4) + for j in 1:N + # +x side or side 2 in the local indexing + X_xi = jacobian_matrix[1, 1, N, j, element] + X_eta = jacobian_matrix[1, 2, N, j, element] + Y_xi = jacobian_matrix[2, 1, N, j, element] + Y_eta = jacobian_matrix[2, 2, N, j, element] + Jtemp = X_xi * Y_eta - X_eta * Y_xi + normal_directions[1, j, 2, element] = sign(Jtemp) * (Y_eta) + normal_directions[2, j, 2, element] = sign(Jtemp) * (-X_eta) + + # -x side or side 4 in the local indexing + X_xi = jacobian_matrix[1, 1, 1, j, element] + X_eta = jacobian_matrix[1, 2, 1, j, element] + Y_xi = jacobian_matrix[2, 1, 1, j, element] + Y_eta = jacobian_matrix[2, 2, 1, j, element] + Jtemp = X_xi * Y_eta - X_eta * Y_xi + normal_directions[1, j, 4, element] = -sign(Jtemp) * (Y_eta) + normal_directions[2, j, 4, element] = -sign(Jtemp) * (-X_eta) + end + + # normal directions on the boundary for the top (local side 3) and bottom (local side 1) + N = size(jacobian_matrix, 3) + for i in 1:N + # -y side or side 1 in the local indexing + X_xi = jacobian_matrix[1, 1, i, 1, element] + X_eta = jacobian_matrix[1, 2, i, 1, element] + Y_xi = jacobian_matrix[2, 1, i, 1, element] + Y_eta = jacobian_matrix[2, 2, i, 1, element] + Jtemp = X_xi * Y_eta - X_eta * Y_xi + normal_directions[1, i, 1, element] = -sign(Jtemp) * (-Y_xi) + normal_directions[2, i, 1, element] = -sign(Jtemp) * (X_xi) + + # +y side or side 3 in the local indexing + X_xi = jacobian_matrix[1, 1, i, N, element] + X_eta = jacobian_matrix[1, 2, i, N, element] + Y_xi = jacobian_matrix[2, 1, i, N, element] + Y_eta = jacobian_matrix[2, 2, i, N, element] + Jtemp = X_xi * Y_eta - X_eta * Y_xi + normal_directions[1, i, 3, element] = sign(Jtemp) * (-Y_xi) + normal_directions[2, i, 3, element] = sign(Jtemp) * (X_xi) + end + + return normal_directions +end +end # @muladd diff --git a/src/solvers/fdsbp_unstructured/fdsbp.jl b/src/solvers/fdsbp_unstructured/fdsbp.jl new file mode 100644 index 00000000000..dee9776abb7 --- /dev/null +++ b/src/solvers/fdsbp_unstructured/fdsbp.jl @@ -0,0 +1,14 @@ +# !!! warning "Experimental implementation (curvilinear FDSBP)" +# This is an experimental feature and may change in future releases. + +# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). +# Since these FMAs can increase the performance of many numerical algorithms, +# we need to opt-in explicitly. +# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. +@muladd begin +#! format: noindent + +# dimension specific curvilinear implementations and data structures +include("containers_2d.jl") +include("fdsbp_2d.jl") +end # @muladd diff --git a/src/solvers/fdsbp_unstructured/fdsbp_2d.jl b/src/solvers/fdsbp_unstructured/fdsbp_2d.jl new file mode 100644 index 00000000000..b459f4c42cc --- /dev/null +++ b/src/solvers/fdsbp_unstructured/fdsbp_2d.jl @@ -0,0 +1,219 @@ +# !!! warning "Experimental implementation (curvilinear FDSBP)" +# This is an experimental feature and may change in future releases. + +# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). +# Since these FMAs can increase the performance of many numerical algorithms, +# we need to opt-in explicitly. +# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. +@muladd begin +#! 
format: noindent + +# 2D unstructured cache +function create_cache(mesh::UnstructuredMesh2D, equations, dg::FDSBP, RealT, uEltype) + elements = init_elements(mesh, equations, dg.basis, RealT, uEltype) + + interfaces = init_interfaces(mesh, elements) + + boundaries = init_boundaries(mesh, elements) + + cache = (; elements, interfaces, boundaries) + + # Add specialized parts of the cache required to for efficient flux computations + cache = (; cache..., + create_cache(mesh, equations, dg.volume_integral, dg, uEltype)...) + + return cache +end + +# TODO: FD; Upwind versions of surface / volume integral + +# 2D volume integral contributions for `VolumeIntegralStrongForm` +# OBS! This is the standard (not de-aliased) form of the volume integral. +# So it is not provably stable for variable coefficients due to the the metric terms. +@inline function calc_volume_integral!(du, u, + mesh::UnstructuredMesh2D, + nonconservative_terms::False, equations, + volume_integral::VolumeIntegralStrongForm, + dg::FDSBP, cache) + D = dg.basis # SBP derivative operator + @unpack f_threaded = cache + @unpack contravariant_vectors = cache.elements + + # SBP operators from SummationByPartsOperators.jl implement the basic interface + # of matrix-vector multiplication. Thus, we pass an "array of structures", + # packing all variables per node in an `SVector`. + if nvariables(equations) == 1 + # `reinterpret(reshape, ...)` removes the leading dimension only if more + # than one variable is used. + u_vectors = reshape(reinterpret(SVector{nvariables(equations), eltype(u)}, u), + nnodes(dg), nnodes(dg), nelements(dg, cache)) + du_vectors = reshape(reinterpret(SVector{nvariables(equations), eltype(du)}, + du), + nnodes(dg), nnodes(dg), nelements(dg, cache)) + else + u_vectors = reinterpret(reshape, SVector{nvariables(equations), eltype(u)}, u) + du_vectors = reinterpret(reshape, SVector{nvariables(equations), eltype(du)}, + du) + end + + # Use the tensor product structure to compute the discrete derivatives of + # the contravariant fluxes line-by-line and add them to `du` for each element. + @threaded for element in eachelement(dg, cache) + f_element = f_threaded[Threads.threadid()] + u_element = view(u_vectors, :, :, element) + + # x direction + for j in eachnode(dg) + for i in eachnode(dg) + Ja1 = get_contravariant_vector(1, contravariant_vectors, i, j, element) + f_element[i, j] = flux(u_element[i, j], Ja1, equations) + end + mul!(view(du_vectors, :, j, element), D, view(f_element, :, j), + one(eltype(du)), one(eltype(du))) + end + + # y direction + for i in eachnode(dg) + for j in eachnode(dg) + Ja2 = get_contravariant_vector(2, contravariant_vectors, i, j, element) + f_element[i, j] = flux(u_element[i, j], Ja2, equations) + end + mul!(view(du_vectors, i, :, element), D, view(f_element, i, :), + one(eltype(du)), one(eltype(du))) + end + end + + return nothing +end + +# Note! The local side numbering for the unstructured quadrilateral element implementation differs +# from the structured TreeMesh or StructuredMesh local side numbering: +# +# TreeMesh/StructuredMesh sides versus UnstructuredMesh sides +# 4 3 +# ----------------- ----------------- +# | | | | +# | ^ eta | | ^ eta | +# 1 | | | 2 4 | | | 2 +# | | | | | | +# | ---> xi | | ---> xi | +# ----------------- ----------------- +# 3 1 +# Therefore, we require a different surface integral routine here despite their similar structure. +# Also, the normal directions are already outward pointing for `UnstructuredMesh2D` so all the +# surface contributions are added. 
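+# At the two boundary nodes of each line of nodes, the strong-form surface integral below adds
+# the difference between the numerical surface flux and the interior flux evaluated in the
+# outward normal direction, scaled by the inverse of the corresponding boundary quadrature
+# weight of the SBP operator (a SAT-type correction term).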
+function calc_surface_integral!(du, u, mesh::UnstructuredMesh2D, + equations, surface_integral::SurfaceIntegralStrongForm, + dg::DG, cache) + inv_weight_left = inv(left_boundary_weight(dg.basis)) + inv_weight_right = inv(right_boundary_weight(dg.basis)) + @unpack normal_directions, surface_flux_values = cache.elements + + @threaded for element in eachelement(dg, cache) + for l in eachnode(dg) + # surface at -x + u_node = get_node_vars(u, equations, dg, 1, l, element) + # compute internal flux in normal direction on side 4 + outward_direction = get_node_coords(normal_directions, equations, dg, l, 4, + element) + f_node = flux(u_node, outward_direction, equations) + f_num = get_node_vars(surface_flux_values, equations, dg, l, 4, element) + multiply_add_to_node_vars!(du, inv_weight_left, (f_num - f_node), + equations, dg, 1, l, element) + + # surface at +x + u_node = get_node_vars(u, equations, dg, nnodes(dg), l, element) + # compute internal flux in normal direction on side 2 + outward_direction = get_node_coords(normal_directions, equations, dg, l, 2, + element) + f_node = flux(u_node, outward_direction, equations) + f_num = get_node_vars(surface_flux_values, equations, dg, l, 2, element) + multiply_add_to_node_vars!(du, inv_weight_right, (f_num - f_node), + equations, dg, nnodes(dg), l, element) + + # surface at -y + u_node = get_node_vars(u, equations, dg, l, 1, element) + # compute internal flux in normal direction on side 1 + outward_direction = get_node_coords(normal_directions, equations, dg, l, 1, + element) + f_node = flux(u_node, outward_direction, equations) + f_num = get_node_vars(surface_flux_values, equations, dg, l, 1, element) + multiply_add_to_node_vars!(du, inv_weight_left, (f_num - f_node), + equations, dg, l, 1, element) + + # surface at +y + u_node = get_node_vars(u, equations, dg, l, nnodes(dg), element) + # compute internal flux in normal direction on side 3 + outward_direction = get_node_coords(normal_directions, equations, dg, l, 3, + element) + f_node = flux(u_node, outward_direction, equations) + f_num = get_node_vars(surface_flux_values, equations, dg, l, 3, element) + multiply_add_to_node_vars!(du, inv_weight_right, (f_num - f_node), + equations, dg, l, nnodes(dg), element) + end + end + + return nothing +end + +# AnalysisCallback +function integrate_via_indices(func::Func, u, + mesh::UnstructuredMesh2D, equations, + dg::FDSBP, cache, args...; normalize = true) where {Func} + # TODO: FD. This is rather inefficient right now and allocates... + weights = diag(SummationByPartsOperators.mass_matrix(dg.basis)) + + # Initialize integral with zeros of the right shape + integral = zero(func(u, 1, 1, 1, equations, dg, args...)) + total_volume = zero(real(mesh)) + + # Use quadrature to numerically integrate over entire domain + for element in eachelement(dg, cache) + for j in eachnode(dg), i in eachnode(dg) + volume_jacobian = abs(inv(cache.elements.inverse_jacobian[i, j, element])) + integral += volume_jacobian * weights[i] * weights[j] * + func(u, i, j, element, equations, dg, args...) + total_volume += volume_jacobian * weights[i] * weights[j] + end + end + + # Normalize with total volume + if normalize + integral = integral / total_volume + end + + return integral +end + +function calc_error_norms(func, u, t, analyzer, + mesh::UnstructuredMesh2D, equations, initial_condition, + dg::FDSBP, cache, cache_analysis) + # TODO: FD. This is rather inefficient right now and allocates... 
+ weights = diag(SummationByPartsOperators.mass_matrix(dg.basis)) + @unpack node_coordinates, inverse_jacobian = cache.elements + + # Set up data structures + l2_error = zero(func(get_node_vars(u, equations, dg, 1, 1, 1), equations)) + linf_error = copy(l2_error) + total_volume = zero(real(mesh)) + + # Iterate over all elements for error calculations + for element in eachelement(dg, cache) + for j in eachnode(analyzer), i in eachnode(analyzer) + volume_jacobian = abs(inv(cache.elements.inverse_jacobian[i, j, element])) + u_exact = initial_condition(get_node_coords(node_coordinates, equations, dg, + i, j, element), t, equations) + diff = func(u_exact, equations) - + func(get_node_vars(u, equations, dg, i, j, element), equations) + l2_error += diff .^ 2 * (weights[i] * weights[j] * volume_jacobian) + linf_error = @. max(linf_error, abs(diff)) + total_volume += weights[i] * weights[j] * volume_jacobian + end + end + + # For L2 error, divide by total volume + l2_error = @. sqrt(l2_error / total_volume) + + return l2_error, linf_error +end +end # @muladd diff --git a/test/test_unstructured_2d.jl b/test/test_unstructured_2d.jl index 5341d86a7d1..139b423ead1 100644 --- a/test/test_unstructured_2d.jl +++ b/test/test_unstructured_2d.jl @@ -664,6 +664,67 @@ end @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 end end + +# TODO: FD; for now put the unstructured tests for the 2D FDSBP here. +@trixi_testset "FDSBP (central): elixir_advection_basic.jl" begin + @test_trixi_include(joinpath(pkgdir(Trixi, "examples", "unstructured_2d_fdsbp"), + "elixir_advection_basic.jl"), + l2=[0.0001105211407319266], + linf=[0.0004199363734466166]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + +@trixi_testset "FDSBP (central): elixir_euler_source_terms.jl" begin + @test_trixi_include(joinpath(pkgdir(Trixi, "examples", "unstructured_2d_fdsbp"), + "elixir_euler_source_terms.jl"), + l2=[8.155544666380138e-5, + 0.0001477863788446318, + 0.00014778637884460072, + 0.00045584189984542687], + linf=[0.0002670775876922882, + 0.0005683064706873964, + 0.0005683064706762941, + 0.0017770812025146299], + tspan=(0.0, 0.05)) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + +@trixi_testset "FDSBP (central): elixir_euler_free_stream.jl" begin + @test_trixi_include(joinpath(pkgdir(Trixi, "examples", "unstructured_2d_fdsbp"), + "elixir_euler_free_stream.jl"), + l2=[5.4329175009362306e-14, + 1.0066867437607972e-13, + 6.889210012578449e-14, + 1.568290814572709e-13], + linf=[5.963762816918461e-10, + 5.08869890669672e-11, + 1.1581377523661729e-10, + 4.61017890529547e-11], + tspan=(0.0, 0.1), + atol=1.0e-11) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end end # Clean up afterwards: delete Trixi.jl output directory From c7c4cf7c827177fa3e32942aa58b316468ac0295 Mon Sep 17 00:00:00 2001 From: Krissh Chawla <127906314+KrisshChawla@users.noreply.github.com> Date: Thu, 14 Dec 2023 03:54:19 -0600 Subject: [PATCH 053/166] Compressible Euler Quasi-1D (#1757) * 
implementation of quasi 1d compressible Euler Equation * added example elixir for quasi 1d compressible Euler equations * added example elixir with a discontinuous initial condition * including and exported CompressibleEulerEquationsQuasi1D * formatting * added entropy conservative test * fixing spelling * formatting * Update examples/tree_1d_dgsem/elixir_euler_quasi_1d_discontinuous.jl Co-authored-by: Jesse Chan <1156048+jlchan@users.noreply.github.com> * Update src/equations/compressible_euler_quasi_1d.jl Co-authored-by: Jesse Chan <1156048+jlchan@users.noreply.github.com> * Update src/equations/compressible_euler_quasi_1d.jl Co-authored-by: Jesse Chan <1156048+jlchan@users.noreply.github.com> * Update src/equations/compressible_euler_quasi_1d.jl Co-authored-by: Jesse Chan <1156048+jlchan@users.noreply.github.com> * updated test_tree_1d_euler.jl and formatting * Update src/equations/compressible_euler_quasi_1d.jl Co-authored-by: Daniel Doehring * Update src/equations/compressible_euler_quasi_1d.jl Co-authored-by: Daniel Doehring * Update src/equations/compressible_euler_quasi_1d.jl Co-authored-by: Daniel Doehring * Update src/equations/compressible_euler_quasi_1d.jl Co-authored-by: Daniel Doehring * Update src/equations/compressible_euler_quasi_1d.jl Co-authored-by: Daniel Doehring * Update src/equations/compressible_euler_quasi_1d.jl Co-authored-by: Daniel Doehring * adding consistency check for flux * Update compressible_euler_quasi_1d.jl * formatting * Update src/equations/compressible_euler_quasi_1d.jl Co-authored-by: Jesse Chan <1156048+jlchan@users.noreply.github.com> * Update src/equations/compressible_euler_quasi_1d.jl Co-authored-by: Jesse Chan <1156048+jlchan@users.noreply.github.com> * Update src/equations/compressible_euler_quasi_1d.jl Co-authored-by: Jesse Chan <1156048+jlchan@users.noreply.github.com> * Update src/equations/compressible_euler_quasi_1d.jl Co-authored-by: Jesse Chan <1156048+jlchan@users.noreply.github.com> * Update compressible_euler_quasi_1d and test_tree_1d_euler * formatting * Update examples/tree_1d_dgsem/elixir_euler_quasi_1d_discontinuous.jl Co-authored-by: Jesse Chan <1156048+jlchan@users.noreply.github.com> * Update examples/tree_1d_dgsem/elixir_euler_quasi_1d_ec.jl Co-authored-by: Jesse Chan <1156048+jlchan@users.noreply.github.com> * Update examples/tree_1d_dgsem/elixir_euler_quasi_1d_source_terms.jl Co-authored-by: Jesse Chan <1156048+jlchan@users.noreply.github.com> * Update src/equations/compressible_euler_quasi_1d.jl Co-authored-by: Daniel Doehring * Update src/equations/compressible_euler_quasi_1d.jl Co-authored-by: Daniel Doehring * Update src/equations/compressible_euler_quasi_1d.jl Co-authored-by: Daniel Doehring * update boundary condition slip wall * update compressible_euler_quasi_1d.jl * Update src/equations/compressible_euler_quasi_1d.jl * Update src/equations/compressible_euler_quasi_1d.jl * remove boundary_condition_slip_wall --------- Co-authored-by: Jesse Chan <1156048+jlchan@users.noreply.github.com> Co-authored-by: Daniel Doehring Co-authored-by: Daniel Doehring Co-authored-by: Hendrik Ranocha --- .../elixir_euler_quasi_1d_discontinuous.jl | 85 +++++ .../tree_1d_dgsem/elixir_euler_quasi_1d_ec.jl | 73 ++++ .../elixir_euler_quasi_1d_source_terms.jl | 60 ++++ src/Trixi.jl | 1 + src/equations/compressible_euler_quasi_1d.jl | 328 ++++++++++++++++++ src/equations/equations.jl | 1 + test/test_tree_1d_euler.jl | 73 ++++ test/test_unit.jl | 13 + 8 files changed, 634 insertions(+) create mode 100644 
examples/tree_1d_dgsem/elixir_euler_quasi_1d_discontinuous.jl create mode 100644 examples/tree_1d_dgsem/elixir_euler_quasi_1d_ec.jl create mode 100644 examples/tree_1d_dgsem/elixir_euler_quasi_1d_source_terms.jl create mode 100644 src/equations/compressible_euler_quasi_1d.jl diff --git a/examples/tree_1d_dgsem/elixir_euler_quasi_1d_discontinuous.jl b/examples/tree_1d_dgsem/elixir_euler_quasi_1d_discontinuous.jl new file mode 100644 index 00000000000..cc4535be028 --- /dev/null +++ b/examples/tree_1d_dgsem/elixir_euler_quasi_1d_discontinuous.jl @@ -0,0 +1,85 @@ +using OrdinaryDiffEq +using Trixi + +############################################################################### +# Semidiscretization of the quasi 1d compressible Euler equations +# See Chan et al. https://doi.org/10.48550/arXiv.2307.12089 for details + +equations = CompressibleEulerEquationsQuasi1D(1.4) + +""" + initial_condition_discontinuity(x, t, equations::CompressibleEulerEquations1D) + +A discontinuous initial condition taken from +- Jesse Chan, Khemraj Shukla, Xinhui Wu, Ruofeng Liu, Prani Nalluri (2023) + High order entropy stable schemes for the quasi-one-dimensional + shallow water and compressible Euler equations + [DOI: 10.48550/arXiv.2307.12089](https://doi.org/10.48550/arXiv.2307.12089) +""" +function initial_condition_discontinuity(x, t, + equations::CompressibleEulerEquationsQuasi1D) + rho = (x[1] < 0) ? 3.4718 : 2.0 + v1 = (x[1] < 0) ? -2.5923 : -3.0 + p = (x[1] < 0) ? 5.7118 : 2.639 + a = (x[1] < 0) ? 1.0 : 1.5 + + return prim2cons(SVector(rho, v1, p, a), equations) +end + +initial_condition = initial_condition_discontinuity + +surface_flux = (flux_lax_friedrichs, flux_nonconservative_chan_etal) +volume_flux = (flux_chan_etal, flux_nonconservative_chan_etal) + +basis = LobattoLegendreBasis(3) +indicator_sc = IndicatorHennemannGassner(equations, basis, + alpha_max = 0.5, + alpha_min = 0.001, + alpha_smooth = true, + variable = density_pressure) +volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; + volume_flux_dg = volume_flux, + volume_flux_fv = surface_flux) +solver = DGSEM(basis, surface_flux, volume_integral) + +coordinates_min = (-1.0,) +coordinates_max = (1.0,) +mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 6, + n_cells_max = 10_000) + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + +############################################################################### +# ODE solvers, callbacks etc. 
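+
+# Note on the setup above: with `initial_refinement_level = 6` on the domain (-1, 1),
+# the jump of the initial condition at x = 0 lies on an element interface.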
+ +tspan = (0.0, 2.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 + +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +save_solution = SaveSolutionCallback(interval = 100, + save_initial_solution = true, + save_final_solution = true, + solution_variables = cons2prim) + +stepsize_callback = StepsizeCallback(cfl = 0.5) + +callbacks = CallbackSet(summary_callback, + analysis_callback, alive_callback, + save_solution, + stepsize_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); +summary_callback() # print the timer summary diff --git a/examples/tree_1d_dgsem/elixir_euler_quasi_1d_ec.jl b/examples/tree_1d_dgsem/elixir_euler_quasi_1d_ec.jl new file mode 100644 index 00000000000..ae1b2b24b62 --- /dev/null +++ b/examples/tree_1d_dgsem/elixir_euler_quasi_1d_ec.jl @@ -0,0 +1,73 @@ +using OrdinaryDiffEq +using Trixi + +############################################################################### +# Semidiscretization of the quasi 1d compressible Euler equations with a discontinuous nozzle width function. +# See Chan et al. https://doi.org/10.48550/arXiv.2307.12089 for details + +equations = CompressibleEulerEquationsQuasi1D(1.4) + +# Setup a truly discontinuous density function and nozzle width for +# this academic testcase of entropy conservation. The errors from the analysis +# callback are not important but the entropy error for this test case +# `∑∂S/∂U ⋅ Uₜ` should be around machine roundoff. +# Works as intended for TreeMesh1D with `initial_refinement_level=6`. If the mesh +# refinement level is changed the initial condition below may need changed as well to +# ensure that the discontinuities lie on an element interface. +function initial_condition_ec(x, t, equations::CompressibleEulerEquationsQuasi1D) + v1 = 0.1 + rho = 2.0 + 0.1 * x[1] + p = 3.0 + a = 2.0 + x[1] + + return prim2cons(SVector(rho, v1, p, a), equations) +end + +initial_condition = initial_condition_ec + +surface_flux = (flux_chan_etal, flux_nonconservative_chan_etal) +volume_flux = surface_flux +solver = DGSEM(polydeg = 4, surface_flux = surface_flux, + volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) + +coordinates_min = (-1.0,) +coordinates_max = (1.0,) +mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 6, + n_cells_max = 10_000) + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + +############################################################################### +# ODE solvers, callbacks etc. 
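+
+# The screen output of the `AnalysisCallback` reports the entropy residual `∑∂S/∂U ⋅ Uₜ`
+# mentioned above. To additionally track the total entropy itself, one could, for example,
+# pass `extra_analysis_integrals = (entropy,)` when constructing the `AnalysisCallback` below.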
+ +tspan = (0.0, 0.4) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 + +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +save_solution = SaveSolutionCallback(interval = 100, + save_initial_solution = true, + save_final_solution = true, + solution_variables = cons2prim) + +stepsize_callback = StepsizeCallback(cfl = 0.8) + +callbacks = CallbackSet(summary_callback, + analysis_callback, alive_callback, + save_solution, + stepsize_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); +summary_callback() # print the timer summary diff --git a/examples/tree_1d_dgsem/elixir_euler_quasi_1d_source_terms.jl b/examples/tree_1d_dgsem/elixir_euler_quasi_1d_source_terms.jl new file mode 100644 index 00000000000..91bb1ba6e8c --- /dev/null +++ b/examples/tree_1d_dgsem/elixir_euler_quasi_1d_source_terms.jl @@ -0,0 +1,60 @@ +using OrdinaryDiffEq +using Trixi +using ForwardDiff + +############################################################################### +# Semidiscretization of the quasi 1d compressible Euler equations +# See Chan et al. https://doi.org/10.48550/arXiv.2307.12089 for details + +equations = CompressibleEulerEquationsQuasi1D(1.4) + +initial_condition = initial_condition_convergence_test + +surface_flux = (flux_chan_etal, flux_nonconservative_chan_etal) +volume_flux = surface_flux +solver = DGSEM(polydeg = 4, surface_flux = surface_flux, + volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) + +coordinates_min = -1.0 +coordinates_max = 1.0 +mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 4, + n_cells_max = 10_000) + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + source_terms = source_terms_convergence_test) + +############################################################################### +# ODE solvers, callbacks etc. 
+
+tspan = (0.0, 2.0)
+ode = semidiscretize(semi, tspan)
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 100
+analysis_callback = AnalysisCallback(semi, interval = analysis_interval,
+                                     extra_analysis_errors = (:l2_error_primitive,
+                                                              :linf_error_primitive))
+
+alive_callback = AliveCallback(analysis_interval = analysis_interval)
+
+save_solution = SaveSolutionCallback(interval = 100,
+                                     save_initial_solution = true,
+                                     save_final_solution = true,
+                                     solution_variables = cons2prim)
+
+stepsize_callback = StepsizeCallback(cfl = 0.8)
+
+callbacks = CallbackSet(summary_callback,
+                        analysis_callback, alive_callback,
+                        save_solution,
+                        stepsize_callback)
+
+###############################################################################
+# run the simulation
+
+sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false),
+            dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
+            save_everystep = false, callback = callbacks);
+summary_callback() # print the timer summary
diff --git a/src/Trixi.jl b/src/Trixi.jl
index b8110cf5bdd..e7b849e2642 100644
--- a/src/Trixi.jl
+++ b/src/Trixi.jl
@@ -139,6 +139,7 @@ export AcousticPerturbationEquations2D,
        CompressibleEulerEquations3D,
        CompressibleEulerMulticomponentEquations1D,
        CompressibleEulerMulticomponentEquations2D,
+       CompressibleEulerEquationsQuasi1D,
        IdealGlmMhdEquations1D, IdealGlmMhdEquations2D, IdealGlmMhdEquations3D,
        IdealGlmMhdMulticomponentEquations1D, IdealGlmMhdMulticomponentEquations2D,
        HyperbolicDiffusionEquations1D, HyperbolicDiffusionEquations2D,
diff --git a/src/equations/compressible_euler_quasi_1d.jl b/src/equations/compressible_euler_quasi_1d.jl
new file mode 100644
index 00000000000..0a543277ee4
--- /dev/null
+++ b/src/equations/compressible_euler_quasi_1d.jl
@@ -0,0 +1,328 @@
+# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
+# Since these FMAs can increase the performance of many numerical algorithms,
+# we need to opt-in explicitly.
+# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
+@muladd begin
+#! format: noindent
+
+@doc raw"""
+    CompressibleEulerEquationsQuasi1D(gamma)
+
+The quasi-1d compressible Euler equations (see Chan et al. [DOI: 10.48550/arXiv.2307.12089](https://doi.org/10.48550/arXiv.2307.12089) for details)
+```math
+\frac{\partial}{\partial t}
+\begin{pmatrix}
+a \rho \\ a \rho v_1 \\ a e
+\end{pmatrix}
++
+\frac{\partial}{\partial x}
+\begin{pmatrix}
+a \rho v_1 \\ a \rho v_1^2 \\ a v_1 (e + p)
+\end{pmatrix}
++
+a \frac{\partial}{\partial x}
+\begin{pmatrix}
+0 \\ p \\ 0
+\end{pmatrix}
+=
+\begin{pmatrix}
+0 \\ 0 \\ 0
+\end{pmatrix}
+```
+for an ideal gas with ratio of specific heats `gamma` in one space dimension.
+Here, ``\rho`` is the density, ``v_1`` the velocity, ``e`` the specific total energy **rather than** specific internal energy,
+``a`` the (possibly) variable nozzle width, and
+```math
+p = (\gamma - 1) \left( e - \frac{1}{2} \rho v_1^2 \right)
+```
+the pressure.
+
+The nozzle width function ``a(x)`` is set inside the initial condition routine
+for a particular problem setup. To test the conservative form of the compressible Euler equations one can set the
+nozzle width variable ``a`` to one.
+
+In addition to the unknowns, Trixi.jl currently stores the nozzle width values at the approximation points
+despite being fixed in time.
+This affects the implementation and use of these equations in various ways:
+* The flux values corresponding to the nozzle width must be zero.
+* The nozzle width values must be included when defining initial conditions, boundary conditions or
+  source terms.
+* [`AnalysisCallback`](@ref) analyzes this variable.
+* Trixi.jl's visualization tools will visualize the nozzle width by default.
+"""
+struct CompressibleEulerEquationsQuasi1D{RealT <: Real} <:
+       AbstractCompressibleEulerEquations{1, 4}
+    gamma::RealT               # ratio of specific heats
+    inv_gamma_minus_one::RealT # = inv(gamma - 1); can be used to write slow divisions as fast multiplications
+
+    function CompressibleEulerEquationsQuasi1D(gamma)
+        γ, inv_gamma_minus_one = promote(gamma, inv(gamma - 1))
+        new{typeof(γ)}(γ, inv_gamma_minus_one)
+    end
+end
+
+have_nonconservative_terms(::CompressibleEulerEquationsQuasi1D) = True()
+function varnames(::typeof(cons2cons), ::CompressibleEulerEquationsQuasi1D)
+    ("a_rho", "a_rho_v1", "a_e", "a")
+end
+function varnames(::typeof(cons2prim), ::CompressibleEulerEquationsQuasi1D)
+    ("rho", "v1", "p", "a")
+end
+
+"""
+    initial_condition_convergence_test(x, t, equations::CompressibleEulerEquationsQuasi1D)
+
+A smooth initial condition used for convergence tests in combination with
+[`source_terms_convergence_test`](@ref)
+(and [`BoundaryConditionDirichlet(initial_condition_convergence_test)`](@ref) in non-periodic domains).
+"""
+function initial_condition_convergence_test(x, t,
+                                            equations::CompressibleEulerEquationsQuasi1D)
+    c = 2
+    A = 0.1
+    L = 2
+    f = 1 / L
+    ω = 2 * pi * f
+    ini = c + A * sin(ω * (x[1] - t))
+
+    rho = ini
+    v1 = 1.0
+    e = ini^2 / rho
+    p = (equations.gamma - 1) * (e - 0.5 * rho * v1^2)
+    a = 1.5 - 0.5 * cos(x[1] * pi)
+
+    return prim2cons(SVector(rho, v1, p, a), equations)
+end
+
+"""
+    source_terms_convergence_test(u, x, t, equations::CompressibleEulerEquationsQuasi1D)
+
+Source terms used for convergence tests in combination with
+[`initial_condition_convergence_test`](@ref)
+(and [`BoundaryConditionDirichlet(initial_condition_convergence_test)`](@ref) in non-periodic domains).
+
+This manufactured solution source term is specifically designed for the nozzle width 'a(x) = 1.5 - 0.5 * cos(x[1] * pi)'
+as defined in [`initial_condition_convergence_test`](@ref).
+"""
+@inline function source_terms_convergence_test(u, x, t,
+                                               equations::CompressibleEulerEquationsQuasi1D)
+    # Same settings as in `initial_condition_convergence_test`.
+ # Derivatives calculated with ForwardDiff.jl + c = 2 + A = 0.1 + L = 2 + f = 1 / L + ω = 2 * pi * f + x1, = x + ini(x1, t) = c + A * sin(ω * (x1 - t)) + + rho(x1, t) = ini(x1, t) + v1(x1, t) = 1.0 + e(x1, t) = ini(x1, t)^2 / rho(x1, t) + p1(x1, t) = (equations.gamma - 1) * (e(x1, t) - 0.5 * rho(x1, t) * v1(x1, t)^2) + a(x1, t) = 1.5 - 0.5 * cos(x1 * pi) + + arho(x1, t) = a(x1, t) * rho(x1, t) + arhou(x1, t) = arho(x1, t) * v1(x1, t) + aE(x1, t) = a(x1, t) * e(x1, t) + + darho_dt(x1, t) = ForwardDiff.derivative(t -> arho(x1, t), t) + darhou_dx(x1, t) = ForwardDiff.derivative(x1 -> arhou(x1, t), x1) + + arhouu(x1, t) = arhou(x1, t) * v1(x1, t) + darhou_dt(x1, t) = ForwardDiff.derivative(t -> arhou(x1, t), t) + darhouu_dx(x1, t) = ForwardDiff.derivative(x1 -> arhouu(x1, t), x1) + dp1_dx(x1, t) = ForwardDiff.derivative(x1 -> p1(x1, t), x1) + + auEp(x1, t) = a(x1, t) * v1(x1, t) * (e(x1, t) + p1(x1, t)) + daE_dt(x1, t) = ForwardDiff.derivative(t -> aE(x1, t), t) + dauEp_dx(x1, t) = ForwardDiff.derivative(x1 -> auEp(x1, t), x1) + + du1 = darho_dt(x1, t) + darhou_dx(x1, t) + du2 = darhou_dt(x1, t) + darhouu_dx(x1, t) + a(x1, t) * dp1_dx(x1, t) + du3 = daE_dt(x1, t) + dauEp_dx(x1, t) + + return SVector(du1, du2, du3, 0.0) +end + +# Calculate 1D flux for a single point +@inline function flux(u, orientation::Integer, + equations::CompressibleEulerEquationsQuasi1D) + a_rho, a_rho_v1, a_e, a = u + rho, v1, p, a = cons2prim(u, equations) + e = a_e / a + + # Ignore orientation since it is always "1" in 1D + f1 = a_rho_v1 + f2 = a_rho_v1 * v1 + f3 = a * v1 * (e + p) + + return SVector(f1, f2, f3, zero(eltype(u))) +end + +""" +@inline function flux_nonconservative_chan_etal(u_ll, u_rr, orientation::Integer, + equations::CompressibleEulerEquationsQuasi1D) + +Non-symmetric two-point volume flux discretizing the nonconservative (source) term +that contains the gradient of the pressure [`CompressibleEulerEquationsQuasi1D`](@ref) +and the nozzle width. + +Further details are available in the paper: +- Jesse Chan, Khemraj Shukla, Xinhui Wu, Ruofeng Liu, Prani Nalluri (2023) + High order entropy stable schemes for the quasi-one-dimensional + shallow water and compressible Euler equations + [DOI: 10.48550/arXiv.2307.12089](https://doi.org/10.48550/arXiv.2307.12089) +""" +@inline function flux_nonconservative_chan_etal(u_ll, u_rr, orientation::Integer, + equations::CompressibleEulerEquationsQuasi1D) + #Variables + _, _, p_ll, a_ll = cons2prim(u_ll, equations) + _, _, p_rr, _ = cons2prim(u_rr, equations) + + # For flux differencing using non-conservative terms, we return the + # non-conservative flux scaled by 2. This cancels with a factor of 0.5 + # in the arithmetic average of {p}. + p_avg = p_ll + p_rr + + z = zero(eltype(u_ll)) + + return SVector(z, a_ll * p_avg, z, z) +end + +""" +@inline function flux_chan_etal(u_ll, u_rr, orientation::Integer, + equations::CompressibleEulerEquationsQuasi1D) + +Conservative (symmetric) part of the entropy conservative flux for quasi 1D compressible Euler equations split form. +This flux is a generalization of [`flux_ranocha`](@ref) for [`CompressibleEulerEquations1D`](@ref). 
+Further details are available in the paper: +- Jesse Chan, Khemraj Shukla, Xinhui Wu, Ruofeng Liu, Prani Nalluri (2023) + High order entropy stable schemes for the quasi-one-dimensional + shallow water and compressible Euler equations + [DOI: 10.48550/arXiv.2307.12089](https://doi.org/10.48550/arXiv.2307.12089) +""" +@inline function flux_chan_etal(u_ll, u_rr, orientation::Integer, + equations::CompressibleEulerEquationsQuasi1D) + # Unpack left and right state + rho_ll, v1_ll, p_ll, a_ll = cons2prim(u_ll, equations) + rho_rr, v1_rr, p_rr, a_rr = cons2prim(u_rr, equations) + + # Compute the necessary mean values + rho_mean = ln_mean(rho_ll, rho_rr) + # Algebraically equivalent to `inv_ln_mean(rho_ll / p_ll, rho_rr / p_rr)` + # in exact arithmetic since + # log((ϱₗ/pₗ) / (ϱᵣ/pᵣ)) / (ϱₗ/pₗ - ϱᵣ/pᵣ) + # = pₗ pᵣ log((ϱₗ pᵣ) / (ϱᵣ pₗ)) / (ϱₗ pᵣ - ϱᵣ pₗ) + inv_rho_p_mean = p_ll * p_rr * inv_ln_mean(rho_ll * p_rr, rho_rr * p_ll) + v1_avg = 0.5 * (v1_ll + v1_rr) + a_v1_avg = 0.5 * (a_ll * v1_ll + a_rr * v1_rr) + p_avg = 0.5 * (p_ll + p_rr) + velocity_square_avg = 0.5 * (v1_ll * v1_rr) + + # Calculate fluxes + # Ignore orientation since it is always "1" in 1D + f1 = rho_mean * a_v1_avg + f2 = rho_mean * a_v1_avg * v1_avg + f3 = f1 * (velocity_square_avg + inv_rho_p_mean * equations.inv_gamma_minus_one) + + 0.5 * (p_ll * a_rr * v1_rr + p_rr * a_ll * v1_ll) + + return SVector(f1, f2, f3, zero(eltype(u_ll))) +end + +# Calculate estimates for maximum wave speed for local Lax-Friedrichs-type dissipation as the +# maximum velocity magnitude plus the maximum speed of sound +@inline function max_abs_speed_naive(u_ll, u_rr, orientation::Integer, + equations::CompressibleEulerEquationsQuasi1D) + a_rho_ll, a_rho_v1_ll, a_e_ll, a_ll = u_ll + a_rho_rr, a_rho_v1_rr, a_e_rr, a_rr = u_rr + + # Calculate primitive variables and speed of sound + rho_ll = a_rho_ll / a_ll + e_ll = a_e_ll / a_ll + v1_ll = a_rho_v1_ll / a_rho_ll + v_mag_ll = abs(v1_ll) + p_ll = (equations.gamma - 1) * (e_ll - 0.5 * rho_ll * v_mag_ll^2) + c_ll = sqrt(equations.gamma * p_ll / rho_ll) + rho_rr = a_rho_rr / a_rr + e_rr = a_e_rr / a_rr + v1_rr = a_rho_v1_rr / a_rho_rr + v_mag_rr = abs(v1_rr) + p_rr = (equations.gamma - 1) * (e_rr - 0.5 * rho_rr * v_mag_rr^2) + c_rr = sqrt(equations.gamma * p_rr / rho_rr) + + λ_max = max(v_mag_ll, v_mag_rr) + max(c_ll, c_rr) +end + +@inline function max_abs_speeds(u, equations::CompressibleEulerEquationsQuasi1D) + a_rho, a_rho_v1, a_e, a = u + rho = a_rho / a + v1 = a_rho_v1 / a_rho + e = a_e / a + p = (equations.gamma - 1) * (e - 0.5 * rho * v1^2) + c = sqrt(equations.gamma * p / rho) + + return (abs(v1) + c,) +end + +# Convert conservative variables to primitive. We use the convention that the primitive +# variables for the quasi-1D equations are `(rho, v1, p)` (i.e., the same as the primitive +# variables for `CompressibleEulerEquations1D`) +@inline function cons2prim(u, equations::CompressibleEulerEquationsQuasi1D) + a_rho, a_rho_v1, a_e, a = u + q = cons2prim(SVector(a_rho, a_rho_v1, a_e) / a, + CompressibleEulerEquations1D(equations.gamma)) + + return SVector(q[1], q[2], q[3], a) +end + +# The entropy for the quasi-1D compressible Euler equations is the entropy for the +# 1D compressible Euler equations scaled by the channel width `a`. 
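+# Concretely, for conservative variables u = (a_rho, a_rho_v1, a_e, a) this means evaluating
+# a * entropy((a_rho, a_rho_v1, a_e) / a, CompressibleEulerEquations1D(gamma)).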
+@inline function entropy(u, equations::CompressibleEulerEquationsQuasi1D) + a_rho, a_rho_v1, a_e, a = u + q = a * entropy(SVector(a_rho, a_rho_v1, a_e) / a, + CompressibleEulerEquations1D(equations.gamma)) + + return SVector(q[1], q[2], q[3], a) +end + +# Convert conservative variables to entropy. The entropy variables for the +# quasi-1D compressible Euler equations are identical to the entropy variables +# for the standard Euler equations for an appropriate definition of `entropy`. +@inline function cons2entropy(u, equations::CompressibleEulerEquationsQuasi1D) + a_rho, a_rho_v1, a_e, a = u + w = cons2entropy(SVector(a_rho, a_rho_v1, a_e) / a, + CompressibleEulerEquations1D(equations.gamma)) + + # we follow the convention for other spatially-varying equations such as + # `ShallowWaterEquations1D` and return the spatially varying coefficient + # `a` as the final entropy variable. + return SVector(w[1], w[2], w[3], a) +end + +# Convert primitive to conservative variables +@inline function prim2cons(u, equations::CompressibleEulerEquationsQuasi1D) + rho, v1, p, a = u + q = prim2cons(u, CompressibleEulerEquations1D(equations.gamma)) + + return SVector(a * q[1], a * q[2], a * q[3], a) +end + +@inline function density(u, equations::CompressibleEulerEquationsQuasi1D) + a_rho, _, _, a = u + rho = a_rho / a + return rho +end + +@inline function pressure(u, equations::CompressibleEulerEquationsQuasi1D) + a_rho, a_rho_v1, a_e, a = u + return pressure(SVector(a_rho, a_rho_v1, a_e) / a, + CompressibleEulerEquations1D(equations.gamma)) +end + +@inline function density_pressure(u, equations::CompressibleEulerEquationsQuasi1D) + a_rho, a_rho_v1, a_e, a = u + return density_pressure(SVector(a_rho, a_rho_v1, a_e) / a, + CompressibleEulerEquations1D(equations.gamma)) +end +end # @muladd diff --git a/src/equations/equations.jl b/src/equations/equations.jl index 582d672b756..7a3c326984d 100644 --- a/src/equations/equations.jl +++ b/src/equations/equations.jl @@ -409,6 +409,7 @@ abstract type AbstractCompressibleEulerEquations{NDIMS, NVARS} <: include("compressible_euler_1d.jl") include("compressible_euler_2d.jl") include("compressible_euler_3d.jl") +include("compressible_euler_quasi_1d.jl") # CompressibleEulerMulticomponentEquations abstract type AbstractCompressibleEulerMulticomponentEquations{NDIMS, NVARS, NCOMP} <: diff --git a/test/test_tree_1d_euler.jl b/test/test_tree_1d_euler.jl index 6cd7998ab02..39a1f6e30ba 100644 --- a/test/test_tree_1d_euler.jl +++ b/test/test_tree_1d_euler.jl @@ -393,6 +393,79 @@ end @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 end end + +@trixi_testset "elixir_euler_quasi_1d_source_terms.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_quasi_1d_source_terms.jl"), + l2=[ + 3.876288369618363e-7, + 2.2247043122302947e-7, + 2.964004224572679e-7, + 5.2716983399807875e-8, + ], + linf=[ + 2.3925118561862746e-6, + 1.3603693522767912e-6, + 1.821888865105592e-6, + 1.1166012159335992e-7, + ]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + +@trixi_testset "elixir_euler_quasi_1d_discontinuous.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_euler_quasi_1d_discontinuous.jl"), + l2=[ + 0.045510421156346015, + 0.036750584788912195, + 0.2468985959132176, + 0.03684494180829024, + ], + linf=[ + 0.3313374853025697, + 0.11621933362158643, + 
1.827403013568638, + 0.28045939999015723, + ]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + +@trixi_testset "elixir_euler_quasi_1d_ec.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_quasi_1d_ec.jl"), + l2=[ + 0.08889113985713998, + 0.16199235348889673, + 0.40316524365054346, + 2.9602775074723667e-16, + ], + linf=[ + 0.28891355898284043, + 0.3752709888964313, + 0.84477102402413, + 8.881784197001252e-16, + ]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end end end # module diff --git a/test/test_unit.jl b/test/test_unit.jl index d2e744da62f..b3ed29d38e3 100644 --- a/test/test_unit.jl +++ b/test/test_unit.jl @@ -654,6 +654,19 @@ end end end +@timed_testset "Consistency check for flux_chan_etal: CEEQ" begin + + # Set up equations and dummy conservative variables state + equations = CompressibleEulerEquationsQuasi1D(1.4) + u = SVector(1.1, 2.34, 5.5, 2.73) + + orientations = [1] + for orientation in orientations + @test flux_chan_etal(u, u, orientation, equations) ≈ + flux(u, orientation, equations) + end +end + @timed_testset "Consistency check for HLL flux (naive): LEE" begin flux_hll = FluxHLL(min_max_speed_naive) From f19144435650e626c7c57d48060ea0a03e247895 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9s=20Rueda-Ram=C3=ADrez?= Date: Thu, 14 Dec 2023 13:02:17 +0100 Subject: [PATCH 054/166] Enable save solution with time intervals for SimpleIntegratorSSP (#1677) * First attempt to enable save solution with time intervals for SimpleIntegratorSSP * Importing `add_tstop!` * First working version of SimpleIntegratorSSP with SaveSolutionCallback using time intervals * Improved formatting and updated reference solution for test * Modified initialization of tstops to ensure a stop at the end of the simulation * Added missing docstring * Removed OrdinaryDiffEq from Trixi's dependencies * Empty tstops BinaryHeap during the call to `terminate!(integrator::SimpleIntegratorSSP)` * Fixed bug and added explanatory comments * Updated Project.toml * format --------- Co-authored-by: Hendrik Ranocha --- Project.toml | 2 + .../elixir_euler_shockcapturing_subcell.jl | 2 +- src/Trixi.jl | 4 +- src/time_integration/methods_SSP.jl | 75 +++++++++++++++++-- test/test_tree_2d_euler.jl | 16 ++-- 5 files changed, 82 insertions(+), 17 deletions(-) diff --git a/Project.toml b/Project.toml index 539dafc3034..267a3aa7066 100644 --- a/Project.toml +++ b/Project.toml @@ -6,6 +6,7 @@ version = "0.6.5-pre" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" ConstructionBase = "187b0558-2788-49d3-abe0-74a17ed4e7c9" +DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" DiffEqCallbacks = "459566f4-90b8-5000-8ac3-15dfb0a30def" EllipsisNotation = "da5c29d0-fa7d-589e-88eb-ea29b0a81949" FillArrays = "1a297f60-69ca-5386-bcde-b61e274b549b" @@ -52,6 +53,7 @@ TrixiMakieExt = "Makie" [compat] CodeTracking = "1.0.5" ConstructionBase = "1.3" +DataStructures = "0.18.15" DiffEqCallbacks = "2.25" EllipsisNotation = "1.0" FillArrays = "0.13.2, 1" diff --git a/examples/tree_2d_dgsem/elixir_euler_shockcapturing_subcell.jl b/examples/tree_2d_dgsem/elixir_euler_shockcapturing_subcell.jl index 282805a0e03..44e63a0872e 100644 --- 
a/examples/tree_2d_dgsem/elixir_euler_shockcapturing_subcell.jl +++ b/examples/tree_2d_dgsem/elixir_euler_shockcapturing_subcell.jl @@ -67,7 +67,7 @@ analysis_callback = AnalysisCallback(semi, interval = analysis_interval) alive_callback = AliveCallback(analysis_interval = analysis_interval) -save_solution = SaveSolutionCallback(interval = 100, +save_solution = SaveSolutionCallback(dt = 0.1, save_initial_solution = true, save_final_solution = true, solution_variables = cons2prim) diff --git a/src/Trixi.jl b/src/Trixi.jl index e7b849e2642..e18b2f6415c 100644 --- a/src/Trixi.jl +++ b/src/Trixi.jl @@ -37,7 +37,8 @@ using SciMLBase: CallbackSet, DiscreteCallback, import SciMLBase: get_du, get_tmp_cache, u_modified!, AbstractODEIntegrator, init, step!, check_error, get_proposed_dt, set_proposed_dt!, - terminate!, remake + terminate!, remake, add_tstop!, has_tstop, first_tstop + using CodeTracking: CodeTracking using ConstructionBase: ConstructionBase using DiffEqCallbacks: PeriodicCallback, PeriodicCallbackAffect @@ -70,6 +71,7 @@ using TriplotBase: TriplotBase using TriplotRecipes: DGTriPseudocolor @reexport using SimpleUnPack: @unpack using SimpleUnPack: @pack! +using DataStructures: BinaryHeap, FasterForward, extract_all! # finite difference SBP operators using SummationByPartsOperators: AbstractDerivativeOperator, diff --git a/src/time_integration/methods_SSP.jl b/src/time_integration/methods_SSP.jl index dbb9e51121b..9d1e06488b4 100644 --- a/src/time_integration/methods_SSP.jl +++ b/src/time_integration/methods_SSP.jl @@ -55,17 +55,25 @@ struct SimpleSSPRK33{StageCallbacks} <: SimpleAlgorithmSSP end # This struct is needed to fake https://github.com/SciML/OrdinaryDiffEq.jl/blob/0c2048a502101647ac35faabd80da8a5645beac7/src/integrators/type.jl#L1 -mutable struct SimpleIntegratorSSPOptions{Callback} +mutable struct SimpleIntegratorSSPOptions{Callback, TStops} callback::Callback # callbacks; used in Trixi adaptive::Bool # whether the algorithm is adaptive; ignored dtmax::Float64 # ignored maxiters::Int # maximal number of time steps - tstops::Vector{Float64} # tstops from https://diffeq.sciml.ai/v6.8/basics/common_solver_opts/#Output-Control-1; ignored + tstops::TStops # tstops from https://diffeq.sciml.ai/v6.8/basics/common_solver_opts/#Output-Control-1; ignored end function SimpleIntegratorSSPOptions(callback, tspan; maxiters = typemax(Int), kwargs...) 
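# (The body below replaces the plain `Vector` of stop times with a `BinaryHeap` from
#  DataStructures.jl ordered by `FasterForward`, i.e. a min-heap whose first element is
#  always the next pending stop time; see `has_tstop` and `first_tstop` further down.)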
- SimpleIntegratorSSPOptions{typeof(callback)}(callback, false, Inf, maxiters, - [last(tspan)]) + tstops_internal = BinaryHeap{eltype(tspan)}(FasterForward()) + # We add last(tspan) to make sure that the time integration stops at the end time + push!(tstops_internal, last(tspan)) + # We add 2 * last(tspan) because add_tstop!(integrator, t) is only called by DiffEqCallbacks.jl if tstops contains a time that is larger than t + # (https://github.com/SciML/DiffEqCallbacks.jl/blob/025dfe99029bd0f30a2e027582744528eb92cd24/src/iterative_and_periodic.jl#L92) + push!(tstops_internal, 2 * last(tspan)) + SimpleIntegratorSSPOptions{typeof(callback), typeof(tstops_internal)}(callback, + false, Inf, + maxiters, + tstops_internal) end # This struct is needed to fake https://github.com/SciML/OrdinaryDiffEq.jl/blob/0c2048a502101647ac35faabd80da8a5645beac7/src/integrators/type.jl#L77 @@ -78,6 +86,7 @@ mutable struct SimpleIntegratorSSP{RealT <: Real, uType, Params, Sol, F, Alg, du::uType r0::uType t::RealT + tdir::RealT dt::RealT # current time step dtcache::RealT # ignored iter::Int # current number of time steps (iteration) @@ -87,8 +96,29 @@ mutable struct SimpleIntegratorSSP{RealT <: Real, uType, Params, Sol, F, Alg, alg::Alg opts::SimpleIntegratorSSPOptions finalstep::Bool # added for convenience + dtchangeable::Bool + force_stepfail::Bool end +""" + add_tstop!(integrator::SimpleIntegratorSSP, t) +Add a time stop during the time integration process. +This function is called after the periodic SaveSolutionCallback to specify the next stop to save the solution. +""" +function add_tstop!(integrator::SimpleIntegratorSSP, t) + integrator.tdir * (t - integrator.t) < zero(integrator.t) && + error("Tried to add a tstop that is behind the current time. This is strictly forbidden") + # We need to remove the first entry of tstops when a new entry is added. + # Otherwise, the simulation gets stuck at the previous tstop and dt is adjusted to zero. + if length(integrator.opts.tstops) > 1 + pop!(integrator.opts.tstops) + end + push!(integrator.opts.tstops, integrator.tdir * t) +end + +has_tstop(integrator::SimpleIntegratorSSP) = !isempty(integrator.opts.tstops) +first_tstop(integrator::SimpleIntegratorSSP) = first(integrator.opts.tstops) + # Forward integrator.stats.naccept to integrator.iter (see GitHub PR#771) function Base.getproperty(integrator::SimpleIntegratorSSP, field::Symbol) if field === :stats @@ -113,11 +143,13 @@ function solve(ode::ODEProblem, alg = SimpleSSPRK33()::SimpleAlgorithmSSP; du = similar(u) r0 = similar(u) t = first(ode.tspan) + tdir = sign(ode.tspan[end] - ode.tspan[1]) iter = 0 - integrator = SimpleIntegratorSSP(u, du, r0, t, dt, zero(dt), iter, ode.p, + integrator = SimpleIntegratorSSP(u, du, r0, t, tdir, dt, zero(dt), iter, ode.p, (prob = ode,), ode.f, alg, SimpleIntegratorSSPOptions(callback, ode.tspan; - kwargs...), false) + kwargs...), + false, true, false) # resize container resize!(integrator.p, nelements(integrator.p.solver, integrator.p.cache)) @@ -160,6 +192,8 @@ function solve!(integrator::SimpleIntegratorSSP) terminate!(integrator) end + modify_dt_for_tstops!(integrator) + @. integrator.r0 = integrator.u for stage in eachindex(alg.c) t_stage = integrator.t + integrator.dt * alg.c[stage] @@ -198,6 +232,10 @@ function solve!(integrator::SimpleIntegratorSSP) end end + # Empty the tstops array. + # This cannot be done in terminate!(integrator::SimpleIntegratorSSP) because DiffEqCallbacks.PeriodicCallbackAffect would return at error. 
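    # (`extract_all!` from DataStructures.jl empties the binary heap in place; whatever
    #  stop times are still pending, including the auxiliary `2 * last(tspan)` entry
    #  pushed in `SimpleIntegratorSSPOptions`, are simply discarded at this point.)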
+ extract_all!(integrator.opts.tstops) + for stage_callback in alg.stage_callbacks finalize_callback(stage_callback, integrator.p) end @@ -226,7 +264,30 @@ end # stop the time integration function terminate!(integrator::SimpleIntegratorSSP) integrator.finalstep = true - empty!(integrator.opts.tstops) +end + +""" + modify_dt_for_tstops!(integrator::SimpleIntegratorSSP) +Modify the time-step size to match the time stops specified in integrator.opts.tstops. +To avoid adding OrdinaryDiffEq to Trixi's dependencies, this routine is a copy of +https://github.com/SciML/OrdinaryDiffEq.jl/blob/d76335281c540ee5a6d1bd8bb634713e004f62ee/src/integrators/integrator_utils.jl#L38-L54 +""" +function modify_dt_for_tstops!(integrator::SimpleIntegratorSSP) + if has_tstop(integrator) + tdir_t = integrator.tdir * integrator.t + tdir_tstop = first_tstop(integrator) + if integrator.opts.adaptive + integrator.dt = integrator.tdir * + min(abs(integrator.dt), abs(tdir_tstop - tdir_t)) # step! to the end + elseif iszero(integrator.dtcache) && integrator.dtchangeable + integrator.dt = integrator.tdir * abs(tdir_tstop - tdir_t) + elseif integrator.dtchangeable && !integrator.force_stepfail + # always try to step! with dtcache, but lower if a tstop + # however, if force_stepfail then don't set to dtcache, and no tstop worry + integrator.dt = integrator.tdir * + min(abs(integrator.dtcache), abs(tdir_tstop - tdir_t)) # step! to the end + end + end end # used for AMR diff --git a/test/test_tree_2d_euler.jl b/test/test_tree_2d_euler.jl index 04a295537a3..65899cd5263 100644 --- a/test/test_tree_2d_euler.jl +++ b/test/test_tree_2d_euler.jl @@ -214,16 +214,16 @@ end @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_shockcapturing_subcell.jl"), l2=[ - 0.08508147906199143, - 0.04510299017724501, - 0.045103019801950375, - 0.6930704343869766, + 0.08508152653623638, + 0.04510301725066843, + 0.04510304668512745, + 0.6930705064715306, ], linf=[ - 0.31123546471463326, - 0.5616274869594462, - 0.5619692712224448, - 2.88670199345138, + 0.31136518019691406, + 0.5617651935473419, + 0.5621200790240503, + 2.8866869108596056, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) From 40e1af41e49747f22908fc07acfc6eac92aec204 Mon Sep 17 00:00:00 2001 From: Yik Haw Teoh <66682196+teohyikhaw@users.noreply.github.com> Date: Thu, 14 Dec 2023 09:08:08 -0800 Subject: [PATCH 055/166] Fixed `cons2entropy` files and implemented `entropy2cons` for `CompressibleEulerMulticomponent1D` and `CompressibleEulerMulticomponent2D` (#1767) * initial bug fix * formatting * Added test cases for entropy2cons and cons2entropy for compressible multicomponent euler 1d and 2d * Fixed total entropy function call * minor typo * Updated formatting Co-authored-by: Jesse Chan <1156048+jlchan@users.noreply.github.com> * Updated formatting Co-authored-by: Jesse Chan <1156048+jlchan@users.noreply.github.com> * Fixed formatting issues * Update test/test_tree_1d_eulermulti.jl * Update test/test_tree_2d_eulermulti.jl --------- Co-authored-by: Jesse Chan <1156048+jlchan@users.noreply.github.com> --- .../compressible_euler_multicomponent_1d.jl | 78 +++- .../compressible_euler_multicomponent_2d.jl | 80 +++- test/test_tree_1d_eulermulti.jl | 241 +++++----- test/test_tree_2d_eulermulti.jl | 441 +++++++++--------- 4 files changed, 505 insertions(+), 335 deletions(-) diff --git a/src/equations/compressible_euler_multicomponent_1d.jl b/src/equations/compressible_euler_multicomponent_1d.jl index 8ddb0dcd08f..6338e04c3ed 100644 --- 
a/src/equations/compressible_euler_multicomponent_1d.jl +++ b/src/equations/compressible_euler_multicomponent_1d.jl @@ -461,6 +461,7 @@ end # Convert conservative variables to entropy @inline function cons2entropy(u, equations::CompressibleEulerMulticomponentEquations1D) @unpack cv, gammas, gas_constants = equations + rho_v1, rho_e = u rho = density(u, equations) @@ -480,21 +481,86 @@ end s = log(p) - gamma * log(rho) - log(gas_constant) rho_p = rho / p T = (rho_e - 0.5 * rho * v_square) / (help1) - entrop_rho = SVector{ncomponents(equations), real(equations)}(gas_constant * - ((gamma - s) / - (gamma - 1.0) - - (0.5 * v_square * - rho_p)) + + entrop_rho = SVector{ncomponents(equations), real(equations)}((cv[i] * + (1 - log(T)) + + gas_constants[i] * + (1 + log(u[i + 2])) - + v1^2 / (2 * T)) for i in eachcomponent(equations)) w1 = gas_constant * v1 * rho_p - w2 = gas_constant * (-1.0 * rho_p) + w2 = gas_constant * (-rho_p) entrop_other = SVector{2, real(equations)}(w1, w2) return vcat(entrop_other, entrop_rho) end +# Convert entropy variables to conservative variables +@inline function entropy2cons(w, equations::CompressibleEulerMulticomponentEquations1D) + @unpack gammas, gas_constants, cv, cp = equations + T = -1 / w[2] + v1 = w[1] * T + cons_rho = SVector{ncomponents(equations), real(equations)}(exp(1 / + gas_constants[i] * + (-cv[i] * + log(-w[2]) - + cp[i] + w[i + 2] - + 0.5 * w[1]^2 / + w[2])) + for i in eachcomponent(equations)) + + rho = zero(cons_rho[1]) + help1 = zero(cons_rho[1]) + help2 = zero(cons_rho[1]) + p = zero(cons_rho[1]) + for i in eachcomponent(equations) + rho += cons_rho[i] + help1 += cons_rho[i] * cv[i] * gammas[i] + help2 += cons_rho[i] * cv[i] + p += cons_rho[i] * gas_constants[i] * T + end + u1 = rho * v1 + gamma = help1 / help2 + u2 = p / (gamma - 1) + 0.5 * rho * v1^2 + cons_other = SVector{2, real(equations)}(u1, u2) + return vcat(cons_other, cons_rho) +end + +@inline function total_entropy(u, equations::CompressibleEulerMulticomponentEquations1D) + @unpack cv, gammas, gas_constants = equations + rho_v1, rho_e = u + rho = density(u, equations) + T = temperature(u, equations) + + total_entropy = zero(u[1]) + for i in eachcomponent(equations) + total_entropy -= u[i + 2] * (cv[i] * log(T) - gas_constants[i] * log(u[i + 2])) + end + + return total_entropy +end + +@inline function temperature(u, equations::CompressibleEulerMulticomponentEquations1D) + @unpack cv, gammas, gas_constants = equations + + rho_v1, rho_e = u + + rho = density(u, equations) + help1 = zero(rho) + + for i in eachcomponent(equations) + help1 += u[i + 2] * cv[i] + end + + v1 = rho_v1 / rho + v_square = v1^2 + T = (rho_e - 0.5 * rho * v_square) / help1 + + return T +end + """ totalgamma(u, equations::CompressibleEulerMulticomponentEquations1D) diff --git a/src/equations/compressible_euler_multicomponent_2d.jl b/src/equations/compressible_euler_multicomponent_2d.jl index 940d88b1aa5..60fce222f21 100644 --- a/src/equations/compressible_euler_multicomponent_2d.jl +++ b/src/equations/compressible_euler_multicomponent_2d.jl @@ -665,22 +665,57 @@ end s = log(p) - gamma * log(rho) - log(gas_constant) rho_p = rho / p T = (rho_e - 0.5 * rho * v_square) / (help1) - entrop_rho = SVector{ncomponents(equations), real(equations)}(gas_constant * - ((gamma - s) / - (gamma - 1.0) - - (0.5 * v_square * - rho_p)) + + entrop_rho = SVector{ncomponents(equations), real(equations)}((cv[i] * + (1 - log(T)) + + gas_constants[i] * + (1 + log(u[i + 3])) - + v_square / (2 * T)) for i in eachcomponent(equations)) w1 = 
gas_constant * v1 * rho_p w2 = gas_constant * v2 * rho_p - w3 = gas_constant * rho_p * (-1) + w3 = gas_constant * (-rho_p) entrop_other = SVector{3, real(equations)}(w1, w2, w3) return vcat(entrop_other, entrop_rho) end +# Convert entropy variables to conservative variables +@inline function entropy2cons(w, equations::CompressibleEulerMulticomponentEquations2D) + @unpack gammas, gas_constants, cp, cv = equations + T = -1 / w[3] + v1 = w[1] * T + v2 = w[2] * T + v_squared = v1^2 + v2^2 + cons_rho = SVector{ncomponents(equations), real(equations)}(exp((w[i + 3] - + cv[i] * + (1 - log(T)) + + v_squared / + (2 * T)) / + gas_constants[i] - + 1) + for i in eachcomponent(equations)) + + rho = zero(cons_rho[1]) + help1 = zero(cons_rho[1]) + help2 = zero(cons_rho[1]) + p = zero(cons_rho[1]) + for i in eachcomponent(equations) + rho += cons_rho[i] + help1 += cons_rho[i] * cv[i] * gammas[i] + help2 += cons_rho[i] * cv[i] + p += cons_rho[i] * gas_constants[i] * T + end + u1 = rho * v1 + u2 = rho * v2 + gamma = help1 / help2 + u3 = p / (gamma - 1) + 0.5 * rho * v_squared + cons_other = SVector{3, real(equations)}(u1, u2, u3) + return vcat(cons_other, cons_rho) +end + # Convert primitive to conservative variables @inline function prim2cons(prim, equations::CompressibleEulerMulticomponentEquations2D) @unpack cv, gammas = equations @@ -700,6 +735,39 @@ end return vcat(cons_other, cons_rho) end +@inline function total_entropy(u, equations::CompressibleEulerMulticomponentEquations2D) + @unpack cv, gammas, gas_constants = equations + rho = density(u, equations) + T = temperature(u, equations) + + total_entropy = zero(u[1]) + for i in eachcomponent(equations) + total_entropy -= u[i + 3] * (cv[i] * log(T) - gas_constants[i] * log(u[i + 3])) + end + + return total_entropy +end + +@inline function temperature(u, equations::CompressibleEulerMulticomponentEquations2D) + @unpack cv, gammas, gas_constants = equations + + rho_v1, rho_v2, rho_e = u + + rho = density(u, equations) + help1 = zero(rho) + + for i in eachcomponent(equations) + help1 += u[i + 3] * cv[i] + end + + v1 = rho_v1 / rho + v2 = rho_v2 / rho + v_square = v1^2 + v2^2 + T = (rho_e - 0.5 * rho * v_square) / help1 + + return T +end + """ totalgamma(u, equations::CompressibleEulerMulticomponentEquations2D) diff --git a/test/test_tree_1d_eulermulti.jl b/test/test_tree_1d_eulermulti.jl index bd86de928e3..b6c79ce03d1 100644 --- a/test/test_tree_1d_eulermulti.jl +++ b/test/test_tree_1d_eulermulti.jl @@ -2,136 +2,153 @@ module TestExamples1DEulerMulti using Test using Trixi +using ForwardDiff include("test_trixi.jl") EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_1d_dgsem") @testset "Compressible Euler Multicomponent" begin -#! 
format: noindent + @trixi_testset "Testing entropy2cons and cons2entropy" begin + using ForwardDiff + gammas = (1.3272378792562836, 1.5269959187969864, 1.8362285750521512, + 1.0409061360276926, 1.4652015053812224, 1.3626493264184423) + gas_constants = (1.817636851910076, 6.760820475922636, 5.588953939749113, + 6.31574782981543, 3.362932038038397, 3.212779569399733) + equations = CompressibleEulerMulticomponentEquations1D(gammas = SVector{length(gammas)}(gammas...), + gas_constants = SVector{length(gas_constants)}(gas_constants...)) + u = [-1.4632513788889214, 0.9908786980927811, 0.2909066990257628, + 0.6256623915420473, 0.4905882754313441, 0.14481800501749112, + 1.0333532872771651, 0.6805599818745411] + w = cons2entropy(u, equations) + # test that the entropy variables match the gradients of the total entropy + @test w ≈ ForwardDiff.gradient(u -> Trixi.total_entropy(u, equations), u) + # test that `entropy2cons` is the inverse of `cons2entropy` + @test entropy2cons(w, equations) ≈ u + end -@trixi_testset "elixir_eulermulti_ec.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulermulti_ec.jl"), - l2=[0.15330089521538684, 0.4417674632047301, - 0.016888510510282385, 0.03377702102056477, - 0.06755404204112954], - linf=[0.29130548795961864, 0.8847009003152357, - 0.034686525099975274, 0.06937305019995055, - 0.1387461003999011]) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + @trixi_testset "elixir_eulermulti_ec.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulermulti_ec.jl"), + l2=[0.15330089521538684, 0.4417674632047301, + 0.016888510510282385, 0.03377702102056477, + 0.06755404204112954], + linf=[0.29130548795961864, 0.8847009003152357, + 0.034686525099975274, 0.06937305019995055, + 0.1387461003999011]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end -end -@trixi_testset "elixir_eulermulti_es.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulermulti_es.jl"), - l2=[ - 0.1522380497572071, - 0.43830846465313206, - 0.03907262116499431, - 0.07814524232998862, - ], - linf=[ - 0.24939193075537294, - 0.7139395740052739, - 0.06324208768391237, - 0.12648417536782475, - ]) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + @trixi_testset "elixir_eulermulti_es.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulermulti_es.jl"), + l2=[ + 0.1522380497572071, + 0.43830846465313206, + 0.03907262116499431, + 0.07814524232998862, + ], + linf=[ + 0.24939193075537294, + 0.7139395740052739, + 0.06324208768391237, + 0.12648417536782475, + ]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end -end -@trixi_testset "elixir_eulermulti_convergence_ec.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulermulti_convergence_ec.jl"), - l2=[ - 8.575236038539227e-5, - 0.00016387804318585358, - 1.9412699303977585e-5, - 
3.882539860795517e-5, - ], - linf=[ - 0.00030593277277124464, - 0.0006244803933350696, - 7.253121435135679e-5, - 0.00014506242870271358, - ]) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + @trixi_testset "elixir_eulermulti_convergence_ec.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulermulti_convergence_ec.jl"), + l2=[ + 8.575236038539227e-5, + 0.00016387804318585358, + 1.9412699303977585e-5, + 3.882539860795517e-5, + ], + linf=[ + 0.00030593277277124464, + 0.0006244803933350696, + 7.253121435135679e-5, + 0.00014506242870271358, + ]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end -end -@trixi_testset "elixir_eulermulti_convergence_es.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulermulti_convergence_es.jl"), - l2=[1.8983933794407234e-5, 6.207744299844731e-5, - 1.5466205761868047e-6, 3.0932411523736094e-6, - 6.186482304747219e-6, 1.2372964609494437e-5], - linf=[0.00012014372605895218, 0.0003313207215800418, - 6.50836791016296e-6, 1.301673582032592e-5, - 2.603347164065184e-5, 5.206694328130368e-5]) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + @trixi_testset "elixir_eulermulti_convergence_es.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulermulti_convergence_es.jl"), + l2=[1.8983933794407234e-5, 6.207744299844731e-5, + 1.5466205761868047e-6, 3.0932411523736094e-6, + 6.186482304747219e-6, 1.2372964609494437e-5], + linf=[0.00012014372605895218, 0.0003313207215800418, + 6.50836791016296e-6, 1.301673582032592e-5, + 2.603347164065184e-5, 5.206694328130368e-5]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end -end -@trixi_testset "elixir_eulermulti_convergence_es.jl with flux_chandrashekar" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulermulti_convergence_es.jl"), - l2=[1.888450477353845e-5, 5.4910600482795386e-5, - 9.426737161533622e-7, 1.8853474323067245e-6, - 3.770694864613449e-6, 7.541389729226898e-6], - linf=[0.00011622351152063004, 0.0003079221967086099, - 3.2177423254231563e-6, 6.435484650846313e-6, - 1.2870969301692625e-5, 2.574193860338525e-5], - volume_flux=flux_chandrashekar) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + @trixi_testset "elixir_eulermulti_convergence_es.jl with flux_chandrashekar" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulermulti_convergence_es.jl"), + l2=[1.888450477353845e-5, 5.4910600482795386e-5, + 9.426737161533622e-7, 1.8853474323067245e-6, + 3.770694864613449e-6, 7.541389729226898e-6], + linf=[0.00011622351152063004, 0.0003079221967086099, + 3.2177423254231563e-6, 6.435484650846313e-6, + 1.2870969301692625e-5, 2.574193860338525e-5], + 
volume_flux=flux_chandrashekar) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end -end -@trixi_testset "elixir_eulermulti_two_interacting_blast_waves.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, - "elixir_eulermulti_two_interacting_blast_waves.jl"), - l2=[1.288867611915533, 82.71335258388848, 0.00350680272313187, - 0.013698784353152794, - 0.019179518517518084], - linf=[29.6413044707026, 1322.5844802186496, 0.09191919374782143, - 0.31092970966717925, - 0.4417989757182038], - tspan=(0.0, 0.0001)) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + @trixi_testset "elixir_eulermulti_two_interacting_blast_waves.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_eulermulti_two_interacting_blast_waves.jl"), + l2=[1.288867611915533, 82.71335258388848, 0.00350680272313187, + 0.013698784353152794, + 0.019179518517518084], + linf=[29.6413044707026, 1322.5844802186496, 0.09191919374782143, + 0.31092970966717925, + 0.4417989757182038], + tspan=(0.0, 0.0001)) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end end -end end # module diff --git a/test/test_tree_2d_eulermulti.jl b/test/test_tree_2d_eulermulti.jl index 30d52b37b96..7c4a4e722e3 100644 --- a/test/test_tree_2d_eulermulti.jl +++ b/test/test_tree_2d_eulermulti.jl @@ -8,234 +8,253 @@ include("test_trixi.jl") EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_2d_dgsem") @testset "Compressible Euler Multicomponent" begin -#! format: noindent + @trixi_testset "Testing entropy2cons and cons2entropy" begin + using ForwardDiff + gammas = (1.1546412974182538, 1.1171560258914812, 1.097107661471476, + 1.0587601652669245, 1.6209889683979308, 1.6732209755396386, + 1.2954303574165822) + gas_constants = (5.969461071171914, 3.6660802003290183, 6.639008614675539, + 8.116604827140456, 6.190706056680031, 1.6795013743693712, + 2.197737590916966) + equations = CompressibleEulerMulticomponentEquations2D(gammas = SVector{length(gammas)}(gammas...), + gas_constants = SVector{length(gas_constants)}(gas_constants...)) + u = [-1.7433292819144075, 0.8844413258376495, 0.6050737175812364, + 0.8261998359817043, 1.0801186290896465, 0.505654488367698, + 0.6364415555805734, 0.851669392285058, 0.31219606420306223, + 1.0930477805612038] + w = cons2entropy(u, equations) + # test that the entropy variables match the gradients of the total entropy + @test w ≈ ForwardDiff.gradient(u -> Trixi.total_entropy(u, equations), u) + # test that `entropy2cons` is the inverse of `cons2entropy` + @test entropy2cons(w, equations) ≈ u + end -# NOTE: Some of the L2/Linf errors are comparably large. This is due to the fact that some of the -# simulations are set up with dimensional states. For example, the reference pressure in SI -# units is 101325 Pa, i.e., pressure has values of O(10^5) + # NOTE: Some of the L2/Linf errors are comparably large. This is due to the fact that some of the + # simulations are set up with dimensional states. 
For example, the reference pressure in SI + # units is 101325 Pa, i.e., pressure has values of O(10^5) -@trixi_testset "elixir_eulermulti_shock_bubble.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulermulti_shock_bubble.jl"), - l2=[ - 73.78467629094177, - 0.9174752929795251, - 57942.83587826468, - 0.1828847253029943, - 0.011127037850925347, - ], - linf=[ - 196.81051991521073, - 7.8456811648529605, - 158891.88930113698, - 0.811379581519794, - 0.08011973559187913, - ], - tspan=(0.0, 0.001)) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + @trixi_testset "elixir_eulermulti_shock_bubble.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulermulti_shock_bubble.jl"), + l2=[ + 73.78467629094177, + 0.9174752929795251, + 57942.83587826468, + 0.1828847253029943, + 0.011127037850925347, + ], + linf=[ + 196.81051991521073, + 7.8456811648529605, + 158891.88930113698, + 0.811379581519794, + 0.08011973559187913, + ], + tspan=(0.0, 0.001)) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end -end -@trixi_testset "elixir_eulermulti_shock_bubble_shockcapturing_subcell_positivity.jl" begin - rm("out/deviations.txt", force = true) - @test_trixi_include(joinpath(EXAMPLES_DIR, - "elixir_eulermulti_shock_bubble_shockcapturing_subcell_positivity.jl"), - l2=[ - 81.52845664909304, - 2.5455678559421346, - 63229.190712645846, - 0.19929478404550321, - 0.011068604228443425, - ], - linf=[ - 249.21708417382013, - 40.33299887640794, - 174205.0118831558, - 0.6881458768113586, - 0.11274401158173972, - ], - initial_refinement_level=3, - tspan=(0.0, 0.001), - output_directory="out") - lines = readlines("out/deviations.txt") - @test lines[1] == "# iter, simu_time, rho1_min, rho2_min" - @test startswith(lines[end], "1") - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 15000 + @trixi_testset "elixir_eulermulti_shock_bubble_shockcapturing_subcell_positivity.jl" begin + rm("out/deviations.txt", force = true) + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_eulermulti_shock_bubble_shockcapturing_subcell_positivity.jl"), + l2=[ + 81.52845664909304, + 2.5455678559421346, + 63229.190712645846, + 0.19929478404550321, + 0.011068604228443425, + ], + linf=[ + 249.21708417382013, + 40.33299887640794, + 174205.0118831558, + 0.6881458768113586, + 0.11274401158173972, + ], + initial_refinement_level=3, + tspan=(0.0, 0.001), + output_directory="out") + lines = readlines("out/deviations.txt") + @test lines[1] == "# iter, simu_time, rho1_min, rho2_min" + @test startswith(lines[end], "1") + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 15000 + end end -end -@trixi_testset "elixir_eulermulti_shock_bubble_shockcapturing_subcell_minmax.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, - "elixir_eulermulti_shock_bubble_shockcapturing_subcell_minmax.jl"), - l2=[ - 73.10832638093902, - 1.4599215762968585, 
- 57176.014861335476, - 0.17812843581838675, - 0.010123079422717837, - ], - linf=[ - 214.50568817511956, - 25.40392579616452, - 152862.41011222568, - 0.564195553101797, - 0.0956331651771212, - ], - initial_refinement_level=3, - tspan=(0.0, 0.001)) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 15000 + @trixi_testset "elixir_eulermulti_shock_bubble_shockcapturing_subcell_minmax.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_eulermulti_shock_bubble_shockcapturing_subcell_minmax.jl"), + l2=[ + 73.10832638093902, + 1.4599215762968585, + 57176.014861335476, + 0.17812843581838675, + 0.010123079422717837, + ], + linf=[ + 214.50568817511956, + 25.40392579616452, + 152862.41011222568, + 0.564195553101797, + 0.0956331651771212, + ], + initial_refinement_level=3, + tspan=(0.0, 0.001)) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 15000 + end end -end -@trixi_testset "elixir_eulermulti_ec.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulermulti_ec.jl"), - l2=[ - 0.050182236154087095, - 0.050189894464434635, - 0.2258715597305131, - 0.06175171559771687, - ], - linf=[ - 0.3108124923284472, - 0.3107380389947733, - 1.054035804988521, - 0.29347582879608936, - ]) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + @trixi_testset "elixir_eulermulti_ec.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulermulti_ec.jl"), + l2=[ + 0.050182236154087095, + 0.050189894464434635, + 0.2258715597305131, + 0.06175171559771687, + ], + linf=[ + 0.3108124923284472, + 0.3107380389947733, + 1.054035804988521, + 0.29347582879608936, + ]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end -end -@trixi_testset "elixir_eulermulti_es.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulermulti_es.jl"), - l2=[ - 0.0496546258404055, - 0.04965550099933263, - 0.22425206549856372, - 0.004087155041747821, - 0.008174310083495642, - 0.016348620166991283, - 0.032697240333982566, - ], - linf=[ - 0.2488251110766228, - 0.24832493304479406, - 0.9310354690058298, - 0.017452870465607374, - 0.03490574093121475, - 0.0698114818624295, - 0.139622963724859, - ]) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + @trixi_testset "elixir_eulermulti_es.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulermulti_es.jl"), + l2=[ + 0.0496546258404055, + 0.04965550099933263, + 0.22425206549856372, + 0.004087155041747821, + 0.008174310083495642, + 0.016348620166991283, + 0.032697240333982566, + ], + linf=[ + 0.2488251110766228, + 0.24832493304479406, + 0.9310354690058298, + 0.017452870465607374, + 0.03490574093121475, + 0.0698114818624295, + 0.139622963724859, + ]) + # Ensure that we do not have 
excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end -end -@trixi_testset "elixir_eulermulti_convergence_ec.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulermulti_convergence_ec.jl"), - l2=[ - 0.00012290225488326508, - 0.00012290225488321876, - 0.00018867397906337653, - 4.8542321753649044e-5, - 9.708464350729809e-5, - ], - linf=[ - 0.0006722819239133315, - 0.0006722819239128874, - 0.0012662292789555885, - 0.0002843844182700561, - 0.0005687688365401122, - ]) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + @trixi_testset "elixir_eulermulti_convergence_ec.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulermulti_convergence_ec.jl"), + l2=[ + 0.00012290225488326508, + 0.00012290225488321876, + 0.00018867397906337653, + 4.8542321753649044e-5, + 9.708464350729809e-5, + ], + linf=[ + 0.0006722819239133315, + 0.0006722819239128874, + 0.0012662292789555885, + 0.0002843844182700561, + 0.0005687688365401122, + ]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end -end -@trixi_testset "elixir_eulermulti_convergence_es.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulermulti_convergence_es.jl"), - l2=[ - 2.2661773867001696e-6, - 2.266177386666318e-6, - 6.593514692980009e-6, - 8.836308667348217e-7, - 1.7672617334696433e-6, - ], - linf=[ - 1.4713170997993075e-5, - 1.4713170997104896e-5, - 5.115618808515521e-5, - 5.3639516094383666e-6, - 1.0727903218876733e-5, - ]) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + @trixi_testset "elixir_eulermulti_convergence_es.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulermulti_convergence_es.jl"), + l2=[ + 2.2661773867001696e-6, + 2.266177386666318e-6, + 6.593514692980009e-6, + 8.836308667348217e-7, + 1.7672617334696433e-6, + ], + linf=[ + 1.4713170997993075e-5, + 1.4713170997104896e-5, + 5.115618808515521e-5, + 5.3639516094383666e-6, + 1.0727903218876733e-5, + ]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end -end -@trixi_testset "elixir_eulermulti_convergence_es.jl with flux_chandrashekar" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulermulti_convergence_es.jl"), - l2=[ - 1.8621737639352465e-6, - 1.862173764098385e-6, - 5.942585713809631e-6, - 6.216263279534722e-7, - 1.2432526559069443e-6, - ], - linf=[ - 1.6235495582606063e-5, - 1.6235495576388814e-5, - 5.854523678827661e-5, - 5.790274858807898e-6, - 1.1580549717615796e-5, - ], - volume_flux=flux_chandrashekar) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + 
@trixi_testset "elixir_eulermulti_convergence_es.jl with flux_chandrashekar" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulermulti_convergence_es.jl"), + l2=[ + 1.8621737639352465e-6, + 1.862173764098385e-6, + 5.942585713809631e-6, + 6.216263279534722e-7, + 1.2432526559069443e-6, + ], + linf=[ + 1.6235495582606063e-5, + 1.6235495576388814e-5, + 5.854523678827661e-5, + 5.790274858807898e-6, + 1.1580549717615796e-5, + ], + volume_flux=flux_chandrashekar) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end end -end end # module From 318e145e72110fe2acf0fd1f685bffe21e4df965 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 15 Dec 2023 17:09:16 +0100 Subject: [PATCH 056/166] CompatHelper: bump compat for T8code to 0.5, (keep existing compat) (#1775) Co-authored-by: CompatHelper Julia Co-authored-by: Hendrik Ranocha --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 267a3aa7066..a834ddeea73 100644 --- a/Project.toml +++ b/Project.toml @@ -86,7 +86,7 @@ StaticArrays = "1" StrideArrays = "0.1.18" StructArrays = "0.6" SummationByPartsOperators = "0.5.41" -T8code = "0.4.3" +T8code = "0.4.3, 0.5" TimerOutputs = "0.5.7" Triangulate = "2.0" TriplotBase = "0.1" From 6bef107cd322f7a31b50770219327de4345d93bc Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Sat, 16 Dec 2023 15:06:09 +0100 Subject: [PATCH 057/166] Add get_proposed_dt to custom integrators (#1776) --- src/time_integration/methods_2N.jl | 5 +++++ src/time_integration/methods_3Sstar.jl | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/src/time_integration/methods_2N.jl b/src/time_integration/methods_2N.jl index d2f22679c4f..f3b09b01e97 100644 --- a/src/time_integration/methods_2N.jl +++ b/src/time_integration/methods_2N.jl @@ -204,6 +204,11 @@ function set_proposed_dt!(integrator::SimpleIntegrator2N, dt) integrator.dt = dt end +# Required e.g. for `glm_speed_callback` +function get_proposed_dt(integrator::SimpleIntegrator2N) + return integrator.dt +end + # stop the time integration function terminate!(integrator::SimpleIntegrator2N) integrator.finalstep = true diff --git a/src/time_integration/methods_3Sstar.jl b/src/time_integration/methods_3Sstar.jl index b0ce5930514..7b70466606c 100644 --- a/src/time_integration/methods_3Sstar.jl +++ b/src/time_integration/methods_3Sstar.jl @@ -282,6 +282,11 @@ function set_proposed_dt!(integrator::SimpleIntegrator3Sstar, dt) integrator.dt = dt end +# Required e.g. for `glm_speed_callback` +function get_proposed_dt(integrator::SimpleIntegrator3Sstar) + return integrator.dt +end + # stop the time integration function terminate!(integrator::SimpleIntegrator3Sstar) integrator.finalstep = true From b5d0a50214abb9b33dc4a4859a4a12c6e3149cb1 Mon Sep 17 00:00:00 2001 From: Jesse Chan <1156048+jlchan@users.noreply.github.com> Date: Mon, 18 Dec 2023 08:36:23 -0600 Subject: [PATCH 058/166] Update NEWS.md (#1780) --- NEWS.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/NEWS.md b/NEWS.md index 265979c3508..abd9fd27882 100644 --- a/NEWS.md +++ b/NEWS.md @@ -38,7 +38,7 @@ for human readability. - Capability to set truly discontinuous initial conditions in 1D. 
- Wetting and drying feature and examples for 1D and 2D shallow water equations - Implementation of the polytropic Euler equations in 2D -- Implementation of the quasi-1D shallow water equations +- Implementation of the quasi-1D shallow water and compressible Euler equations - Subcell (positivity and local min/max) limiting support for conservative variables in 2D for `TreeMesh` - AMR for hyperbolic-parabolic equations on 2D/3D `TreeMesh` From 5a5424c1fcefe602b4854477bebf96dc77c2e9b7 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Wed, 20 Dec 2023 07:33:30 +0100 Subject: [PATCH 059/166] set version to v0.6.5 --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index a834ddeea73..1ef9ee13516 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "] -version = "0.6.5-pre" +version = "0.6.5" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From 622740a9db3c3e1b9bb1f5b0520b18b616d250e6 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Wed, 20 Dec 2023 07:33:42 +0100 Subject: [PATCH 060/166] set development version to v0.6.6-pre --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 1ef9ee13516..6bedec18c78 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "] -version = "0.6.5" +version = "0.6.6-pre" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From 24a365dbf2eb029b9dbd2be3e8404538bc54e89d Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Thu, 21 Dec 2023 09:13:10 +0100 Subject: [PATCH 061/166] hotfix: restrict DiffEqBase.jl to let CI pass (#1788) * hotfix: restrict DiffEqBase.jl to let CI pass * restrict DiffEqBase.jl in main Project.toml * Update Project.toml --- Project.toml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Project.toml b/Project.toml index 6bedec18c78..faf9f82d335 100644 --- a/Project.toml +++ b/Project.toml @@ -7,6 +7,7 @@ version = "0.6.6-pre" CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" ConstructionBase = "187b0558-2788-49d3-abe0-74a17ed4e7c9" DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" +DiffEqBase = "2b5f629d-d688-5b77-993f-72d75c75574e" DiffEqCallbacks = "459566f4-90b8-5000-8ac3-15dfb0a30def" EllipsisNotation = "da5c29d0-fa7d-589e-88eb-ea29b0a81949" FillArrays = "1a297f60-69ca-5386-bcde-b61e274b549b" @@ -54,6 +55,7 @@ TrixiMakieExt = "Makie" CodeTracking = "1.0.5" ConstructionBase = "1.3" DataStructures = "0.18.15" +DiffEqBase = "6 - 6.143" DiffEqCallbacks = "2.25" EllipsisNotation = "1.0" FillArrays = "0.13.2, 1" From 96c5bef1a68729ae61be5a6644d7b7b4756aac8e Mon Sep 17 00:00:00 2001 From: Ahmad Peyvan <115842305+apey236@users.noreply.github.com> Date: Sat, 23 Dec 2023 10:18:42 -0500 Subject: [PATCH 062/166] Parabolic Mortar for AMR `P4estMesh{3}` (#1765) * Clean branch * Un-Comment * un-comment * test coarsen * remove redundancy * Remove support for passive terms * expand resize * comments * format * Avoid code duplication * Update src/callbacks_step/amr_dg1d.jl Co-authored-by: Michael Schlottke-Lakemper * comment * comment & format * Try to increase coverage * Slightly more expressive names * Apply suggestions from code review 
* add specifier for 1d * Structs for resizing parabolic helpers * check if mortars are present * reuse `reinitialize_containers!` * resize calls for parabolic helpers * update analysis callbacks * Velocities for compr euler * Init container * correct copy-paste error * resize each dim * add dispatch * Add AMR for shear layer * USe only amr shear layer * first steps towards p4est parabolic amr * Add tests * remove plots * Format * remove redundant line * platform independent tests * No need for different flux_viscous comps after adding container_viscous to p4est * Laplace 3d * Longer times to allow converage to hit coarsen! * Increase testing of Laplace 3D * Add tests for velocities * remove comment * add elixir for amr testing * adding commented out mortar routines in 2D * Adding Mortar to 2d dg parabolic term * remove testing snippet * fix comments * add more arguments for dispatch * add some temporary todo notes * some updates for AP and KS * specialize mortar_fluxes_to_elements * BUGFIX: apply_jacobian_parabolic! was incorrect for P4estMesh * fixed rhs_parabolic! for mortars * more changes to elixir * indexing bug * comments * Adding the example for nonperiodic BCs with amr * hopefully this fixes AMR boundaries for parabolic terms * add elixir * Example with non periodic bopundary conditions * remove cruft * 3D parabolic amr * TGV elixir * Creating test for AMR 3D parabolic * Formatting * test formatting * Update src/Trixi.jl * Update src/equations/compressible_euler_1d.jl * Update src/equations/compressible_euler_2d.jl * Update src/equations/compressible_euler_3d.jl * Update src/solvers/dgsem_tree/container_viscous_2d.jl * Update src/solvers/dgsem_tree/container_viscous_2d.jl * Update src/solvers/dgsem_tree/container_viscous_2d.jl * Update src/solvers/dgsem_tree/container_viscous_3d.jl * Update src/solvers/dgsem_tree/container_viscous_3d.jl * Update src/solvers/dgsem_tree/container_viscous_3d.jl * Update src/solvers/dgsem_p4est/dg_2d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_2d_parabolic.jl * Update src/solvers/dgsem_tree/dg_2d_parabolic.jl * Update src/solvers/dgsem_tree/dg_2d_parabolic.jl * Update src/solvers/dgsem_tree/dg_2d_parabolic.jl * Update src/solvers/dgsem_tree/dg_2d_parabolic.jl * Update src/solvers/dgsem_tree/dg_2d_parabolic.jl * Update src/solvers/dgsem_tree/dg_2d_parabolic.jl * Update src/solvers/dgsem_tree/dg_2d_parabolic.jl * Update src/solvers/dgsem_tree/dg_2d_parabolic.jl * Update src/solvers/dgsem_tree/dg_2d_parabolic.jl * Update test/test_parabolic_3d.jl * Update src/solvers/dgsem_tree/dg_3d_parabolic.jl * Update src/solvers/dgsem_tree/dg_3d_parabolic.jl * Update src/solvers/dgsem_tree/dg_3d_parabolic.jl * Update src/solvers/dgsem_tree/dg_3d_parabolic.jl * Update src/solvers/dgsem_tree/dg_3d_parabolic.jl * Update src/solvers/dgsem_tree/dg_3d_parabolic.jl * Update src/solvers/dgsem_tree/dg_3d_parabolic.jl * Update src/solvers/dgsem_tree/dg_3d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_3d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_3d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_3d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_3d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_2d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_2d.jl * Update src/solvers/dgsem_p4est/dg_2d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_2d_parabolic.jl * Update test/test_parabolic_3d.jl * Update test/test_parabolic_3d.jl * Update test/test_parabolic_3d.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_3d_blast_wave_amr.jl * Update 
examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_3d_blast_wave_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_3d_blast_wave_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_3d_blast_wave_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_3d_blast_wave_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_3d_blast_wave_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex_amr.jl * Update dg_3d_parabolic.jl * Update test_parabolic_3d.jl * Update elixir_navierstokes_3d_blast_wave_amr.jl * Update elixir_navierstokes_taylor_green_vortex_amr.jl * Update dg_3d_parabolic.jl * Update test_parabolic_3d.jl * Update test_parabolic_3d.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_3d_blast_wave_amr.jl * Update elixir_navierstokes_3d_blast_wave_amr.jl * Update elixir_navierstokes_taylor_green_vortex_amr.jl * Update dg_3d_parabolic.jl * Update test_parabolic_3d.jl * Delete examples/p4est_3d_dgsem/elixir_navierstokes_3d_blast_wave_amr.jl * Create elixir_navierstokes_blast_wave_amr.jl * Update test_parabolic_3d.jl * Update NEWS.md * Update NEWS.md * Update src/solvers/dgsem_p4est/dg_3d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_3d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_3d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_3d_parabolic.jl --------- Co-authored-by: Daniel_Doehring Co-authored-by: Daniel Doehring Co-authored-by: Michael Schlottke-Lakemper Co-authored-by: Jesse Chan <1156048+jlchan@users.noreply.github.com> Co-authored-by: Jesse Chan --- NEWS.md | 5 + .../elixir_navierstokes_blast_wave_amr.jl | 113 ++++++ ...ir_navierstokes_taylor_green_vortex_amr.jl | 106 ++++++ src/callbacks_step/amr_dg2d.jl | 4 +- src/equations/laplace_diffusion_3d.jl | 1 + src/solvers/dgsem_p4est/dg_3d_parabolic.jl | 347 +++++++++++++++++- test/test_parabolic_3d.jl | 67 ++++ 7 files changed, 637 insertions(+), 6 deletions(-) create mode 100644 examples/p4est_3d_dgsem/elixir_navierstokes_blast_wave_amr.jl create mode 100644 examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex_amr.jl diff --git a/NEWS.md b/NEWS.md index abd9fd27882..e7bdd14eab2 100644 --- a/NEWS.md +++ b/NEWS.md @@ -4,6 +4,11 @@ Trixi.jl follows the interpretation of [semantic versioning (semver)](https://ju used in the Julia ecosystem. Notable changes will be documented in this file for human readability. 
+## Changes in the v0.6 lifecycle + +#### Added +- AMR for hyperbolic-parabolic equations on 3D `P4estMesh` + ## Changes when updating to v0.6 from v0.5.x #### Added diff --git a/examples/p4est_3d_dgsem/elixir_navierstokes_blast_wave_amr.jl b/examples/p4est_3d_dgsem/elixir_navierstokes_blast_wave_amr.jl new file mode 100644 index 00000000000..5df89fbcdf2 --- /dev/null +++ b/examples/p4est_3d_dgsem/elixir_navierstokes_blast_wave_amr.jl @@ -0,0 +1,113 @@ + +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the compressible Navier-Stokes equations + +# TODO: parabolic; unify names of these accessor functions +prandtl_number() = 0.72 +mu() = 6.25e-4 # equivalent to Re = 1600 + +equations = CompressibleEulerEquations3D(1.4) +equations_parabolic = CompressibleNavierStokesDiffusion3D(equations, mu = mu(), + Prandtl = prandtl_number()) + +function initial_condition_3d_blast_wave(x, t, equations::CompressibleEulerEquations3D) + rho_c = 1.0 + p_c = 1.0 + u_c = 0.0 + + rho_o = 0.125 + p_o = 0.1 + u_o = 0.0 + + rc = 0.5 + r = sqrt(x[1]^2 + x[2]^2 + x[3]^2) + if r < rc + rho = rho_c + v1 = u_c + v2 = u_c + v3 = u_c + p = p_c + else + rho = rho_o + v1 = u_o + v2 = u_o + v3 = u_o + p = p_o + end + + return prim2cons(SVector(rho, v1, v2, v3, p), equations) +end +initial_condition = initial_condition_3d_blast_wave + +surface_flux = flux_lax_friedrichs +volume_flux = flux_ranocha +polydeg = 3 +basis = LobattoLegendreBasis(polydeg) +indicator_sc = IndicatorHennemannGassner(equations, basis, + alpha_max = 1.0, + alpha_min = 0.001, + alpha_smooth = true, + variable = density_pressure) +volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; + volume_flux_dg = volume_flux, + volume_flux_fv = surface_flux) + +solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux, + volume_integral = volume_integral) + +coordinates_min = (-1.0, -1.0, -1.0) .* pi +coordinates_max = (1.0, 1.0, 1.0) .* pi + +trees_per_dimension = (4, 4, 4) + +mesh = P4estMesh(trees_per_dimension, polydeg = 3, + coordinates_min = coordinates_min, coordinates_max = coordinates_max, + periodicity = (true, true, true), initial_refinement_level = 1) + +semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, equations_parabolic), + initial_condition, solver) + +############################################################################### +# ODE solvers, callbacks etc. 
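# (In this elixir the mesh is adapted during the run: an `IndicatorLöhner` acting on the
#  density controls refinement between levels 0 and 3, and because the semidiscretization
#  is hyperbolic-parabolic, each refinement/coarsening step also rebuilds the parabolic
#  cache, which is the capability this PR adds for `P4estMesh{3}`.)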
+ +tspan = (0.0, 0.8) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) +save_solution = SaveSolutionCallback(interval = analysis_interval, + save_initial_solution = true, + save_final_solution = true, + solution_variables = cons2prim) + +amr_indicator = IndicatorLöhner(semi, variable = Trixi.density) + +amr_controller = ControllerThreeLevel(semi, amr_indicator, + base_level = 0, + med_level = 1, med_threshold = 0.05, + max_level = 3, max_threshold = 0.1) +amr_callback = AMRCallback(semi, amr_controller, + interval = 10, + adapt_initial_condition = true, + adapt_initial_condition_only_refine = true) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + alive_callback, + amr_callback, + save_solution) + +############################################################################### +# run the simulation + +time_int_tol = 1e-8 +sol = solve(ode, RDPK3SpFSAL49(); abstol = time_int_tol, reltol = time_int_tol, + ode_default_options()..., callback = callbacks) +summary_callback() # print the timer summary diff --git a/examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex_amr.jl b/examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex_amr.jl new file mode 100644 index 00000000000..c15227a1c29 --- /dev/null +++ b/examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex_amr.jl @@ -0,0 +1,106 @@ + +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the compressible Navier-Stokes equations + +# TODO: parabolic; unify names of these accessor functions +prandtl_number() = 0.72 +mu() = 6.25e-4 # equivalent to Re = 1600 + +equations = CompressibleEulerEquations3D(1.4) +equations_parabolic = CompressibleNavierStokesDiffusion3D(equations, mu = mu(), + Prandtl = prandtl_number()) + +""" + initial_condition_taylor_green_vortex(x, t, equations::CompressibleEulerEquations3D) + +The classical Taylor-Green vortex. 
+""" +function initial_condition_taylor_green_vortex(x, t, + equations::CompressibleEulerEquations3D) + A = 1.0 # magnitude of speed + Ms = 0.1 # maximum Mach number + + rho = 1.0 + v1 = A * sin(x[1]) * cos(x[2]) * cos(x[3]) + v2 = -A * cos(x[1]) * sin(x[2]) * cos(x[3]) + v3 = 0.0 + p = (A / Ms)^2 * rho / equations.gamma # scaling to get Ms + p = p + + 1.0 / 16.0 * A^2 * rho * + (cos(2 * x[1]) * cos(2 * x[3]) + 2 * cos(2 * x[2]) + 2 * cos(2 * x[1]) + + cos(2 * x[2]) * cos(2 * x[3])) + + return prim2cons(SVector(rho, v1, v2, v3, p), equations) +end +initial_condition = initial_condition_taylor_green_vortex + +@inline function vel_mag(u, equations::CompressibleEulerEquations3D) + rho, rho_v1, rho_v2, rho_v3, _ = u + return sqrt(rho_v1^2 + rho_v2^2 + rho_v3^2) / rho +end + +volume_flux = flux_ranocha +solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs, + volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) + +coordinates_min = (-1.0, -1.0, -1.0) .* pi +coordinates_max = (1.0, 1.0, 1.0) .* pi + +trees_per_dimension = (2, 2, 2) + +mesh = P4estMesh(trees_per_dimension, polydeg = 3, + coordinates_min = coordinates_min, coordinates_max = coordinates_max, + periodicity = (true, true, true), initial_refinement_level = 0) + +semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, equations_parabolic), + initial_condition, solver) + +############################################################################### +# ODE solvers, callbacks etc. + +tspan = (0.0, 0.5) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 50 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval, + save_analysis = true, + extra_analysis_integrals = (energy_kinetic, + energy_internal, + enstrophy)) +save_solution = SaveSolutionCallback(interval = 100, + save_initial_solution = true, + save_final_solution = true, + solution_variables = cons2prim) + +amr_indicator = IndicatorLöhner(semi, variable = vel_mag) + +amr_controller = ControllerThreeLevel(semi, amr_indicator, + base_level = 0, + med_level = 1, med_threshold = 0.1, + max_level = 3, max_threshold = 0.2) + +amr_callback = AMRCallback(semi, amr_controller, + interval = 5, + adapt_initial_condition = false, + adapt_initial_condition_only_refine = false) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + alive_callback, + amr_callback, + save_solution) + +############################################################################### +# run the simulation + +time_int_tol = 1e-8 +sol = solve(ode, RDPK3SpFSAL49(); abstol = time_int_tol, reltol = time_int_tol, + ode_default_options()..., callback = callbacks) +summary_callback() # print the timer summary diff --git a/src/callbacks_step/amr_dg2d.jl b/src/callbacks_step/amr_dg2d.jl index 969f9c564f3..98e531295b7 100644 --- a/src/callbacks_step/amr_dg2d.jl +++ b/src/callbacks_step/amr_dg2d.jl @@ -137,7 +137,7 @@ function refine!(u_ode::AbstractVector, adaptor, mesh::Union{TreeMesh{2}, P4estM end function refine!(u_ode::AbstractVector, adaptor, - mesh::Union{TreeMesh{2}, P4estMesh{2}, TreeMesh{3}}, + mesh::Union{TreeMesh{2}, P4estMesh{2}, TreeMesh{3}, P4estMesh{3}}, equations, dg::DGSEM, cache, cache_parabolic, elements_to_refine) # Call `refine!` for the hyperbolic part, which does the heavy lifting of @@ -299,7 +299,7 @@ function coarsen!(u_ode::AbstractVector, adaptor, end function coarsen!(u_ode::AbstractVector, adaptor, - mesh::Union{TreeMesh{2}, 
P4estMesh{2}, TreeMesh{3}}, + mesh::Union{TreeMesh{2}, P4estMesh{2}, TreeMesh{3}, P4estMesh{3}}, equations, dg::DGSEM, cache, cache_parabolic, elements_to_remove) # Call `coarsen!` for the hyperbolic part, which does the heavy lifting of diff --git a/src/equations/laplace_diffusion_3d.jl b/src/equations/laplace_diffusion_3d.jl index 457e742430b..3988ce7144b 100644 --- a/src/equations/laplace_diffusion_3d.jl +++ b/src/equations/laplace_diffusion_3d.jl @@ -18,6 +18,7 @@ function varnames(variable_mapping, equations_parabolic::LaplaceDiffusion3D) varnames(variable_mapping, equations_parabolic.equations_hyperbolic) end +# no orientation specified since the flux is vector-valued function flux(u, gradients, orientation::Integer, equations_parabolic::LaplaceDiffusion3D) dudx, dudy, dudz = gradients if orientation == 1 diff --git a/src/solvers/dgsem_p4est/dg_3d_parabolic.jl b/src/solvers/dgsem_p4est/dg_3d_parabolic.jl index b06cdd42127..0bb97c7af02 100644 --- a/src/solvers/dgsem_p4est/dg_3d_parabolic.jl +++ b/src/solvers/dgsem_p4est/dg_3d_parabolic.jl @@ -102,8 +102,20 @@ function rhs_parabolic!(du, u, t, mesh::P4estMesh{3}, dg.surface_integral, dg) end - # TODO: parabolic; extend to mortars - @assert nmortars(dg, cache) == 0 + # Prolong solution to mortars (specialized for AbstractEquationsParabolic) + # !!! NOTE: we reuse the hyperbolic cache here since it contains "mortars" and "u_threaded" + # !!! Is this OK? + @trixi_timeit timer() "prolong2mortars" begin + prolong2mortars_divergence!(cache, flux_viscous, mesh, equations_parabolic, + dg.mortar, dg.surface_integral, dg) + end + + # Calculate mortar fluxes (specialized for AbstractEquationsParabolic) + @trixi_timeit timer() "mortar flux" begin + calc_mortar_flux_divergence!(cache_parabolic.elements.surface_flux_values, + mesh, equations_parabolic, dg.mortar, + dg.surface_integral, dg, cache) + end # Calculate surface integrals @trixi_timeit timer() "surface integral" begin @@ -230,8 +242,23 @@ function calc_gradient!(gradients, u_transformed, t, mesh, equations_parabolic, dg.surface_integral, dg) end - # TODO: parabolic; mortars - @assert nmortars(dg, cache) == 0 + # Prolong solution to mortars. These should reuse the hyperbolic version of `prolong2mortars` + # !!! NOTE: we reuse the hyperbolic cache here, since it contains both `mortars` and `u_threaded`. + # !!! should we have a separate mortars/u_threaded in cache_parabolic? + @trixi_timeit timer() "prolong2mortars" begin + prolong2mortars!(cache, u_transformed, mesh, equations_parabolic, + dg.mortar, dg.surface_integral, dg) + end + + # Calculate mortar fluxes. These should reuse the hyperbolic version of `calc_mortar_flux`, + # along with a specialization on `calc_mortar_flux!` and `mortar_fluxes_to_elements!` for + # AbstractEquationsParabolic. + @trixi_timeit timer() "mortar flux" begin + calc_mortar_flux!(cache_parabolic.elements.surface_flux_values, + mesh, False(), # False() = no nonconservative terms + equations_parabolic, + dg.mortar, dg.surface_integral, dg, cache) + end # Calculate surface integrals @trixi_timeit timer() "surface integral" begin @@ -324,6 +351,93 @@ function calc_gradient!(gradients, u_transformed, t, return nothing end +# This version is called during `calc_gradients!` and must be specialized because the flux +# in the gradient is {u} which doesn't depend on normals. 
Thus, you don't need to scale by +# 2 and flip the sign when storing the mortar fluxes back into surface_flux_values +@inline function mortar_fluxes_to_elements!(surface_flux_values, + mesh::P4estMesh{3}, + equations::AbstractEquationsParabolic, + mortar_l2::LobattoLegendreMortarL2, + dg::DGSEM, cache, mortar, fstar, u_buffer, + fstar_tmp) + @unpack neighbor_ids, node_indices = cache.mortars + index_range = eachnode(dg) + # Copy solution small to small + small_indices = node_indices[1, mortar] + small_direction = indices2direction(small_indices) + + for position in 1:4 # Loop over small elements + element = neighbor_ids[position, mortar] + for j in eachnode(dg), i in eachnode(dg) + for v in eachvariable(equations) + surface_flux_values[v, i, j, small_direction, element] = fstar[v, i, j, + position] + end + end + end + + # Project small fluxes to large element. + multiply_dimensionwise!(u_buffer, + mortar_l2.reverse_lower, mortar_l2.reverse_lower, + view(fstar, .., 1), + fstar_tmp) + add_multiply_dimensionwise!(u_buffer, + mortar_l2.reverse_upper, mortar_l2.reverse_lower, + view(fstar, .., 2), + fstar_tmp) + add_multiply_dimensionwise!(u_buffer, + mortar_l2.reverse_lower, mortar_l2.reverse_upper, + view(fstar, .., 3), + fstar_tmp) + add_multiply_dimensionwise!(u_buffer, + mortar_l2.reverse_upper, mortar_l2.reverse_upper, + view(fstar, .., 4), + fstar_tmp) + + # The flux is calculated in the outward direction of the small elements, + # so the sign must be switched to get the flux in outward direction + # of the large element. + # The contravariant vectors of the large element (and therefore the normal + # vectors of the large element as well) are twice as large as the + # contravariant vectors of the small elements. Therefore, the flux needs + # to be scaled by a factor of 2 to obtain the flux of the large element. + # u_buffer .*= 0.5 + + # Copy interpolated flux values from buffer to large element face in the + # correct orientation. + # Note that the index of the small sides will always run forward but + # the index of the large side might need to run backwards for flipped sides. + large_element = neighbor_ids[5, mortar] + large_indices = node_indices[2, mortar] + large_direction = indices2direction(large_indices) + large_surface_indices = surface_indices(large_indices) + + i_large_start, i_large_step_i, i_large_step_j = index_to_start_step_3d(large_surface_indices[1], + index_range) + j_large_start, j_large_step_i, j_large_step_j = index_to_start_step_3d(large_surface_indices[2], + index_range) + + # Note that the indices of the small sides will always run forward but + # the large indices might need to run backwards for flipped sides. 
+ i_large = i_large_start + j_large = j_large_start + for j in eachnode(dg) + for i in eachnode(dg) + for v in eachvariable(equations) + surface_flux_values[v, i_large, j_large, large_direction, large_element] = u_buffer[v, + i, + j] + end + i_large += i_large_step_i + j_large += j_large_step_i + end + i_large += i_large_step_j + j_large += j_large_step_j + end + + return nothing +end + # This version is used for parabolic gradient computations @inline function calc_interface_flux!(surface_flux_values, mesh::P4estMesh{3}, nonconservative_terms::False, @@ -603,6 +717,231 @@ function calc_interface_flux!(surface_flux_values, return nothing end +function prolong2mortars_divergence!(cache, flux_viscous, + mesh::Union{P4estMesh{3}, T8codeMesh{3}}, equations, + mortar_l2::LobattoLegendreMortarL2, + surface_integral, dg::DGSEM) + @unpack neighbor_ids, node_indices = cache.mortars + @unpack fstar_tmp_threaded = cache + @unpack contravariant_vectors = cache.elements + index_range = eachnode(dg) + + flux_viscous_x, flux_viscous_y, flux_viscous_z = flux_viscous + + @threaded for mortar in eachmortar(dg, cache) + # Copy solution data from the small elements using "delayed indexing" with + # a start value and a step size to get the correct face and orientation. + small_indices = node_indices[1, mortar] + direction_index = indices2direction(small_indices) + + i_small_start, i_small_step_i, i_small_step_j = index_to_start_step_3d(small_indices[1], + index_range) + j_small_start, j_small_step_i, j_small_step_j = index_to_start_step_3d(small_indices[2], + index_range) + k_small_start, k_small_step_i, k_small_step_j = index_to_start_step_3d(small_indices[3], + index_range) + + for position in 1:4 # Loop over small elements + i_small = i_small_start + j_small = j_small_start + k_small = k_small_start + element = neighbor_ids[position, mortar] + for j in eachnode(dg) + for i in eachnode(dg) + normal_direction = get_normal_direction(direction_index, + contravariant_vectors, + i_small, j_small, k_small, + element) + + for v in eachvariable(equations) + flux_viscous = SVector(flux_viscous_x[v, i_small, j_small, k_small, + element], + flux_viscous_y[v, i_small, j_small, k_small, + element], + flux_viscous_z[v, i_small, j_small, k_small, + element]) + + cache.mortars.u[1, v, position, i, j, mortar] = dot(flux_viscous, + normal_direction) + end + i_small += i_small_step_i + j_small += j_small_step_i + k_small += k_small_step_i + end + i_small += i_small_step_j + j_small += j_small_step_j + k_small += k_small_step_j + end + end + + # Buffer to copy solution values of the large element in the correct orientation + # before interpolating + u_buffer = cache.u_threaded[Threads.threadid()] + + # temporary buffer for projections + fstar_tmp = fstar_tmp_threaded[Threads.threadid()] + + # Copy solution of large element face to buffer in the + # correct orientation + large_indices = node_indices[2, mortar] + + i_large_start, i_large_step_i, i_large_step_j = index_to_start_step_3d(large_indices[1], + index_range) + j_large_start, j_large_step_i, j_large_step_j = index_to_start_step_3d(large_indices[2], + index_range) + k_large_start, k_large_step_i, k_large_step_j = index_to_start_step_3d(large_indices[3], + index_range) + + i_large = i_large_start + j_large = j_large_start + k_large = k_large_start + element = neighbor_ids[5, mortar] # Large element + for j in eachnode(dg) + for i in eachnode(dg) + normal_direction = get_normal_direction(direction_index, + contravariant_vectors, + i_large, j_large, k_large, element) + + for 
v in eachvariable(equations) + flux_viscous = SVector(flux_viscous_x[v, i_large, j_large, k_large, + element], + flux_viscous_y[v, i_large, j_large, k_large, + element], + flux_viscous_z[v, i_large, j_large, k_large, + element]) + + # We prolong the viscous flux dotted with respect the outward normal + # on the small element. We scale by -1/2 here because the normal + # direction on the large element is negative 2x that of the small + # element (these normal directions are "scaled" by the surface Jacobian) + u_buffer[v, i, j] = -0.5 * dot(flux_viscous, normal_direction) + end + i_large += i_large_step_i + j_large += j_large_step_i + k_large += k_large_step_i + end + i_large += i_large_step_j + j_large += j_large_step_j + k_large += k_large_step_j + end + + # Interpolate large element face data from buffer to small face locations + multiply_dimensionwise!(view(cache.mortars.u, 2, :, 1, :, :, mortar), + mortar_l2.forward_lower, + mortar_l2.forward_lower, + u_buffer, + fstar_tmp) + multiply_dimensionwise!(view(cache.mortars.u, 2, :, 2, :, :, mortar), + mortar_l2.forward_upper, + mortar_l2.forward_lower, + u_buffer, + fstar_tmp) + multiply_dimensionwise!(view(cache.mortars.u, 2, :, 3, :, :, mortar), + mortar_l2.forward_lower, + mortar_l2.forward_upper, + u_buffer, + fstar_tmp) + multiply_dimensionwise!(view(cache.mortars.u, 2, :, 4, :, :, mortar), + mortar_l2.forward_upper, + mortar_l2.forward_upper, + u_buffer, + fstar_tmp) + end + + return nothing +end + +# We specialize `calc_mortar_flux!` for the divergence part of +# the parabolic terms. +function calc_mortar_flux_divergence!(surface_flux_values, + mesh::Union{P4estMesh{3}, T8codeMesh{3}}, + equations::AbstractEquationsParabolic, + mortar_l2::LobattoLegendreMortarL2, + surface_integral, dg::DG, cache) + @unpack neighbor_ids, node_indices = cache.mortars + @unpack contravariant_vectors = cache.elements + @unpack fstar_threaded, fstar_tmp_threaded = cache + index_range = eachnode(dg) + + @threaded for mortar in eachmortar(dg, cache) + # Choose thread-specific pre-allocated container + fstar = fstar_threaded[Threads.threadid()] + fstar_tmp = fstar_tmp_threaded[Threads.threadid()] + + # Get index information on the small elements + small_indices = node_indices[1, mortar] + small_direction = indices2direction(small_indices) + + i_small_start, i_small_step_i, i_small_step_j = index_to_start_step_3d(small_indices[1], + index_range) + j_small_start, j_small_step_i, j_small_step_j = index_to_start_step_3d(small_indices[2], + index_range) + k_small_start, k_small_step_i, k_small_step_j = index_to_start_step_3d(small_indices[3], + index_range) + + for position in 1:4 # Loop over small elements + i_small = i_small_start + j_small = j_small_start + k_small = k_small_start + element = neighbor_ids[position, mortar] + for j in eachnode(dg) + for i in eachnode(dg) + for v in eachvariable(equations) + viscous_flux_normal_ll = cache.mortars.u[1, v, position, i, j, + mortar] + viscous_flux_normal_rr = cache.mortars.u[2, v, position, i, j, + mortar] + + # TODO: parabolic; only BR1 at the moment + fstar[v, i, j, position] = 0.5 * (viscous_flux_normal_ll + + viscous_flux_normal_rr) + end + + i_small += i_small_step_i + j_small += j_small_step_i + k_small += k_small_step_i + end + i_small += i_small_step_j + j_small += j_small_step_j + k_small += k_small_step_j + end + end + + # Buffer to interpolate flux values of the large element to before + # copying in the correct orientation + u_buffer = cache.u_threaded[Threads.threadid()] + + # this reuses the 
hyperbolic version of `mortar_fluxes_to_elements!` + mortar_fluxes_to_elements!(surface_flux_values, + mesh, equations, mortar_l2, dg, cache, + mortar, fstar, u_buffer, fstar_tmp) + end + + return nothing +end + +# NOTE: Use analogy to "calc_mortar_flux!" for hyperbolic eqs with no nonconservative terms. +# Reasoning: "calc_interface_flux!" for parabolic part is implemented as the version for +# hyperbolic terms with conserved terms only, i.e., no nonconservative terms. +@inline function calc_mortar_flux!(fstar, + mesh::P4estMesh{3}, + nonconservative_terms::False, + equations::AbstractEquationsParabolic, + surface_integral, dg::DG, cache, + mortar_index, position_index, normal_direction, + i_node_index, j_node_index) + @unpack u = cache.mortars + @unpack surface_flux = surface_integral + + u_ll, u_rr = get_surface_node_vars(u, equations, dg, position_index, i_node_index, + j_node_index, mortar_index) + + # TODO: parabolic; only BR1 at the moment + flux_ = 0.5 * (u_ll + u_rr) + # Copy flux to buffer + set_node_vars!(fstar, flux_, equations, dg, i_node_index, j_node_index, position_index) +end + # TODO: parabolic, finish implementing `calc_boundary_flux_gradients!` and `calc_boundary_flux_divergence!` function prolong2boundaries!(cache_parabolic, flux_viscous, mesh::P4estMesh{3}, diff --git a/test/test_parabolic_3d.jl b/test/test_parabolic_3d.jl index d6c720cf0d9..6fbfb8259d4 100644 --- a/test/test_parabolic_3d.jl +++ b/test/test_parabolic_3d.jl @@ -429,6 +429,14 @@ end "elixir_advection_diffusion_amr.jl"), l2=[0.000355780485397024], linf=[0.0010810770271614256]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "TreeMesh3D: elixir_advection_diffusion_nonperiodic.jl" begin @@ -436,6 +444,65 @@ end "elixir_advection_diffusion_nonperiodic.jl"), l2=[0.0009808996243280868], linf=[0.01732621559135459]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + +@trixi_testset "P4estMesh3D: elixir_navierstokes_taylor_green_vortex_amr.jl" begin + @test_trixi_include(joinpath(examples_dir(), "p4est_3d_dgsem", + "elixir_navierstokes_taylor_green_vortex_amr.jl"), + initial_refinement_level=0, tspan=(0.0, 0.5), + l2=[ + 0.0016588740573444188, + 0.03437058632045721, + 0.03437058632045671, + 0.041038898400430075, + 0.30978593009044153, + ], + linf=[ + 0.004173569912012121, + 0.09168674832979556, + 0.09168674832975021, + 0.12129218723807476, + 0.8433893297612087, + ]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + +@trixi_testset "P4estMesh3D: elixir_navierstokes_blast_wave_amr.jl" begin + @test_trixi_include(joinpath(examples_dir(), "p4est_3d_dgsem", + "elixir_navierstokes_blast_wave_amr.jl"), + tspan=(0.0, 0.01), + l2=[ + 0.009472104410520866, 0.0017883742549557149, + 0.0017883742549557147, 0.0017883742549557196, + 0.024388540048562748, + ], + linf=[ + 0.6782397526873181, 0.17663702154066238, + 0.17663702154066266, 0.17663702154066238, 1.7327849844825238, + ]) + # Ensure that we do not have excessive memory allocations + # (e.g., 
from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end end From dc8e9272d4e2f4b8df7fc4e72e12a786666fde47 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Sun, 24 Dec 2023 13:12:13 +0100 Subject: [PATCH 063/166] reset the timer also on non-root MPI processes (#1787) Co-authored-by: Michael Schlottke-Lakemper --- src/callbacks_step/summary.jl | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/callbacks_step/summary.jl b/src/callbacks_step/summary.jl index 566f2c03418..21c7fc780a5 100644 --- a/src/callbacks_step/summary.jl +++ b/src/callbacks_step/summary.jl @@ -152,7 +152,13 @@ function initialize_summary_callback(cb::DiscreteCallback, u, t, integrator; Polyester.reset_threads!() end - mpi_isroot() || return nothing + # The summary callback should only print information on the root process. + # However, all other MPI processes should also reset the timer so that + # it can be used to diagnose performance. + if !mpi_isroot() + reset_timer!(timer()) + return nothing + end print_startup_message() From a633f9c30b89bca886278a716cafd98c6046becd Mon Sep 17 00:00:00 2001 From: Benedict <135045760+bgeihe@users.noreply.github.com> Date: Sat, 30 Dec 2023 15:49:50 +0100 Subject: [PATCH 064/166] Add HLLC flux for non-cartesian meshes to CompressibleEulerEquations{2,3}D (#1790) * add HLLC flux for non-cartesian meshes * add tests for HLLC flux * Add 2D test with HLLC * Update test_p4est_3d.jl * Update test_p4est_2d.jl * Update test_p4est_3d.jl * Update src/equations/compressible_euler_3d.jl Co-authored-by: Hendrik Ranocha * Update src/equations/compressible_euler_2d.jl Co-authored-by: Hendrik Ranocha * Update compressible_euler_2d.jl * Update compressible_euler_3d.jl * Update test_p4est_2d.jl * Update test_p4est_3d.jl * Update compressible_euler_2d.jl * Update compressible_euler_2d.jl --------- Co-authored-by: Daniel Doehring Co-authored-by: Hendrik Ranocha --- NEWS.md | 1 + src/equations/compressible_euler_2d.jl | 102 +++++++++++++++++++-- src/equations/compressible_euler_3d.jl | 119 +++++++++++++++++++++++-- test/test_p4est_2d.jl | 26 ++++++ test/test_p4est_3d.jl | 28 ++++++ test/test_unit.jl | 46 +++++++++- 6 files changed, 305 insertions(+), 17 deletions(-) diff --git a/NEWS.md b/NEWS.md index e7bdd14eab2..cf695912ed7 100644 --- a/NEWS.md +++ b/NEWS.md @@ -8,6 +8,7 @@ for human readability. #### Added - AMR for hyperbolic-parabolic equations on 3D `P4estMesh` +- `flux_hllc` on non-cartesian meshes for `CompressibleEulerEquations{2,3}D` ## Changes when updating to v0.6 from v0.5.x diff --git a/src/equations/compressible_euler_2d.jl b/src/equations/compressible_euler_2d.jl index a992f99eaf4..b0fd5c53f45 100644 --- a/src/equations/compressible_euler_2d.jl +++ b/src/equations/compressible_euler_2d.jl @@ -1150,7 +1150,7 @@ end end """ - flux_hllc(u_ll, u_rr, orientation, equations::CompressibleEulerEquations2D) + flux_hllc(u_ll, u_rr, orientation_or_normal_direction, equations::CompressibleEulerEquations2D) Computes the HLLC flux (HLL with Contact) for compressible Euler equations developed by E.F. 
Toro [Lecture slides](http://www.prague-sum.com/download/2012/Toro_2-HLLC-RiemannSolver.pdf) @@ -1185,18 +1185,18 @@ function flux_hllc(u_ll, u_rr, orientation::Integer, if orientation == 1 # x-direction vel_L = v1_ll vel_R = v1_rr - ekin_roe = (sqrt_rho_ll * v2_ll + sqrt_rho_rr * v2_rr)^2 elseif orientation == 2 # y-direction vel_L = v2_ll vel_R = v2_rr - ekin_roe = (sqrt_rho_ll * v1_ll + sqrt_rho_rr * v1_rr)^2 end vel_roe = (sqrt_rho_ll * vel_L + sqrt_rho_rr * vel_R) / sum_sqrt_rho - ekin_roe = 0.5 * (vel_roe^2 + ekin_roe / sum_sqrt_rho^2) + v1_roe = sqrt_rho_ll * v1_ll + sqrt_rho_rr * v1_rr + v2_roe = sqrt_rho_ll * v2_ll + sqrt_rho_rr * v2_rr + vel_roe_mag = (v1_roe^2 + v2_roe^2) / sum_sqrt_rho^2 H_ll = (rho_e_ll + p_ll) / rho_ll H_rr = (rho_e_rr + p_rr) / rho_rr H_roe = (sqrt_rho_ll * H_ll + sqrt_rho_rr * H_rr) / sum_sqrt_rho - c_roe = sqrt((equations.gamma - 1) * (H_roe - ekin_roe)) + c_roe = sqrt((equations.gamma - 1) * (H_roe - 0.5 * vel_roe_mag)) Ssl = min(vel_L - c_ll, vel_roe - c_roe) Ssr = max(vel_R + c_rr, vel_roe + c_roe) sMu_L = Ssl - vel_L @@ -1252,6 +1252,98 @@ function flux_hllc(u_ll, u_rr, orientation::Integer, return SVector(f1, f2, f3, f4) end +function flux_hllc(u_ll, u_rr, normal_direction::AbstractVector, + equations::CompressibleEulerEquations2D) + # Calculate primitive variables and speed of sound + rho_ll, v1_ll, v2_ll, p_ll = cons2prim(u_ll, equations) + rho_rr, v1_rr, v2_rr, p_rr = cons2prim(u_rr, equations) + + v_dot_n_ll = v1_ll * normal_direction[1] + v2_ll * normal_direction[2] + v_dot_n_rr = v1_rr * normal_direction[1] + v2_rr * normal_direction[2] + + norm_ = norm(normal_direction) + norm_sq = norm_ * norm_ + inv_norm_sq = inv(norm_sq) + + c_ll = sqrt(equations.gamma * p_ll / rho_ll) * norm_ + c_rr = sqrt(equations.gamma * p_rr / rho_rr) * norm_ + + # Obtain left and right fluxes + f_ll = flux(u_ll, normal_direction, equations) + f_rr = flux(u_rr, normal_direction, equations) + + # Compute Roe averages + sqrt_rho_ll = sqrt(rho_ll) + sqrt_rho_rr = sqrt(rho_rr) + sum_sqrt_rho = sqrt_rho_ll + sqrt_rho_rr + + v1_roe = (sqrt_rho_ll * v1_ll + sqrt_rho_rr * v1_rr) / sum_sqrt_rho + v2_roe = (sqrt_rho_ll * v2_ll + sqrt_rho_rr * v2_rr) / sum_sqrt_rho + vel_roe = v1_roe * normal_direction[1] + v2_roe * normal_direction[2] + vel_roe_mag = v1_roe^2 + v2_roe^2 + + e_ll = u_ll[4] / rho_ll + e_rr = u_rr[4] / rho_rr + + H_ll = (u_ll[4] + p_ll) / rho_ll + H_rr = (u_rr[4] + p_rr) / rho_rr + + H_roe = (sqrt_rho_ll * H_ll + sqrt_rho_rr * H_rr) / sum_sqrt_rho + c_roe = sqrt((equations.gamma - 1) * (H_roe - 0.5 * vel_roe_mag)) * norm_ + + Ssl = min(v_dot_n_ll - c_ll, vel_roe - c_roe) + Ssr = max(v_dot_n_rr + c_rr, vel_roe + c_roe) + sMu_L = Ssl - v_dot_n_ll + sMu_R = Ssr - v_dot_n_rr + + if Ssl >= 0.0 + f1 = f_ll[1] + f2 = f_ll[2] + f3 = f_ll[3] + f4 = f_ll[4] + elseif Ssr <= 0.0 + f1 = f_rr[1] + f2 = f_rr[2] + f3 = f_rr[3] + f4 = f_rr[4] + else + SStar = (rho_ll * v_dot_n_ll * sMu_L - rho_rr * v_dot_n_rr * sMu_R + + (p_rr - p_ll) * norm_sq) / (rho_ll * sMu_L - rho_rr * sMu_R) + if Ssl <= 0.0 <= SStar + densStar = rho_ll * sMu_L / (Ssl - SStar) + enerStar = e_ll + + (SStar - v_dot_n_ll) * + (SStar * inv_norm_sq + p_ll / (rho_ll * sMu_L)) + UStar1 = densStar + UStar2 = densStar * + (v1_ll + (SStar - v_dot_n_ll) * normal_direction[1] * inv_norm_sq) + UStar3 = densStar * + (v2_ll + (SStar - v_dot_n_ll) * normal_direction[2] * inv_norm_sq) + UStar4 = densStar * enerStar + f1 = f_ll[1] + Ssl * (UStar1 - u_ll[1]) + f2 = f_ll[2] + Ssl * (UStar2 - u_ll[2]) + f3 = f_ll[3] + Ssl * (UStar3 
- u_ll[3]) + f4 = f_ll[4] + Ssl * (UStar4 - u_ll[4]) + else + densStar = rho_rr * sMu_R / (Ssr - SStar) + enerStar = e_rr + + (SStar - v_dot_n_rr) * + (SStar * inv_norm_sq + p_rr / (rho_rr * sMu_R)) + UStar1 = densStar + UStar2 = densStar * + (v1_rr + (SStar - v_dot_n_rr) * normal_direction[1] * inv_norm_sq) + UStar3 = densStar * + (v2_rr + (SStar - v_dot_n_rr) * normal_direction[2] * inv_norm_sq) + UStar4 = densStar * enerStar + f1 = f_rr[1] + Ssr * (UStar1 - u_rr[1]) + f2 = f_rr[2] + Ssr * (UStar2 - u_rr[2]) + f3 = f_rr[3] + Ssr * (UStar3 - u_rr[3]) + f4 = f_rr[4] + Ssr * (UStar4 - u_rr[4]) + end + end + return SVector(f1, f2, f3, f4) +end + """ min_max_speed_einfeldt(u_ll, u_rr, orientation, equations::CompressibleEulerEquations2D) diff --git a/src/equations/compressible_euler_3d.jl b/src/equations/compressible_euler_3d.jl index fc56f58025b..82c4a7efa32 100644 --- a/src/equations/compressible_euler_3d.jl +++ b/src/equations/compressible_euler_3d.jl @@ -1192,7 +1192,7 @@ end end """ - flux_hllc(u_ll, u_rr, orientation, equations::CompressibleEulerEquations3D) + flux_hllc(u_ll, u_rr, orientation_or_normal_direction, equations::CompressibleEulerEquations3D) Computes the HLLC flux (HLL with Contact) for compressible Euler equations developed by E.F. Toro [Lecture slides](http://www.prague-sum.com/download/2012/Toro_2-HLLC-RiemannSolver.pdf) @@ -1231,25 +1231,22 @@ function flux_hllc(u_ll, u_rr, orientation::Integer, if orientation == 1 # x-direction vel_L = v1_ll vel_R = v1_rr - ekin_roe = (sqrt_rho_ll * v2_ll + sqrt_rho_rr * v2_rr)^2 + - (sqrt_rho_ll * v3_ll + sqrt_rho_rr * v3_rr)^2 elseif orientation == 2 # y-direction vel_L = v2_ll vel_R = v2_rr - ekin_roe = (sqrt_rho_ll * v1_ll + sqrt_rho_rr * v1_rr)^2 + - (sqrt_rho_ll * v3_ll + sqrt_rho_rr * v3_rr)^2 else # z-direction vel_L = v3_ll vel_R = v3_rr - ekin_roe = (sqrt_rho_ll * v1_ll + sqrt_rho_rr * v1_rr)^2 + - (sqrt_rho_ll * v2_ll + sqrt_rho_rr * v2_rr)^2 end vel_roe = (sqrt_rho_ll * vel_L + sqrt_rho_rr * vel_R) / sum_sqrt_rho - ekin_roe = 0.5 * (vel_roe^2 + ekin_roe / sum_sqrt_rho^2) + v1_roe = sqrt_rho_ll * v1_ll + sqrt_rho_rr * v1_rr + v2_roe = sqrt_rho_ll * v2_ll + sqrt_rho_rr * v2_rr + v3_roe = sqrt_rho_ll * v3_ll + sqrt_rho_rr * v3_rr + vel_roe_mag = (v1_roe^2 + v2_roe^2 + v3_roe^2) / sum_sqrt_rho^2 H_ll = (rho_e_ll + p_ll) / rho_ll H_rr = (rho_e_rr + p_rr) / rho_rr H_roe = (sqrt_rho_ll * H_ll + sqrt_rho_rr * H_rr) / sum_sqrt_rho - c_roe = sqrt((equations.gamma - 1) * (H_roe - ekin_roe)) + c_roe = sqrt((equations.gamma - 1) * (H_roe - 0.5 * vel_roe_mag)) Ssl = min(vel_L - c_ll, vel_roe - c_roe) Ssr = max(vel_R + c_rr, vel_roe + c_roe) sMu_L = Ssl - vel_L @@ -1321,6 +1318,110 @@ function flux_hllc(u_ll, u_rr, orientation::Integer, return SVector(f1, f2, f3, f4, f5) end +function flux_hllc(u_ll, u_rr, normal_direction::AbstractVector, + equations::CompressibleEulerEquations3D) + # Calculate primitive variables and speed of sound + rho_ll, v1_ll, v2_ll, v3_ll, p_ll = cons2prim(u_ll, equations) + rho_rr, v1_rr, v2_rr, v3_rr, p_rr = cons2prim(u_rr, equations) + + v_dot_n_ll = v1_ll * normal_direction[1] + v2_ll * normal_direction[2] + + v3_ll * normal_direction[3] + v_dot_n_rr = v1_rr * normal_direction[1] + v2_rr * normal_direction[2] + + v3_rr * normal_direction[3] + + norm_ = norm(normal_direction) + norm_sq = norm_ * norm_ + inv_norm_sq = inv(norm_sq) + + c_ll = sqrt(equations.gamma * p_ll / rho_ll) * norm_ + c_rr = sqrt(equations.gamma * p_rr / rho_rr) * norm_ + + # Obtain left and right fluxes + f_ll = flux(u_ll, normal_direction, 
equations) + f_rr = flux(u_rr, normal_direction, equations) + + # Compute Roe averages + sqrt_rho_ll = sqrt(rho_ll) + sqrt_rho_rr = sqrt(rho_rr) + sum_sqrt_rho = sqrt_rho_ll + sqrt_rho_rr + + v1_roe = (sqrt_rho_ll * v1_ll + sqrt_rho_rr * v1_rr) / sum_sqrt_rho + v2_roe = (sqrt_rho_ll * v2_ll + sqrt_rho_rr * v2_rr) / sum_sqrt_rho + v3_roe = (sqrt_rho_ll * v3_ll + sqrt_rho_rr * v3_rr) / sum_sqrt_rho + vel_roe = v1_roe * normal_direction[1] + v2_roe * normal_direction[2] + + v3_roe * normal_direction[3] + vel_roe_mag = v1_roe^2 + v2_roe^2 + v3_roe^2 + + e_ll = u_ll[5] / rho_ll + e_rr = u_rr[5] / rho_rr + + H_ll = (u_ll[5] + p_ll) / rho_ll + H_rr = (u_rr[5] + p_rr) / rho_rr + + H_roe = (sqrt_rho_ll * H_ll + sqrt_rho_rr * H_rr) / sum_sqrt_rho + c_roe = sqrt((equations.gamma - 1) * (H_roe - 0.5 * vel_roe_mag)) * norm_ + + Ssl = min(v_dot_n_ll - c_ll, vel_roe - c_roe) + Ssr = max(v_dot_n_rr + c_rr, vel_roe + c_roe) + sMu_L = Ssl - v_dot_n_ll + sMu_R = Ssr - v_dot_n_rr + + if Ssl >= 0.0 + f1 = f_ll[1] + f2 = f_ll[2] + f3 = f_ll[3] + f4 = f_ll[4] + f5 = f_ll[5] + elseif Ssr <= 0.0 + f1 = f_rr[1] + f2 = f_rr[2] + f3 = f_rr[3] + f4 = f_rr[4] + f5 = f_rr[5] + else + SStar = (rho_ll * v_dot_n_ll * sMu_L - rho_rr * v_dot_n_rr * sMu_R + + (p_rr - p_ll) * norm_sq) / (rho_ll * sMu_L - rho_rr * sMu_R) + if Ssl <= 0.0 <= SStar + densStar = rho_ll * sMu_L / (Ssl - SStar) + enerStar = e_ll + + (SStar - v_dot_n_ll) * + (SStar * inv_norm_sq + p_ll / (rho_ll * sMu_L)) + UStar1 = densStar + UStar2 = densStar * + (v1_ll + (SStar - v_dot_n_ll) * normal_direction[1] * inv_norm_sq) + UStar3 = densStar * + (v2_ll + (SStar - v_dot_n_ll) * normal_direction[2] * inv_norm_sq) + UStar4 = densStar * + (v3_ll + (SStar - v_dot_n_ll) * normal_direction[3] * inv_norm_sq) + UStar5 = densStar * enerStar + f1 = f_ll[1] + Ssl * (UStar1 - u_ll[1]) + f2 = f_ll[2] + Ssl * (UStar2 - u_ll[2]) + f3 = f_ll[3] + Ssl * (UStar3 - u_ll[3]) + f4 = f_ll[4] + Ssl * (UStar4 - u_ll[4]) + f5 = f_ll[5] + Ssl * (UStar5 - u_ll[5]) + else + densStar = rho_rr * sMu_R / (Ssr - SStar) + enerStar = e_rr + + (SStar - v_dot_n_rr) * + (SStar * inv_norm_sq + p_rr / (rho_rr * sMu_R)) + UStar1 = densStar + UStar2 = densStar * + (v1_rr + (SStar - v_dot_n_rr) * normal_direction[1] * inv_norm_sq) + UStar3 = densStar * + (v2_rr + (SStar - v_dot_n_rr) * normal_direction[2] * inv_norm_sq) + UStar4 = densStar * + (v3_rr + (SStar - v_dot_n_rr) * normal_direction[3] * inv_norm_sq) + UStar5 = densStar * enerStar + f1 = f_rr[1] + Ssr * (UStar1 - u_rr[1]) + f2 = f_rr[2] + Ssr * (UStar2 - u_rr[2]) + f3 = f_rr[3] + Ssr * (UStar3 - u_rr[3]) + f4 = f_rr[4] + Ssr * (UStar4 - u_rr[4]) + f5 = f_rr[5] + Ssr * (UStar5 - u_rr[5]) + end + end + return SVector(f1, f2, f3, f4, f5) +end + """ min_max_speed_einfeldt(u_ll, u_rr, orientation, equations::CompressibleEulerEquations3D) diff --git a/test/test_p4est_2d.jl b/test/test_p4est_2d.jl index db34aecc168..cebc2917d52 100644 --- a/test/test_p4est_2d.jl +++ b/test/test_p4est_2d.jl @@ -203,6 +203,32 @@ end end end +@trixi_testset "elixir_euler_sedov.jl with HLLC Flux" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_sedov.jl"), + l2=[ + 0.4229948321239887, + 0.2559038337457483, + 0.2559038337457484, + 1.2990046683564136, + ], + linf=[ + 1.4989357969730492, + 1.325456585141623, + 1.3254565851416251, + 6.331283015053501, + ], + surface_flux=flux_hllc, + tspan=(0.0, 0.3)) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = 
similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + @trixi_testset "elixir_euler_sedov.jl (HLLE)" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_sedov.jl"), l2=[ diff --git a/test/test_p4est_3d.jl b/test/test_p4est_3d.jl index f2467f30204..4a2d2112c99 100644 --- a/test/test_p4est_3d.jl +++ b/test/test_p4est_3d.jl @@ -234,6 +234,34 @@ end end end +@trixi_testset "elixir_euler_free_stream_extruded.jl with HLLC FLux" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_free_stream_extruded.jl"), + l2=[ + 8.444868392439035e-16, + 4.889826056731442e-15, + 2.2921260987087585e-15, + 4.268460455702414e-15, + 1.1356712092620279e-14, + ], + linf=[ + 7.749356711883593e-14, + 4.513472928735496e-13, + 2.9790059308254513e-13, + 1.057154364048074e-12, + 1.6271428648906294e-12, + ], + tspan=(0.0, 0.1), + surface_flux=flux_hllc) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + @trixi_testset "elixir_euler_ec.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_ec.jl"), l2=[ diff --git a/test/test_unit.jl b/test/test_unit.jl index b3ed29d38e3..817b4cd550d 100644 --- a/test/test_unit.jl +++ b/test/test_unit.jl @@ -1076,6 +1076,46 @@ end end end +@timed_testset "Consistency check for HLLC flux: CEE" begin + # Set up equations and dummy conservative variables state + equations = CompressibleEulerEquations2D(1.4) + u = SVector(1.1, -0.5, 2.34, 5.5) + + orientations = [1, 2] + for orientation in orientations + @test flux_hllc(u, u, orientation, equations) ≈ flux(u, orientation, equations) + end + + normal_directions = [SVector(1.0, 0.0), + SVector(0.0, 1.0), + SVector(0.5, -0.5), + SVector(-1.2, 0.3)] + + for normal_direction in normal_directions + @test flux_hllc(u, u, normal_direction, equations) ≈ + flux(u, normal_direction, equations) + end + + equations = CompressibleEulerEquations3D(1.4) + u = SVector(1.1, -0.5, 2.34, 2.4, 5.5) + + orientations = [1, 2, 3] + for orientation in orientations + @test flux_hllc(u, u, orientation, equations) ≈ flux(u, orientation, equations) + end + + normal_directions = [SVector(1.0, 0.0, 0.0), + SVector(0.0, 1.0, 0.0), + SVector(0.0, 0.0, 1.0), + SVector(0.5, -0.5, 0.2), + SVector(-1.2, 0.3, 1.4)] + + for normal_direction in normal_directions + @test flux_hllc(u, u, normal_direction, equations) ≈ + flux(u, normal_direction, equations) + end +end + @timed_testset "Consistency check for Godunov flux" begin # Set up equations and dummy conservative variables state # Burgers' Equation @@ -1280,7 +1320,7 @@ end u_values = [SVector(1.0, 0.5, -0.7, 1.0), SVector(1.5, -0.2, 0.1, 5.0)] fluxes = [flux_central, flux_ranocha, flux_shima_etal, flux_kennedy_gruber, - flux_hll, FluxHLL(min_max_speed_davis), flux_hlle] + flux_hll, FluxHLL(min_max_speed_davis), flux_hlle, flux_hllc] for f_std in fluxes f_rot = FluxRotated(f_std) @@ -1303,8 +1343,8 @@ end u_values = [SVector(1.0, 0.5, -0.7, 0.1, 1.0), SVector(1.5, -0.2, 0.1, 0.2, 5.0)] fluxes = [flux_central, flux_ranocha, flux_shima_etal, flux_kennedy_gruber, - FluxLMARS(340), - flux_hll, FluxHLL(min_max_speed_davis), flux_hlle] + FluxLMARS(340), flux_hll, FluxHLL(min_max_speed_davis), flux_hlle, flux_hllc, + ] for f_std in fluxes f_rot = FluxRotated(f_std) From ad384c6530b2539175167350b6e56a9365d3521c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 3 Jan 2024 07:24:11 +0100 Subject: [PATCH 065/166] Bump crate-ci/typos from 1.16.23 to 1.16.26 (#1793) Bumps [crate-ci/typos](https://github.com/crate-ci/typos) from 1.16.23 to 1.16.26. - [Release notes](https://github.com/crate-ci/typos/releases) - [Changelog](https://github.com/crate-ci/typos/blob/master/CHANGELOG.md) - [Commits](https://github.com/crate-ci/typos/compare/v1.16.23...v1.16.26) --- updated-dependencies: - dependency-name: crate-ci/typos dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/SpellCheck.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/SpellCheck.yml b/.github/workflows/SpellCheck.yml index 366cb1183a0..a780e975155 100644 --- a/.github/workflows/SpellCheck.yml +++ b/.github/workflows/SpellCheck.yml @@ -10,4 +10,4 @@ jobs: - name: Checkout Actions Repository uses: actions/checkout@v4 - name: Check spelling - uses: crate-ci/typos@v1.16.23 + uses: crate-ci/typos@v1.16.26 From 1b6a4a941f72f0e9b95437cbfa2d5ba0ca4550bf Mon Sep 17 00:00:00 2001 From: Jesse Chan <1156048+jlchan@users.noreply.github.com> Date: Thu, 4 Jan 2024 13:00:05 -0600 Subject: [PATCH 066/166] Extend `CompressibleEulerQuasi1D` and `ShallowWaterQuasi1D` to `DGMulti` (#1797) * adding DGMulti versions of fluxes * remove incorrect factor of 2 * add example and test * formatting * add comment * revert removing factor of 2 * formatting * add SWE quasi-1D test d * enable quasi1D SWE for DGMulti * add docstrings * formatting * Update src/equations/compressible_euler_quasi_1d.jl Co-authored-by: Hendrik Ranocha * adding comments explaining why `normal_direction` is included in 1D * Apply suggestions from code review Co-authored-by: Daniel Doehring --------- Co-authored-by: Hendrik Ranocha Co-authored-by: Daniel Doehring --- examples/dgmulti_1d/elixir_euler_quasi_1d.jl | 49 +++++++++++++++++ .../elixir_shallow_water_quasi_1d.jl | 49 +++++++++++++++++ src/equations/compressible_euler_1d.jl | 5 ++ src/equations/compressible_euler_quasi_1d.jl | 38 ++++++++++++- src/equations/shallow_water_quasi_1d.jl | 35 ++++++++++++ test/test_dgmulti_1d.jl | 53 +++++++++++++++++++ 6 files changed, 227 insertions(+), 2 deletions(-) create mode 100644 examples/dgmulti_1d/elixir_euler_quasi_1d.jl create mode 100644 examples/dgmulti_1d/elixir_shallow_water_quasi_1d.jl diff --git a/examples/dgmulti_1d/elixir_euler_quasi_1d.jl b/examples/dgmulti_1d/elixir_euler_quasi_1d.jl new file mode 100644 index 00000000000..19269fa925b --- /dev/null +++ b/examples/dgmulti_1d/elixir_euler_quasi_1d.jl @@ -0,0 +1,49 @@ +using OrdinaryDiffEq +using Trixi + +############################################################################### +# Semidiscretization of the quasi 1d compressible Euler equations +# See Chan et al. 
https://doi.org/10.48550/arXiv.2307.12089 for details + +equations = CompressibleEulerEquationsQuasi1D(1.4) + +initial_condition = initial_condition_convergence_test + +surface_flux = (flux_chan_etal, flux_nonconservative_chan_etal) +volume_flux = surface_flux +dg = DGMulti(polydeg = 4, element_type = Line(), approximation_type = SBP(), + surface_integral = SurfaceIntegralWeakForm(surface_flux), + volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) + +cells_per_dimension = (8,) +mesh = DGMultiMesh(dg, cells_per_dimension, + coordinates_min = (-1.0,), coordinates_max = (1.0,), periodicity = true) +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, dg; + source_terms = source_terms_convergence_test) + +############################################################################### +# ODE solvers, callbacks etc. + +tspan = (0.0, 2.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval, uEltype = real(dg)) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) +stepsize_callback = StepsizeCallback(cfl = 0.8) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + alive_callback, + stepsize_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); +summary_callback() # print the timer summary diff --git a/examples/dgmulti_1d/elixir_shallow_water_quasi_1d.jl b/examples/dgmulti_1d/elixir_shallow_water_quasi_1d.jl new file mode 100644 index 00000000000..85741f9dbd3 --- /dev/null +++ b/examples/dgmulti_1d/elixir_shallow_water_quasi_1d.jl @@ -0,0 +1,49 @@ +using OrdinaryDiffEq +using Trixi + +############################################################################### +# Semidiscretization of the quasi 1d shallow water equations +# See Chan et al. https://doi.org/10.48550/arXiv.2307.12089 for details + +equations = ShallowWaterEquationsQuasi1D(gravity_constant = 9.81) + +initial_condition = initial_condition_convergence_test + +volume_flux = (flux_chan_etal, flux_nonconservative_chan_etal) +surface_flux = (FluxPlusDissipation(flux_chan_etal, DissipationLocalLaxFriedrichs()), + flux_nonconservative_chan_etal) + +dg = DGMulti(polydeg = 4, element_type = Line(), approximation_type = SBP(), + surface_integral = SurfaceIntegralWeakForm(surface_flux), + volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) + +cells_per_dimension = (8,) +mesh = DGMultiMesh(dg, cells_per_dimension, + coordinates_min = (0.0,), coordinates_max = (sqrt(2),), + periodicity = true) +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, dg; + source_terms = source_terms_convergence_test) + +############################################################################### +# ODE solvers, callbacks etc. 
+ +tspan = (0.0, 1.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval, uEltype = real(dg)) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + alive_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, RDPK3SpFSAL49(); abstol = 1.0e-8, reltol = 1.0e-8, + ode_default_options()..., callback = callbacks) +summary_callback() # print the timer summary diff --git a/src/equations/compressible_euler_1d.jl b/src/equations/compressible_euler_1d.jl index 05c38ce791d..a50c896cd1c 100644 --- a/src/equations/compressible_euler_1d.jl +++ b/src/equations/compressible_euler_1d.jl @@ -408,6 +408,11 @@ See also return SVector(f1, f2, f3) end +# While `normal_direction` isn't strictly necessary in 1D, certain solvers assume that +# the normal component is incorporated into the numerical flux. +# +# See `flux(u, normal_direction::AbstractVector, equations::AbstractEquations{1})` for a +# similar implementation. @inline function flux_ranocha(u_ll, u_rr, normal_direction::AbstractVector, equations::CompressibleEulerEquations1D) return normal_direction[1] * flux_ranocha(u_ll, u_rr, 1, equations) diff --git a/src/equations/compressible_euler_quasi_1d.jl b/src/equations/compressible_euler_quasi_1d.jl index 0a543277ee4..6844bf9bee5 100644 --- a/src/equations/compressible_euler_quasi_1d.jl +++ b/src/equations/compressible_euler_quasi_1d.jl @@ -161,8 +161,12 @@ end end """ -@inline function flux_nonconservative_chan_etal(u_ll, u_rr, orientation::Integer, - equations::CompressibleEulerEquationsQuasi1D) + flux_nonconservative_chan_etal(u_ll, u_rr, orientation::Integer, + equations::CompressibleEulerEquationsQuasi1D) + flux_nonconservative_chan_etal(u_ll, u_rr, normal_direction, + equations::CompressibleEulerEquationsQuasi1D) + flux_nonconservative_chan_etal(u_ll, u_rr, normal_ll, normal_rr, + equations::CompressibleEulerEquationsQuasi1D) Non-symmetric two-point volume flux discretizing the nonconservative (source) term that contains the gradient of the pressure [`CompressibleEulerEquationsQuasi1D`](@ref) @@ -190,6 +194,26 @@ Further details are available in the paper: return SVector(z, a_ll * p_avg, z, z) end +# While `normal_direction` isn't strictly necessary in 1D, certain solvers assume that +# the normal component is incorporated into the numerical flux. +# +# See `flux(u, normal_direction::AbstractVector, equations::AbstractEquations{1})` for a +# similar implementation. 
+@inline function flux_nonconservative_chan_etal(u_ll, u_rr, + normal_direction::AbstractVector, + equations::CompressibleEulerEquationsQuasi1D) + return normal_direction[1] * + flux_nonconservative_chan_etal(u_ll, u_rr, 1, equations) +end + +@inline function flux_nonconservative_chan_etal(u_ll, u_rr, + normal_ll::AbstractVector, + normal_rr::AbstractVector, + equations::CompressibleEulerEquationsQuasi1D) + # normal_ll should be equal to normal_rr in 1D + return flux_nonconservative_chan_etal(u_ll, u_rr, normal_ll, equations) +end + """ @inline function flux_chan_etal(u_ll, u_rr, orientation::Integer, equations::CompressibleEulerEquationsQuasi1D) @@ -230,6 +254,16 @@ Further details are available in the paper: return SVector(f1, f2, f3, zero(eltype(u_ll))) end +# While `normal_direction` isn't strictly necessary in 1D, certain solvers assume that +# the normal component is incorporated into the numerical flux. +# +# See `flux(u, normal_direction::AbstractVector, equations::AbstractEquations{1})` for a +# similar implementation. +@inline function flux_chan_etal(u_ll, u_rr, normal_direction::AbstractVector, + equations::CompressibleEulerEquationsQuasi1D) + return normal_direction[1] * flux_chan_etal(u_ll, u_rr, 1, equations) +end + # Calculate estimates for maximum wave speed for local Lax-Friedrichs-type dissipation as the # maximum velocity magnitude plus the maximum speed of sound @inline function max_abs_speed_naive(u_ll, u_rr, orientation::Integer, diff --git a/src/equations/shallow_water_quasi_1d.jl b/src/equations/shallow_water_quasi_1d.jl index d3935f0e75f..d52fbab841d 100644 --- a/src/equations/shallow_water_quasi_1d.jl +++ b/src/equations/shallow_water_quasi_1d.jl @@ -152,6 +152,11 @@ end """ flux_nonconservative_chan_etal(u_ll, u_rr, orientation::Integer, equations::ShallowWaterEquationsQuasi1D) + flux_nonconservative_chan_etal(u_ll, u_rr, normal_direction::AbstractVector, + equations::ShallowWaterEquationsQuasi1D) + flux_nonconservative_chan_etal(u_ll, u_rr, + normal_ll::AbstractVector, normal_rr::AbstractVector, + equations::ShallowWaterEquationsQuasi1D) Non-symmetric two-point volume flux discretizing the nonconservative (source) term that contains the gradient of the bottom topography [`ShallowWaterEquationsQuasi1D`](@ref) @@ -176,6 +181,26 @@ Further details are available in the paper: return SVector(z, equations.gravity * a_ll * h_ll * (h_rr + b_rr), z, z) end +# While `normal_direction` isn't strictly necessary in 1D, certain solvers assume that +# the normal component is incorporated into the numerical flux. +# +# See `flux(u, normal_direction::AbstractVector, equations::AbstractEquations{1})` for a +# similar implementation. 
+@inline function flux_nonconservative_chan_etal(u_ll, u_rr, + normal_direction::AbstractVector, + equations::ShallowWaterEquationsQuasi1D) + return normal_direction[1] * + flux_nonconservative_chan_etal(u_ll, u_rr, 1, equations) +end + +@inline function flux_nonconservative_chan_etal(u_ll, u_rr, + normal_ll::AbstractVector, + normal_rr::AbstractVector, + equations::ShallowWaterEquationsQuasi1D) + # normal_ll should be equal to normal_rr + return flux_nonconservative_chan_etal(u_ll, u_rr, normal_ll, equations) +end + """ flux_chan_etal(u_ll, u_rr, orientation, equations::ShallowWaterEquationsQuasi1D) @@ -204,6 +229,16 @@ Further details are available in the paper: return SVector(f1, f2, zero(eltype(u_ll)), zero(eltype(u_ll))) end +# While `normal_direction` isn't strictly necessary in 1D, certain solvers assume that +# the normal component is incorporated into the numerical flux. +# +# See `flux(u, normal_direction::AbstractVector, equations::AbstractEquations{1})` for a +# similar implementation. +@inline function flux_chan_etal(u_ll, u_rr, normal_direction::AbstractVector, + equations::ShallowWaterEquationsQuasi1D) + return normal_direction[1] * flux_chan_etal(u_ll, u_rr, 1, equations) +end + # Calculate maximum wave speed for local Lax-Friedrichs-type dissipation as the # maximum velocity magnitude plus the maximum speed of sound @inline function max_abs_speed_naive(u_ll, u_rr, orientation::Integer, diff --git a/test/test_dgmulti_1d.jl b/test/test_dgmulti_1d.jl index 79ad64075b4..0363086341f 100644 --- a/test/test_dgmulti_1d.jl +++ b/test/test_dgmulti_1d.jl @@ -162,6 +162,59 @@ end @test minimum(dg.basis.rst[1]) ≈ -1 @test maximum(dg.basis.rst[1])≈1 atol=0.35 end + +# test non-conservative systems +@trixi_testset "elixir_shallow_water_quasi_1d.jl (SBP) " begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallow_water_quasi_1d.jl"), + cells_per_dimension=(8,), + approximation_type=SBP(), + l2=[ + 3.03001101100507e-6, + 1.692177335948727e-5, + 3.002634351734614e-16, + 1.1636653574178203e-15, + ], + linf=[ + 1.2043401988570679e-5, + 5.346847010329059e-5, + 9.43689570931383e-16, + 2.220446049250313e-15, + ]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + +@trixi_testset "elixir_euler_quasi_1d.jl (SBP) " begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_quasi_1d.jl"), + cells_per_dimension=(8,), + approximation_type=SBP(), + l2=[ + 1.633271343738687e-5, + 9.575385661756332e-6, + 1.2700331443128421e-5, + 0.0, + ], + linf=[ + 7.304984704381567e-5, + 5.2365944135601694e-5, + 6.469559594934893e-5, + 0.0, + ]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end end # Clean up afterwards: delete Trixi.jl output directory From a1e3e73c634563e3cde3f11d8ad3a66e6a611281 Mon Sep 17 00:00:00 2001 From: Patrick Ersing <114223904+patrickersing@users.noreply.github.com> Date: Tue, 9 Jan 2024 07:03:30 +0100 Subject: [PATCH 067/166] Fix boundary_condition_slip_wall for SWE (#1798) * fix_wall_bc * add test * apply formatter * adjust some comments * update elixir and test; add topography --- .../tree_2d_dgsem/elixir_shallowwater_wall.jl | 82 +++++++++++++++++++ src/equations/shallow_water_2d.jl | 8 +- 
test/test_tree_2d_shallowwater.jl | 25 ++++++ 3 files changed, 113 insertions(+), 2 deletions(-) create mode 100644 examples/tree_2d_dgsem/elixir_shallowwater_wall.jl diff --git a/examples/tree_2d_dgsem/elixir_shallowwater_wall.jl b/examples/tree_2d_dgsem/elixir_shallowwater_wall.jl new file mode 100644 index 00000000000..b8dbad50680 --- /dev/null +++ b/examples/tree_2d_dgsem/elixir_shallowwater_wall.jl @@ -0,0 +1,82 @@ +using OrdinaryDiffEq +using Trixi + +############################################################################### +# Semidiscretization of the shallow water equations + +equations = ShallowWaterEquations2D(gravity_constant = 9.81, H0 = 3.25) + +# An initial condition with a bottom topography and a perturbation in the waterheight to test +# boundary_condition_slip_wall +function initial_condition_perturbation(x, t, equations::ShallowWaterEquations2D) + # Set the background values + H = equations.H0 + v1 = 0.0 + v2 = 0.0 + + # Bottom topography + b = 1.5 * exp(-0.5 * ((x[1])^2 + (x[2])^2)) + # Waterheight perturbation + H = H + 0.5 * exp(-10.0 * ((x[1])^2 + (x[2])^2)) + + return prim2cons(SVector(H, v1, v2, b), equations) +end + +initial_condition = initial_condition_perturbation + +boundary_condition = boundary_condition_slip_wall + +############################################################################### +# Get the DG approximation space + +volume_flux = (flux_wintermeyer_etal, flux_nonconservative_ersing_etal) +surface_flux = (flux_lax_friedrichs, flux_nonconservative_ersing_etal) +solver = DGSEM(polydeg = 3, surface_flux = surface_flux, + volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) + +############################################################################### +# Get the TreeMesh and setup a non-periodic mesh + +coordinates_min = (-1.0, -1.0) +coordinates_max = (1.0, 1.0) +mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 4, + n_cells_max = 10_000, + periodicity = false) + +# create the semi discretization object +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_condition) + +############################################################################### +# ODE solvers, callbacks etc. 
+ +tspan = (0.0, 0.25) +ode = semidiscretize(semi, tspan) + +############################################################################### +# Callbacks + +summary_callback = SummaryCallback() + +analysis_interval = 1000 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +save_solution = SaveSolutionCallback(interval = 1000, + save_initial_solution = true, + save_final_solution = true) + +stepsize_callback = StepsizeCallback(cfl = 1.0) + +callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution, + stepsize_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); +summary_callback() # print the timer summary diff --git a/src/equations/shallow_water_2d.jl b/src/equations/shallow_water_2d.jl index e75c92a27d0..6728d7d5553 100644 --- a/src/equations/shallow_water_2d.jl +++ b/src/equations/shallow_water_2d.jl @@ -236,8 +236,12 @@ Should be used together with [`TreeMesh`](@ref). u_boundary = SVector(u_inner[1], u_inner[2], -u_inner[3], u_inner[4]) end - # compute and return the flux using `boundary_condition_slip_wall` routine above - flux = surface_flux_function(u_inner, u_boundary, orientation, equations) + # Calculate boundary flux + if iseven(direction) # u_inner is "left" of boundary, u_boundary is "right" of boundary + flux = surface_flux_function(u_inner, u_boundary, orientation, equations) + else # u_boundary is "left" of boundary, u_inner is "right" of boundary + flux = surface_flux_function(u_boundary, u_inner, orientation, equations) + end return flux end diff --git a/test/test_tree_2d_shallowwater.jl b/test/test_tree_2d_shallowwater.jl index 58db7c5f35f..1f3dfbf5267 100644 --- a/test/test_tree_2d_shallowwater.jl +++ b/test/test_tree_2d_shallowwater.jl @@ -327,6 +327,31 @@ end @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 end end + +@trixi_testset "elixir_shallowwater_wall.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_wall.jl"), + l2=[ + 0.13517233723296504, + 0.20010876311162215, + 0.20010876311162223, + 2.719538414346464e-7, + ], + linf=[ + 0.5303607982988336, + 0.5080989745682338, + 0.5080989745682352, + 1.1301675764130437e-6, + ], + tspan=(0.0, 0.25)) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end end end # module From 7168d90aef7e07b4f0360cf805ace72532d980ae Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Tue, 9 Jan 2024 16:47:36 +0100 Subject: [PATCH 068/166] comments explaining usage of `ForwardDiff.jacobian` (#1800) --- src/semidiscretization/semidiscretization.jl | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/semidiscretization/semidiscretization.jl b/src/semidiscretization/semidiscretization.jl index fe7858e31ee..8518cf27fd3 100644 --- a/src/semidiscretization/semidiscretization.jl +++ b/src/semidiscretization/semidiscretization.jl @@ -253,6 +253,9 @@ end function _jacobian_ad_forward(semi, t0, u0_ode, du_ode, config) new_semi = remake(semi, uEltype = eltype(config)) + # Create anonymous function passed as first argument to 
`ForwardDiff.jacobian` to match + # `ForwardDiff.jacobian(f!, y::AbstractArray, x::AbstractArray, + # cfg::JacobianConfig = JacobianConfig(f!, y, x), check=Val{true}())` J = ForwardDiff.jacobian(du_ode, u0_ode, config) do du_ode, u_ode Trixi.rhs!(du_ode, u_ode, new_semi, t0) end @@ -279,6 +282,9 @@ end function _jacobian_ad_forward_structarrays(semi, t0, u0_ode_plain, du_ode_plain, config) new_semi = remake(semi, uEltype = eltype(config)) + # Create anonymous function passed as first argument to `ForwardDiff.jacobian` to match + # `ForwardDiff.jacobian(f!, y::AbstractArray, x::AbstractArray, + # cfg::JacobianConfig = JacobianConfig(f!, y, x), check=Val{true}())` J = ForwardDiff.jacobian(du_ode_plain, u0_ode_plain, config) do du_ode_plain, u_ode_plain u_ode = StructArray{SVector{nvariables(semi), eltype(config)}}(ntuple(v -> view(u_ode_plain, From 994bb4b088fbd08cf59296b8c5c6b3d33b810536 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 9 Jan 2024 16:48:15 +0100 Subject: [PATCH 069/166] Bump actions/upload-artifact from 3 to 4 (#1795) * Bump actions/upload-artifact from 3 to 4 Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 3 to 4. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * bump download-artifact to v4 --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Hendrik Ranocha --- .github/workflows/benchmark.yml | 2 +- .github/workflows/ci.yml | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 2ea30d6fddb..6aa4809c1c2 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -45,7 +45,7 @@ jobs: run: julia --project=benchmark/ -e 'using Pkg; Pkg.develop(PackageSpec(path=pwd())); Pkg.instantiate()' - name: Run benchmarks run: julia --project=benchmark/ --color=yes benchmark/run_benchmarks.jl - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 with: name: my-artifact path: benchmark/results*.md diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index cf8107736e9..2c0ea798b49 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -153,7 +153,7 @@ jobs: - shell: bash run: | cp ./lcov.info ./lcov-${{ matrix.trixi_test }}-${{ matrix.os }}-${{ matrix.version }}-${{ matrix.arch }}.info - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 with: name: lcov-${{ matrix.trixi_test }}-${{ matrix.os }}-${{ matrix.version }}-${{ matrix.arch }} path: ./lcov-${{ matrix.trixi_test }}-${{ matrix.os }}-${{ matrix.version }}-${{ matrix.arch }}.info @@ -176,7 +176,7 @@ jobs: # At first, we check out the repository and download all artifacts # (and list files for debugging). - uses: actions/checkout@v4 - - uses: actions/download-artifact@v3 + - uses: actions/download-artifact@v4 - run: ls -R # Next, we merge the individual coverage files and upload # the combined results to Coveralls. 
@@ -199,7 +199,7 @@ jobs: github-token: ${{ secrets.GITHUB_TOKEN }} path-to-lcov: ./lcov.info # Upload merged coverage data as artifact for debugging - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 with: name: lcov path: ./lcov.info From 08ae0f200f0e89ceae1a260ffed38539ad507c55 Mon Sep 17 00:00:00 2001 From: Benjamin Bolm <74359358+bennibolm@users.noreply.github.com> Date: Thu, 11 Jan 2024 16:34:15 +0100 Subject: [PATCH 070/166] Fix link to download notebook badge in documentation (#1801) * Add random change * Fix download links * Update link and rename variables * Update introduction file with new links --- docs/literate/make.jl | 10 +++++----- docs/literate/src/files/index.jl | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/literate/make.jl b/docs/literate/make.jl index a04d8a0b333..84e4fbdced6 100644 --- a/docs/literate/make.jl +++ b/docs/literate/make.jl @@ -10,17 +10,17 @@ function create_files(title, file, repo_src, pages_dir, notebooks_dir; folder="" end binder_logo = "https://mybinder.org/badge_logo.svg" - nbviewer_logo = "https://raw.githubusercontent.com/jupyter/design/master/logos/Badges/nbviewer_badge.svg" - download_logo = "https://camo.githubusercontent.com/aea75103f6d9f690a19cb0e17c06f984ab0f472d9e6fe4eadaa0cc438ba88ada/68747470733a2f2f696d672e736869656c64732e696f2f62616467652f646f776e6c6f61642d6e6f7465626f6f6b2d627269676874677265656e" + nbviewer_logo = "https://img.shields.io/badge/render-nbviewer-f37726" + raw_notebook_logo = "https://img.shields.io/badge/raw-notebook-4cc61e" notebook_path = "tutorials/notebooks/$notebook_filename" binder_url = "https://mybinder.org/v2/gh/trixi-framework/Trixi.jl/tutorial_notebooks?filepath=$notebook_path" nbviewer_url = "https://nbviewer.jupyter.org/github/trixi-framework/Trixi.jl/blob/tutorial_notebooks/$notebook_path" - download_url = "https://raw.githubusercontent.com/trixi-framework/Trixi.jl/tutorial_notebooks/$notebook_path" + raw_notebook_url = "https://raw.githubusercontent.com/trixi-framework/Trixi.jl/tutorial_notebooks/$notebook_path" binder_badge = "# [![]($binder_logo)]($binder_url)" nbviewer_badge = "# [![]($nbviewer_logo)]($nbviewer_url)" - download_badge = "# [![]($download_logo)]($download_url)" + raw_notebook_badge = "# [![]($raw_notebook_logo)]($raw_notebook_url)" # Generate notebook file function preprocess_notebook(content) @@ -32,7 +32,7 @@ function create_files(title, file, repo_src, pages_dir, notebooks_dir; folder="" # Generate markdown file function preprocess_docs(content) - return string("# # [$title](@id $(splitext(file)[1]))\n $binder_badge\n $nbviewer_badge\n $download_badge\n\n", content) + return string("# # [$title](@id $(splitext(file)[1]))\n $binder_badge\n $nbviewer_badge\n $raw_notebook_badge\n\n", content) end Literate.markdown(joinpath(repo_src, folder, file), joinpath(pages_dir, folder); preprocess=preprocess_docs,) end diff --git a/docs/literate/src/files/index.jl b/docs/literate/src/files/index.jl index d42695611f6..e259d25fb2f 100644 --- a/docs/literate/src/files/index.jl +++ b/docs/literate/src/files/index.jl @@ -5,9 +5,9 @@ # Right now, you are using the classic documentation. 
The corresponding interactive notebooks can # be opened in [Binder](https://mybinder.org/) and viewed in [nbviewer](https://nbviewer.jupyter.org/) -# via the icons ![](https://mybinder.org/badge_logo.svg) and ![](https://raw.githubusercontent.com/jupyter/design/master/logos/Badges/nbviewer_badge.svg) +# via the icons ![](https://mybinder.org/badge_logo.svg) and ![](https://img.shields.io/badge/render-nbviewer-f37726) # in the respective tutorial. -# You can download the raw notebooks from GitHub via ![](https://camo.githubusercontent.com/aea75103f6d9f690a19cb0e17c06f984ab0f472d9e6fe4eadaa0cc438ba88ada/68747470733a2f2f696d672e736869656c64732e696f2f62616467652f646f776e6c6f61642d6e6f7465626f6f6b2d627269676874677265656e). +# You can also open the raw notebook files via ![](https://img.shields.io/badge/raw-notebook-4cc61e). # **Note:** To improve responsiveness via caching, the notebooks are updated only once a week. They are only # available for the latest stable release of Trixi.jl at the time of caching. From eb0f1252df72faa82f1a39cadeb26352c03e5d01 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Thu, 11 Jan 2024 17:01:46 +0100 Subject: [PATCH 071/166] set version to v0.6.6 --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index faf9f82d335..b840b636c05 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "] -version = "0.6.6-pre" +version = "0.6.6" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From 148e5b0559f51cc2e16fc67d03ae51aa5df3334e Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Thu, 11 Jan 2024 17:02:10 +0100 Subject: [PATCH 072/166] set development version to v0.6.7-pre --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index b840b636c05..f246bdfdab4 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "] -version = "0.6.6" +version = "0.6.7-pre" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From 2463b38d60fd2bd463cd46252a29c08e706f1a5e Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Fri, 12 Jan 2024 11:16:21 +0100 Subject: [PATCH 073/166] use julia-actions/cache also for Documenter (#1802) --- .github/workflows/Documenter.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/Documenter.yml b/.github/workflows/Documenter.yml index 129c41a3b5c..8f8d674ffa9 100644 --- a/.github/workflows/Documenter.yml +++ b/.github/workflows/Documenter.yml @@ -38,6 +38,7 @@ jobs: with: version: '1.9' show-versioninfo: true + - uses: julia-actions/cache@v1 - uses: julia-actions/julia-buildpkg@v1 env: PYTHON: "" From 4bb74f8505220b8f42ef15622097f96276584c13 Mon Sep 17 00:00:00 2001 From: Benedict <135045760+bgeihe@users.noreply.github.com> Date: Fri, 12 Jan 2024 13:30:31 +0100 Subject: [PATCH 074/166] Add dry air warm bubble test case (#1779) * add LMARS flux in 2D * add dry air warm bubble test case * get formatting right * remove += * add DOI * cleaned up variables (naming, scope) * reduce run time of test case * Revert "reduce run time of test case" This reverts commit b6e527bd63767e0a124a274d57e986df9a88b766. 
* change output folder * change energy term in LMARS solver to use p_l/r * add lmars consistency checks * switched to kennedy gruber flux * add euler warm bubble elixir to tests * adapt errors due to change flux * add warm bubble test with TreeMesh * fix unit test * fix format * adapt polynomial degree and CFL number * fix format * adapt tests due to changed parameters * Update src/equations/compressible_euler_2d.jl Co-authored-by: Andrew Winters * Update src/equations/compressible_euler_2d.jl Co-authored-by: Andrew Winters * Update src/equations/compressible_euler_3d.jl Co-authored-by: Andrew Winters * Update src/equations/compressible_euler_3d.jl Co-authored-by: Andrew Winters * correct test result * use callable struct to hold parameters Thanks sloede! * Update examples/structured_2d_dgsem/elixir_euler_warm_bubble.jl Co-authored-by: Hendrik Ranocha * Update examples/tree_2d_dgsem/elixir_euler_warm_bubble.jl Co-authored-by: Hendrik Ranocha * year of Wicker paper, comment on tspan [no ci] * add comment on speed of sound --------- Co-authored-by: Andrew Winters Co-authored-by: Hendrik Ranocha --- .../elixir_euler_warm_bubble.jl | 146 +++++++++++++++++ .../tree_2d_dgsem/elixir_euler_warm_bubble.jl | 150 ++++++++++++++++++ src/equations/compressible_euler_2d.jl | 92 +++++++++++ src/equations/compressible_euler_3d.jl | 25 +-- test/test_p4est_3d.jl | 40 ++--- test/test_structured_2d.jl | 27 ++++ test/test_tree_2d_euler.jl | 26 +++ test/test_unit.jl | 46 +++++- 8 files changed, 519 insertions(+), 33 deletions(-) create mode 100644 examples/structured_2d_dgsem/elixir_euler_warm_bubble.jl create mode 100644 examples/tree_2d_dgsem/elixir_euler_warm_bubble.jl diff --git a/examples/structured_2d_dgsem/elixir_euler_warm_bubble.jl b/examples/structured_2d_dgsem/elixir_euler_warm_bubble.jl new file mode 100644 index 00000000000..05c09d57530 --- /dev/null +++ b/examples/structured_2d_dgsem/elixir_euler_warm_bubble.jl @@ -0,0 +1,146 @@ +using OrdinaryDiffEq +using Trixi + +# Warm bubble test case from +# - Wicker, L. J., and Skamarock, W. C. 
(1998) +# A time-splitting scheme for the elastic equations incorporating +# second-order Runge–Kutta time differencing +# [DOI: 10.1175/1520-0493(1998)126%3C1992:ATSSFT%3E2.0.CO;2](https://doi.org/10.1175/1520-0493(1998)126%3C1992:ATSSFT%3E2.0.CO;2) +# See also +# - Bryan and Fritsch (2002) +# A Benchmark Simulation for Moist Nonhydrostatic Numerical Models +# [DOI: 10.1175/1520-0493(2002)130<2917:ABSFMN>2.0.CO;2](https://doi.org/10.1175/1520-0493(2002)130<2917:ABSFMN>2.0.CO;2) +# - Carpenter, Droegemeier, Woodward, Hane (1990) +# Application of the Piecewise Parabolic Method (PPM) to +# Meteorological Modeling +# [DOI: 10.1175/1520-0493(1990)118<0586:AOTPPM>2.0.CO;2](https://doi.org/10.1175/1520-0493(1990)118<0586:AOTPPM>2.0.CO;2) +struct WarmBubbleSetup + # Physical constants + g::Float64 # gravity of earth + c_p::Float64 # heat capacity for constant pressure (dry air) + c_v::Float64 # heat capacity for constant volume (dry air) + gamma::Float64 # heat capacity ratio (dry air) + + function WarmBubbleSetup(; g = 9.81, c_p = 1004.0, c_v = 717.0, gamma = c_p / c_v) + new(g, c_p, c_v, gamma) + end +end + +# Initial condition +function (setup::WarmBubbleSetup)(x, t, equations::CompressibleEulerEquations2D) + @unpack g, c_p, c_v = setup + + # center of perturbation + center_x = 10000.0 + center_z = 2000.0 + # radius of perturbation + radius = 2000.0 + # distance of current x to center of perturbation + r = sqrt((x[1] - center_x)^2 + (x[2] - center_z)^2) + + # perturbation in potential temperature + potential_temperature_ref = 300.0 + potential_temperature_perturbation = 0.0 + if r <= radius + potential_temperature_perturbation = 2 * cospi(0.5 * r / radius)^2 + end + potential_temperature = potential_temperature_ref + potential_temperature_perturbation + + # Exner pressure, solves hydrostatic equation for x[2] + exner = 1 - g / (c_p * potential_temperature) * x[2] + + # pressure + p_0 = 100_000.0 # reference pressure + R = c_p - c_v # gas constant (dry air) + p = p_0 * exner^(c_p / R) + + # temperature + T = potential_temperature * exner + + # density + rho = p / (R * T) + + v1 = 20.0 + v2 = 0.0 + E = c_v * T + 0.5 * (v1^2 + v2^2) + return SVector(rho, rho * v1, rho * v2, rho * E) +end + +# Source terms +@inline function (setup::WarmBubbleSetup)(u, x, t, equations::CompressibleEulerEquations2D) + @unpack g = setup + rho, _, rho_v2, _ = u + return SVector(zero(eltype(u)), zero(eltype(u)), -g * rho, -g * rho_v2) +end + +############################################################################### +# semidiscretization of the compressible Euler equations +warm_bubble_setup = WarmBubbleSetup() + +equations = CompressibleEulerEquations2D(warm_bubble_setup.gamma) + +boundary_conditions = (x_neg = boundary_condition_periodic, + x_pos = boundary_condition_periodic, + y_neg = boundary_condition_slip_wall, + y_pos = boundary_condition_slip_wall) + +polydeg = 3 +basis = LobattoLegendreBasis(polydeg) + +# This is a good estimate for the speed of sound in this example. +# Other values between 300 and 400 should work as well. 
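# For reference, this estimate follows from the ideal gas relation c = sqrt(gamma * R * T)
# with R = c_p - c_v = 287 J/(kg K) and the 300 K background potential temperature used
# above: sqrt((1004.0 / 717.0) * 287.0 * 300.0) ≈ 347 m/s near the ground, with smaller
# values aloft where the temperature decreases.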
+surface_flux = FluxLMARS(340.0) + +volume_flux = flux_kennedy_gruber +volume_integral = VolumeIntegralFluxDifferencing(volume_flux) + +solver = DGSEM(basis, surface_flux, volume_integral) + +coordinates_min = (0.0, 0.0) +coordinates_max = (20_000.0, 10_000.0) + +cells_per_dimension = (64, 32) +mesh = StructuredMesh(cells_per_dimension, coordinates_min, coordinates_max) + +semi = SemidiscretizationHyperbolic(mesh, equations, warm_bubble_setup, solver, + source_terms = warm_bubble_setup, + boundary_conditions = boundary_conditions) + +############################################################################### +# ODE solvers, callbacks etc. + +tspan = (0.0, 1000.0) # 1000 seconds final time + +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 1000 + +analysis_callback = AnalysisCallback(semi, interval = analysis_interval, + extra_analysis_errors = (:entropy_conservation_error,)) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +save_solution = SaveSolutionCallback(interval = analysis_interval, + save_initial_solution = true, + save_final_solution = true, + output_directory = "out", + solution_variables = cons2prim) + +stepsize_callback = StepsizeCallback(cfl = 1.0) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + alive_callback, + save_solution, + stepsize_callback) + +############################################################################### +# run the simulation +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + maxiters = 1.0e7, + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); + +summary_callback() diff --git a/examples/tree_2d_dgsem/elixir_euler_warm_bubble.jl b/examples/tree_2d_dgsem/elixir_euler_warm_bubble.jl new file mode 100644 index 00000000000..f2e14273ae7 --- /dev/null +++ b/examples/tree_2d_dgsem/elixir_euler_warm_bubble.jl @@ -0,0 +1,150 @@ +using OrdinaryDiffEq +using Trixi + +# Warm bubble test case from +# - Wicker, L. J., and Skamarock, W. C. 
(1998) +# A time-splitting scheme for the elastic equations incorporating +# second-order Runge–Kutta time differencing +# [DOI: 10.1175/1520-0493(1998)126%3C1992:ATSSFT%3E2.0.CO;2](https://doi.org/10.1175/1520-0493(1998)126%3C1992:ATSSFT%3E2.0.CO;2) +# See also +# - Bryan and Fritsch (2002) +# A Benchmark Simulation for Moist Nonhydrostatic Numerical Models +# [DOI: 10.1175/1520-0493(2002)130<2917:ABSFMN>2.0.CO;2](https://doi.org/10.1175/1520-0493(2002)130<2917:ABSFMN>2.0.CO;2) +# - Carpenter, Droegemeier, Woodward, Hane (1990) +# Application of the Piecewise Parabolic Method (PPM) to +# Meteorological Modeling +# [DOI: 10.1175/1520-0493(1990)118<0586:AOTPPM>2.0.CO;2](https://doi.org/10.1175/1520-0493(1990)118<0586:AOTPPM>2.0.CO;2) +struct WarmBubbleSetup + # Physical constants + g::Float64 # gravity of earth + c_p::Float64 # heat capacity for constant pressure (dry air) + c_v::Float64 # heat capacity for constant volume (dry air) + gamma::Float64 # heat capacity ratio (dry air) + + function WarmBubbleSetup(; g = 9.81, c_p = 1004.0, c_v = 717.0, gamma = c_p / c_v) + new(g, c_p, c_v, gamma) + end +end + +# Initial condition +function (setup::WarmBubbleSetup)(x, t, equations::CompressibleEulerEquations2D) + @unpack g, c_p, c_v = setup + + # center of perturbation + center_x = 10000.0 + center_z = 2000.0 + # radius of perturbation + radius = 2000.0 + # distance of current x to center of perturbation + r = sqrt((x[1] - center_x)^2 + (x[2] - center_z)^2) + + # perturbation in potential temperature + potential_temperature_ref = 300.0 + potential_temperature_perturbation = 0.0 + if r <= radius + potential_temperature_perturbation = 2 * cospi(0.5 * r / radius)^2 + end + potential_temperature = potential_temperature_ref + potential_temperature_perturbation + + # Exner pressure, solves hydrostatic equation for x[2] + exner = 1 - g / (c_p * potential_temperature) * x[2] + + # pressure + p_0 = 100_000.0 # reference pressure + R = c_p - c_v # gas constant (dry air) + p = p_0 * exner^(c_p / R) + + # temperature + T = potential_temperature * exner + + # density + rho = p / (R * T) + + v1 = 20.0 + v2 = 0.0 + E = c_v * T + 0.5 * (v1^2 + v2^2) + return SVector(rho, rho * v1, rho * v2, rho * E) +end + +# Source terms +@inline function (setup::WarmBubbleSetup)(u, x, t, equations::CompressibleEulerEquations2D) + @unpack g = setup + rho, _, rho_v2, _ = u + return SVector(zero(eltype(u)), zero(eltype(u)), -g * rho, -g * rho_v2) +end + +############################################################################### +# semidiscretization of the compressible Euler equations +warm_bubble_setup = WarmBubbleSetup() + +equations = CompressibleEulerEquations2D(warm_bubble_setup.gamma) + +boundary_conditions = (x_neg = boundary_condition_periodic, + x_pos = boundary_condition_periodic, + y_neg = boundary_condition_slip_wall, + y_pos = boundary_condition_slip_wall) + +polydeg = 3 +basis = LobattoLegendreBasis(polydeg) + +# This is a good estimate for the speed of sound in this example. +# Other values between 300 and 400 should work as well. 
+surface_flux = FluxLMARS(340.0) + +volume_flux = flux_kennedy_gruber +volume_integral = VolumeIntegralFluxDifferencing(volume_flux) + +solver = DGSEM(basis, surface_flux, volume_integral) + +coordinates_min = (0.0, 0.0) +coordinates_max = (20_000.0, 10_000.0) + +# Same coordinates as in examples/structured_2d_dgsem/elixir_euler_warm_bubble.jl +# However TreeMesh will generate a 20_000 x 20_000 square domain instead +mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 6, + n_cells_max = 10_000, + periodicity = (true, false)) + +semi = SemidiscretizationHyperbolic(mesh, equations, warm_bubble_setup, solver, + source_terms = warm_bubble_setup, + boundary_conditions = boundary_conditions) + +############################################################################### +# ODE solvers, callbacks etc. + +tspan = (0.0, 1000.0) # 1000 seconds final time + +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 1000 + +analysis_callback = AnalysisCallback(semi, interval = analysis_interval, + extra_analysis_errors = (:entropy_conservation_error,)) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +save_solution = SaveSolutionCallback(interval = analysis_interval, + save_initial_solution = true, + save_final_solution = true, + output_directory = "out", + solution_variables = cons2prim) + +stepsize_callback = StepsizeCallback(cfl = 1.0) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + alive_callback, + save_solution, + stepsize_callback) + +############################################################################### +# run the simulation +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + maxiters = 1.0e7, + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); + +summary_callback() diff --git a/src/equations/compressible_euler_2d.jl b/src/equations/compressible_euler_2d.jl index b0fd5c53f45..3c6f759db2b 100644 --- a/src/equations/compressible_euler_2d.jl +++ b/src/equations/compressible_euler_2d.jl @@ -809,6 +809,98 @@ end return SVector(f1m, f2m, f3m, f4m) end +""" + FluxLMARS(c)(u_ll, u_rr, orientation_or_normal_direction, + equations::CompressibleEulerEquations2D) + +Low Mach number approximate Riemann solver (LMARS) for atmospheric flows using +an estimate `c` of the speed of sound. + +References: +- Xi Chen et al. (2013) + A Control-Volume Model of the Compressible Euler Equations with a Vertical + Lagrangian Coordinate + [DOI: 10.1175/MWR-D-12-00129.1](https://doi.org/10.1175/mwr-d-12-00129.1) +""" +struct FluxLMARS{SpeedOfSound} + # Estimate for the speed of sound + speed_of_sound::SpeedOfSound +end + +@inline function (flux_lmars::FluxLMARS)(u_ll, u_rr, orientation::Integer, + equations::CompressibleEulerEquations2D) + c = flux_lmars.speed_of_sound + + # Unpack left and right state + rho_ll, v1_ll, v2_ll, p_ll = cons2prim(u_ll, equations) + rho_rr, v1_rr, v2_rr, p_rr = cons2prim(u_rr, equations) + + if orientation == 1 + v_ll = v1_ll + v_rr = v1_rr + else # orientation == 2 + v_ll = v2_ll + v_rr = v2_rr + end + + rho = 0.5 * (rho_ll + rho_rr) + p = 0.5 * (p_ll + p_rr) - 0.5 * c * rho * (v_rr - v_ll) + v = 0.5 * (v_ll + v_rr) - 1 / (2 * c * rho) * (p_rr - p_ll) + + # We treat the energy term analogous to the potential temperature term in the paper by + # Chen et al., i.e. 
we use p_ll and p_rr, and not p + if v >= 0 + f1, f2, f3, f4 = v * u_ll + f4 = f4 + p_ll * v + else + f1, f2, f3, f4 = v * u_rr + f4 = f4 + p_rr * v + end + + if orientation == 1 + f2 = f2 + p + else # orientation == 2 + f3 = f3 + p + end + + return SVector(f1, f2, f3, f4) +end + +@inline function (flux_lmars::FluxLMARS)(u_ll, u_rr, normal_direction::AbstractVector, + equations::CompressibleEulerEquations2D) + c = flux_lmars.speed_of_sound + + # Unpack left and right state + rho_ll, v1_ll, v2_ll, p_ll = cons2prim(u_ll, equations) + rho_rr, v1_rr, v2_rr, p_rr = cons2prim(u_rr, equations) + + v_ll = v1_ll * normal_direction[1] + v2_ll * normal_direction[2] + v_rr = v1_rr * normal_direction[1] + v2_rr * normal_direction[2] + + # Note that this is the same as computing v_ll and v_rr with a normalized normal vector + # and then multiplying v by `norm_` again, but this version is slightly faster. + norm_ = norm(normal_direction) + + rho = 0.5 * (rho_ll + rho_rr) + p = 0.5 * (p_ll + p_rr) - 0.5 * c * rho * (v_rr - v_ll) / norm_ + v = 0.5 * (v_ll + v_rr) - 1 / (2 * c * rho) * (p_rr - p_ll) * norm_ + + # We treat the energy term analogous to the potential temperature term in the paper by + # Chen et al., i.e. we use p_ll and p_rr, and not p + if v >= 0 + f1, f2, f3, f4 = u_ll * v + f4 = f4 + p_ll * v + else + f1, f2, f3, f4 = u_rr * v + f4 = f4 + p_rr * v + end + + return SVector(f1, + f2 + p * normal_direction[1], + f3 + p * normal_direction[2], + f4) +end + """ splitting_vanleer_haenel(u, orientation::Integer, equations::CompressibleEulerEquations2D) diff --git a/src/equations/compressible_euler_3d.jl b/src/equations/compressible_euler_3d.jl index 82c4a7efa32..292b912f009 100644 --- a/src/equations/compressible_euler_3d.jl +++ b/src/equations/compressible_euler_3d.jl @@ -944,11 +944,6 @@ References: Lagrangian Coordinate [DOI: 10.1175/MWR-D-12-00129.1](https://doi.org/10.1175/mwr-d-12-00129.1) """ -struct FluxLMARS{SpeedOfSound} - # Estimate for the speed of sound - speed_of_sound::SpeedOfSound -end - @inline function (flux_lmars::FluxLMARS)(u_ll, u_rr, orientation::Integer, equations::CompressibleEulerEquations3D) c = flux_lmars.speed_of_sound @@ -972,10 +967,14 @@ end p = 0.5 * (p_ll + p_rr) - 0.5 * c * rho * (v_rr - v_ll) v = 0.5 * (v_ll + v_rr) - 1 / (2 * c * rho) * (p_rr - p_ll) + # We treat the energy term analogous to the potential temperature term in the paper by + # Chen et al., i.e. we use p_ll and p_rr, and not p if v >= 0 f1, f2, f3, f4, f5 = v * u_ll + f5 = f5 + p_ll * v else f1, f2, f3, f4, f5 = v * u_rr + f5 = f5 + p_rr * v end if orientation == 1 @@ -985,7 +984,6 @@ end else # orientation == 3 f4 += p end - f5 += p * v return SVector(f1, f2, f3, f4, f5) end @@ -1011,18 +1009,21 @@ end p = 0.5 * (p_ll + p_rr) - 0.5 * c * rho * (v_rr - v_ll) / norm_ v = 0.5 * (v_ll + v_rr) - 1 / (2 * c * rho) * (p_rr - p_ll) * norm_ + # We treat the energy term analogous to the potential temperature term in the paper by + # Chen et al., i.e. 
we use p_ll and p_rr, and not p if v >= 0 f1, f2, f3, f4, f5 = v * u_ll + f5 = f5 + p_ll * v else f1, f2, f3, f4, f5 = v * u_rr + f5 = f5 + p_rr * v end - f2 += p * normal_direction[1] - f3 += p * normal_direction[2] - f4 += p * normal_direction[3] - f5 += p * v - - return SVector(f1, f2, f3, f4, f5) + return SVector(f1, + f2 + p * normal_direction[1], + f3 + p * normal_direction[2], + f4 + p * normal_direction[3], + f5) end # Calculate maximum wave speed for local Lax-Friedrichs-type dissipation as the diff --git a/test/test_p4est_3d.jl b/test/test_p4est_3d.jl index 4a2d2112c99..dc5d32b5a04 100644 --- a/test/test_p4est_3d.jl +++ b/test/test_p4est_3d.jl @@ -380,18 +380,18 @@ end @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_circular_wind_nonconforming.jl"), l2=[ - 1.573832094977477e-7, - 3.863090659429634e-5, - 3.867293305754584e-5, - 3.686550296950078e-5, - 0.05508968493733932, + 1.5737711609657832e-7, + 3.8630261900166194e-5, + 3.8672287531936816e-5, + 3.6865116098660796e-5, + 0.05508620970403884, ], linf=[ - 2.2695202613887133e-6, - 0.0005314968179916946, - 0.0005314969614147458, - 0.0005130280733059617, - 0.7944959432352334, + 2.268845333053271e-6, + 0.000531462302113539, + 0.0005314624461298934, + 0.0005129931254772464, + 0.7942778058932163, ], tspan=(0.0, 2e2), coverage_override=(trees_per_cube_face = (1, 1), polydeg = 3)) # Prevent long compile time in CI @@ -409,18 +409,18 @@ end @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_baroclinic_instability.jl"), l2=[ - 6.725065410642336e-7, - 0.00021710117340245454, - 0.000438679759422352, - 0.00020836356588024185, - 0.07602006689579247, + 6.725093801700048e-7, + 0.00021710076010951073, + 0.0004386796338203878, + 0.00020836270267103122, + 0.07601887903440395, ], linf=[ - 1.9101671995258585e-5, - 0.029803626911022396, - 0.04847630924006063, - 0.022001371349740104, - 4.847761006938526, + 1.9107530539574924e-5, + 0.02980358831035801, + 0.048476331898047564, + 0.02200137344113612, + 4.848310144356219, ], tspan=(0.0, 1e2), # Decrease tolerance of adaptive time stepping to get similar results across different systems diff --git a/test/test_structured_2d.jl b/test/test_structured_2d.jl index 1addc29e3e6..e5d45ebcc07 100644 --- a/test/test_structured_2d.jl +++ b/test/test_structured_2d.jl @@ -611,6 +611,33 @@ end end end +@trixi_testset "elixir_euler_warm_bubble.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_euler_warm_bubble.jl"), + l2=[ + 0.00019387402388722496, + 0.03086514388623955, + 0.04541427917165, + 43.892826583444716, + ], + linf=[ + 0.0015942305974430138, + 0.17449778969139373, + 0.3729704262394843, + 307.6706958565337, + ], + cells_per_dimension=(32, 16), + tspan=(0.0, 10.0)) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 100 + end +end + @trixi_testset "elixir_eulerpolytropic_convergence.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulerpolytropic_convergence.jl"), l2=[ diff --git a/test/test_tree_2d_euler.jl b/test/test_tree_2d_euler.jl index 65899cd5263..61b5c54b5e9 100644 --- a/test/test_tree_2d_euler.jl +++ b/test/test_tree_2d_euler.jl @@ -834,6 +834,32 @@ end end end +@trixi_testset "elixir_euler_warm_bubble.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_warm_bubble.jl"), + l2=[ + 0.0001379946769624388, + 0.02078779689715382, + 0.033237241571263176, + 31.36068872331705, + ], + 
linf=[ + 0.0016286690573188434, + 0.15623770697198225, + 0.3341371832270615, + 334.5373488726036, + ], + tspan=(0.0, 10.0), + initial_refinement_level=4) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 100 + end +end + # Coverage test for all initial conditions @testset "Compressible Euler: Tests for initial conditions" begin @trixi_testset "elixir_euler_vortex.jl one step with initial_condition_constant" begin diff --git a/test/test_unit.jl b/test/test_unit.jl index 817b4cd550d..e8a8effbe29 100644 --- a/test/test_unit.jl +++ b/test/test_unit.jl @@ -1287,6 +1287,49 @@ end end end +@timed_testset "Consistency check for LMARS flux" begin + equations = CompressibleEulerEquations2D(1.4) + flux_lmars = FluxLMARS(340) + + normal_directions = [SVector(1.0, 0.0), + SVector(0.0, 1.0), + SVector(0.5, -0.5), + SVector(-1.2, 0.3)] + orientations = [1, 2] + u_values = [SVector(1.0, 0.5, -0.7, 1.0), + SVector(1.5, -0.2, 0.1, 5.0)] + + for u in u_values, orientation in orientations + @test flux_lmars(u, u, orientation, equations) ≈ + flux(u, orientation, equations) + end + + for u in u_values, normal_direction in normal_directions + @test flux_lmars(u, u, normal_direction, equations) ≈ + flux(u, normal_direction, equations) + end + + equations = CompressibleEulerEquations3D(1.4) + normal_directions = [SVector(1.0, 0.0, 0.0), + SVector(0.0, 1.0, 0.0), + SVector(0.0, 0.0, 1.0), + SVector(0.5, -0.5, 0.2), + SVector(-1.2, 0.3, 1.4)] + orientations = [1, 2, 3] + u_values = [SVector(1.0, 0.5, -0.7, 0.1, 1.0), + SVector(1.5, -0.2, 0.1, 0.2, 5.0)] + + for u in u_values, orientation in orientations + @test flux_lmars(u, u, orientation, equations) ≈ + flux(u, orientation, equations) + end + + for u in u_values, normal_direction in normal_directions + @test flux_lmars(u, u, normal_direction, equations) ≈ + flux(u, normal_direction, equations) + end +end + @testset "FluxRotated vs. direct implementation" begin @timed_testset "CompressibleEulerMulticomponentEquations2D" begin equations = CompressibleEulerMulticomponentEquations2D(gammas = (1.4, 1.4), @@ -1320,7 +1363,8 @@ end u_values = [SVector(1.0, 0.5, -0.7, 1.0), SVector(1.5, -0.2, 0.1, 5.0)] fluxes = [flux_central, flux_ranocha, flux_shima_etal, flux_kennedy_gruber, - flux_hll, FluxHLL(min_max_speed_davis), flux_hlle, flux_hllc] + FluxLMARS(340), flux_hll, FluxHLL(min_max_speed_davis), flux_hlle, flux_hllc, + ] for f_std in fluxes f_rot = FluxRotated(f_std) From 1946f9d515ed65c98f2fa36ad6e4be88e33a8237 Mon Sep 17 00:00:00 2001 From: Johannes Markert <10619309+jmark@users.noreply.github.com> Date: Fri, 19 Jan 2024 15:45:53 +0100 Subject: [PATCH 075/166] Feature t8code: Extending to 3D (#1535) * Initial commit for the new feature using t8code as meshing backend. * Delete t8code_2d_dgsem * Added new examples and tests. Testing updates for T8code.jl. * Worked in the comments. * Fixed spelling. * Update src/auxiliary/auxiliary.jl Co-authored-by: Hendrik Ranocha * Added whitespace in Unions. * Adapted commented out code block reporting the no. of elements per level. * Added dummy save mesh support for . * Added test . * Added to method signature. * Deleted unnecessary comments. * Removed commented out tests. * Fixed Morton ordering bug in 2D at mortar interfaces. * Disabled `save_solution` callbacks and added more tests. * Added more tests. * Updated code according to the review. 
* Update src/auxiliary/t8code.jl Co-authored-by: Hendrik Ranocha * Update src/auxiliary/t8code.jl Co-authored-by: Hendrik Ranocha * Update src/auxiliary/t8code.jl Co-authored-by: Hendrik Ranocha * Update src/auxiliary/t8code.jl Co-authored-by: Hendrik Ranocha * Update src/meshes/t8code_mesh.jl Co-authored-by: Hendrik Ranocha * Update src/meshes/t8code_mesh.jl Co-authored-by: Hendrik Ranocha * Update src/meshes/t8code_mesh.jl Co-authored-by: Hendrik Ranocha * Update src/meshes/t8code_mesh.jl Co-authored-by: Hendrik Ranocha * Update src/meshes/t8code_mesh.jl Co-authored-by: Hendrik Ranocha * Update src/meshes/t8code_mesh.jl Co-authored-by: Hendrik Ranocha * Update src/solvers/dgsem_t8code/containers_2d.jl Co-authored-by: Hendrik Ranocha * Update src/meshes/t8code_mesh.jl Co-authored-by: Hendrik Ranocha * Code cleanup. * Updated to T8code@0.3.0 * Fixing minor issues. * Fixed typo. * Code cleanup. * Enabled `set_ghost` in examples. * Generalized type info in function signature. * Added namespace qualifier. * Updated comments. * Refactored code and deleted lots of it. * Removed a copy operation. * Initial commit. * Fixing minor bugs. * Fixed minor typo. * Added first 3d example and fixed segfault. * Added many 3D examples and tests. * Backup. * Fixed merging issues. * Adding more tests. * Fixed some merging issues and formatting. * Fixed spelling. * Fixed spelling and changed assert macro. * Applied automatic formatting. * Applied automatic formatting. * Backup. * Removed superfluous outer constructor for T8codeMesh. * Added return statement for consistency. * Fixed wrong indentation by autoformatter. * Added comments. * Made sure an exception is thrown. * Changed flags for sc_init for t8code initialization. * Updated formatting. * Workaround for error about calling MPI routines after MPI has been finalized. * Upped to T8code v0.4.1. * Added mpi_finailize_hook for proper memory cleanup. * Added t8code to test_threaded.jl * Added a `save_mesh_file` call in order to satisfy code coverage. * Improved finalizer logic for T8codeMesh. * Refined code. * Restructured to do blocks. * Moved save_mesh_file call to test file. * Fixed spelling error. * Made sc_finalize optional. * Fixed spelling. * Cleaned up examples. * Updated and cleaned t8code solver codes. * Updated tests for t8code 3D code. * Fixed spelling. * Update elixir_euler_source_terms_nonconforming_unstructured_curved.jl * Update elixir_euler_source_terms_nonconforming_unstructured_curved.jl * Fixed indentation. * Update src/solvers/dgsem_structured/dg_3d.jl Co-authored-by: Hendrik Ranocha * Update src/solvers/dgsem_t8code/containers_3d.jl Co-authored-by: Andrew Winters * Update src/callbacks_step/amr_dg3d.jl Co-authored-by: Andrew Winters * Update examples/t8code_3d_dgsem/elixir_euler_ec.jl Co-authored-by: Andrew Winters * Update examples/t8code_3d_dgsem/elixir_advection_unstructured_curved.jl Co-authored-by: Andrew Winters * Update examples/t8code_3d_dgsem/elixir_advection_amr_unstructured_curved.jl Co-authored-by: Andrew Winters * Update src/solvers/dgsem_structured/dg_3d.jl Co-authored-by: Hendrik Ranocha * Update src/meshes/t8code_mesh.jl Co-authored-by: Hendrik Ranocha * Update src/callbacks_step/analysis_dg3d.jl Co-authored-by: Hendrik Ranocha * Update examples/t8code_3d_dgsem/elixir_euler_free_stream.jl Co-authored-by: Andrew Winters * Removed NDIMS from T8codeMesh construction in case of p4est/p8est connectivity input. * Aligned T8codeMesh constructor with other mesh constructors.
* Update examples/t8code_3d_dgsem/elixir_euler_sedov.jl Co-authored-by: Andrew Winters * Update examples/t8code_3d_dgsem/elixir_euler_sedov.jl Co-authored-by: Andrew Winters * Cleanup up. * Added @allocated test. * Fixed formatting. * Applied formatter. * Code cleanup. * Removed unused member variable. * Apply suggestions from code review Co-authored-by: Daniel Doehring * suggestions from review * fix format (strange?) * Added comments to help interpreting the source code. * Update src/callbacks_step/amr_dg3d.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Adhered to unified mesh constructor calling scheme. * Applied formatter. * Switched to Float64 instead of Cdouble. * Update src/meshes/t8code_mesh.jl Co-authored-by: Daniel Doehring * Refactored negative volume check. * Applied formatter. * Fixed typo resp. bug. * Apply suggestions from code review Co-authored-by: Hendrik Ranocha * add missing allocation checks * Some refactoring. * Deleted msh file. * Fixed a bug. * Code cleanup. * Ignore gmsh files. * Removed adapt! from global namespace. * Added documentation. * Added @test_warn to test. * Applied formatter. * Apply suggestions from code review Co-authored-by: Hendrik Ranocha Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Turned @warn to @info. * Code cleanup and added @deprecated routines in order to avoid breaking release. * Applied formatter. * Added formatter pragmas to avoid ugly formatting. --------- Co-authored-by: Johannes Markert Co-authored-by: Hendrik Ranocha Co-authored-by: Andrew Winters Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> Co-authored-by: Daniel Doehring Co-authored-by: Benedict Geihe Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> --- .github/workflows/ci.yml | 1 + .gitignore | 1 + ...ixir_advection_amr_solution_independent.jl | 4 +- .../elixir_advection_amr_unstructured_flag.jl | 6 +- .../t8code_2d_dgsem/elixir_advection_basic.jl | 4 +- .../elixir_advection_nonconforming_flag.jl | 52 +-- .../elixir_advection_unstructured_flag.jl | 6 +- .../elixir_euler_free_stream.jl | 45 +- .../t8code_2d_dgsem/elixir_euler_sedov.jl | 4 +- .../elixir_euler_shockcapturing_ec.jl | 4 +- ...e_terms_nonconforming_unstructured_flag.jl | 45 +- .../elixir_eulergravity_convergence.jl | 4 +- .../t8code_2d_dgsem/elixir_mhd_alfven_wave.jl | 5 +- examples/t8code_2d_dgsem/elixir_mhd_rotor.jl | 6 +- .../elixir_shallowwater_source_terms.jl | 4 +- .../t8code_3d_dgsem/elixir_advection_amr.jl | 66 +++ ...lixir_advection_amr_unstructured_curved.jl | 105 +++++ .../t8code_3d_dgsem/elixir_advection_basic.jl | 59 +++ .../elixir_advection_nonconforming.jl | 85 ++++ .../elixir_advection_unstructured_curved.jl | 98 +++++ examples/t8code_3d_dgsem/elixir_euler_ec.jl | 92 +++++ .../elixir_euler_free_stream.jl | 118 ++++++ .../elixir_euler_free_stream_extruded.jl | 106 +++++ .../t8code_3d_dgsem/elixir_euler_sedov.jl | 97 +++++ ...terms_nonconforming_unstructured_curved.jl | 120 ++++++ .../elixir_euler_source_terms_nonperiodic.jl | 62 +++ src/auxiliary/t8code.jl | 101 +---- src/callbacks_step/amr_dg2d.jl | 2 +- src/callbacks_step/amr_dg3d.jl | 77 +++- src/callbacks_step/analysis_dg3d.jl | 19 +- src/callbacks_step/stepsize_dg3d.jl | 4 +- src/meshes/t8code_mesh.jl | 387 +++++++++++++++--- src/solvers/dgsem_p4est/containers_3d.jl | 5 +- src/solvers/dgsem_p4est/dg_3d.jl | 29 +- src/solvers/dgsem_structured/dg_3d.jl | 17 +- src/solvers/dgsem_t8code/containers_2d.jl | 19 + 
src/solvers/dgsem_t8code/containers_3d.jl | 235 +++++++++++ src/solvers/dgsem_t8code/dg.jl | 1 + src/solvers/dgsem_tree/dg_3d.jl | 30 +- src/solvers/dgsem_tree/indicators_3d.jl | 3 +- test/runtests.jl | 4 + test/test_t8code_2d.jl | 26 ++ test/test_t8code_3d.jl | 279 +++++++++++++ 43 files changed, 2102 insertions(+), 335 deletions(-) create mode 100644 examples/t8code_3d_dgsem/elixir_advection_amr.jl create mode 100644 examples/t8code_3d_dgsem/elixir_advection_amr_unstructured_curved.jl create mode 100644 examples/t8code_3d_dgsem/elixir_advection_basic.jl create mode 100644 examples/t8code_3d_dgsem/elixir_advection_nonconforming.jl create mode 100644 examples/t8code_3d_dgsem/elixir_advection_unstructured_curved.jl create mode 100644 examples/t8code_3d_dgsem/elixir_euler_ec.jl create mode 100644 examples/t8code_3d_dgsem/elixir_euler_free_stream.jl create mode 100644 examples/t8code_3d_dgsem/elixir_euler_free_stream_extruded.jl create mode 100644 examples/t8code_3d_dgsem/elixir_euler_sedov.jl create mode 100644 examples/t8code_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl create mode 100644 examples/t8code_3d_dgsem/elixir_euler_source_terms_nonperiodic.jl create mode 100644 src/solvers/dgsem_t8code/containers_3d.jl create mode 100644 test/test_t8code_3d.jl diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2c0ea798b49..f287cc5feb2 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -70,6 +70,7 @@ jobs: - p4est_part1 - p4est_part2 - t8code_part1 + - t8code_part2 - unstructured_dgmulti - parabolic - paper_self_gravitating_gas_dynamics diff --git a/.gitignore b/.gitignore index 3132b9af38b..b4f1cf6bb47 100644 --- a/.gitignore +++ b/.gitignore @@ -10,6 +10,7 @@ *.mesh *.bson *.inp +*.msh **/Manifest.toml out*/ docs/build diff --git a/examples/t8code_2d_dgsem/elixir_advection_amr_solution_independent.jl b/examples/t8code_2d_dgsem/elixir_advection_amr_solution_independent.jl index 653bab41e2d..0589e76a6a9 100644 --- a/examples/t8code_2d_dgsem/elixir_advection_amr_solution_independent.jl +++ b/examples/t8code_2d_dgsem/elixir_advection_amr_solution_independent.jl @@ -93,12 +93,10 @@ solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) coordinates_min = (-5.0, -5.0) coordinates_max = (5.0, 5.0) -mapping = Trixi.coordinates2mapping(coordinates_min, coordinates_max) - trees_per_dimension = (1, 1) mesh = T8codeMesh(trees_per_dimension, polydeg = 3, - mapping = mapping, + coordinates_min = coordinates_min, coordinates_max = coordinates_max, initial_refinement_level = 1) semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) diff --git a/examples/t8code_2d_dgsem/elixir_advection_amr_unstructured_flag.jl b/examples/t8code_2d_dgsem/elixir_advection_amr_unstructured_flag.jl index adf1d009a59..f285d24fc6c 100644 --- a/examples/t8code_2d_dgsem/elixir_advection_amr_unstructured_flag.jl +++ b/examples/t8code_2d_dgsem/elixir_advection_amr_unstructured_flag.jl @@ -41,9 +41,9 @@ isfile(mesh_file) || # we can create a t8code mesh. 
conn = Trixi.read_inp_p4est(mesh_file, Val(2)) -mesh = T8codeMesh{2}(conn, polydeg = 3, - mapping = mapping_flag, - initial_refinement_level = 1) +mesh = T8codeMesh(conn, polydeg = 3, + mapping = mapping_flag, + initial_refinement_level = 1) semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, boundary_conditions = boundary_conditions) diff --git a/examples/t8code_2d_dgsem/elixir_advection_basic.jl b/examples/t8code_2d_dgsem/elixir_advection_basic.jl index efc51226586..26ced0970fe 100644 --- a/examples/t8code_2d_dgsem/elixir_advection_basic.jl +++ b/examples/t8code_2d_dgsem/elixir_advection_basic.jl @@ -16,12 +16,10 @@ solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) coordinates_min = (-1.0, -1.0) # minimum coordinates (min(x), min(y)) coordinates_max = (1.0, 1.0) # maximum coordinates (max(x), max(y)) -mapping = Trixi.coordinates2mapping(coordinates_min, coordinates_max) - trees_per_dimension = (8, 8) mesh = T8codeMesh(trees_per_dimension, polydeg = 3, - mapping = mapping, + coordinates_min = coordinates_min, coordinates_max = coordinates_max, initial_refinement_level = 1) # A semidiscretization collects data structures and functions for the spatial discretization diff --git a/examples/t8code_2d_dgsem/elixir_advection_nonconforming_flag.jl b/examples/t8code_2d_dgsem/elixir_advection_nonconforming_flag.jl index 31a8bc93697..a39f3a7e195 100644 --- a/examples/t8code_2d_dgsem/elixir_advection_nonconforming_flag.jl +++ b/examples/t8code_2d_dgsem/elixir_advection_nonconforming_flag.jl @@ -20,31 +20,28 @@ f4(s) = SVector(s, 1.0 + sin(0.5 * pi * s)) faces = (f1, f2, f3, f4) mapping = Trixi.transfinite_mapping(faces) -# Create P4estMesh with 3 x 2 trees and 6 x 4 elements, +# Create T8codeMesh with 3 x 2 trees and 6 x 4 elements, # approximate the geometry with a smaller polydeg for testing. trees_per_dimension = (3, 2) mesh = T8codeMesh(trees_per_dimension, polydeg = 3, mapping = mapping, initial_refinement_level = 1) -function adapt_callback(forest, - forest_from, - which_tree, - lelement_id, - ts, - is_family, - num_elements, - elements_ptr)::Cint - vertex = Vector{Cdouble}(undef, 3) - - elements = unsafe_wrap(Array, elements_ptr, num_elements) - - Trixi.t8_element_vertex_reference_coords(ts, elements[1], 0, pointer(vertex)) +# Note: This is actually a `p4est_quadrant_t` which is much bigger than the +# following struct. But we only need the first three fields for our purpose. +struct t8_dquad_t + x::Int32 + y::Int32 + level::Int8 + # [...] # See `p4est.h` in `p4est` for more info. +end - level = Trixi.t8_element_level(ts, elements[1]) +# Refine quadrants of each tree at lower left edge to level 4. +function adapt_callback(forest, ltreeid, eclass_scheme, lelemntid, elements, is_family, + user_data) + el = unsafe_load(Ptr{t8_dquad_t}(elements[1])) - # TODO: Make this condition more general. - if vertex[1] < 1e-8 && vertex[2] < 1e-8 && level < 4 + if el.x == 0 && el.y == 0 && el.level < 4 # return true (refine) return 1 else @@ -53,26 +50,7 @@ function adapt_callback(forest, end end -Trixi.@T8_ASSERT(Trixi.t8_forest_is_committed(mesh.forest)!=0); - -# Init new forest. -new_forest_ref = Ref{Trixi.t8_forest_t}() -Trixi.t8_forest_init(new_forest_ref); -new_forest = new_forest_ref[] - -# Check out `examples/t8_step4_partition_balance_ghost.jl` in -# https://github.com/DLR-AMR/T8code.jl for detailed explanations. 
-let set_from = C_NULL, recursive = 1, set_for_coarsening = 0, no_repartition = 0 - Trixi.t8_forest_set_user_data(new_forest, C_NULL) - Trixi.t8_forest_set_adapt(new_forest, mesh.forest, - Trixi.@t8_adapt_callback(adapt_callback), recursive) - Trixi.t8_forest_set_balance(new_forest, set_from, no_repartition) - Trixi.t8_forest_set_partition(new_forest, set_from, set_for_coarsening) - Trixi.t8_forest_set_ghost(new_forest, 1, Trixi.T8_GHOST_FACES) - Trixi.t8_forest_commit(new_forest) -end - -mesh.forest = new_forest +Trixi.adapt!(mesh, adapt_callback) # A semidiscretization collects data structures and functions for the spatial discretization semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition_convergence_test, diff --git a/examples/t8code_2d_dgsem/elixir_advection_unstructured_flag.jl b/examples/t8code_2d_dgsem/elixir_advection_unstructured_flag.jl index df9cbc26f6e..5ba1ab15489 100644 --- a/examples/t8code_2d_dgsem/elixir_advection_unstructured_flag.jl +++ b/examples/t8code_2d_dgsem/elixir_advection_unstructured_flag.jl @@ -38,9 +38,9 @@ isfile(mesh_file) || # we can create a t8code mesh. conn = Trixi.read_inp_p4est(mesh_file, Val(2)) -mesh = T8codeMesh{2}(conn, polydeg = 3, - mapping = mapping_flag, - initial_refinement_level = 2) +mesh = T8codeMesh(conn, polydeg = 3, + mapping = mapping_flag, + initial_refinement_level = 2) # A semidiscretization collects data structures and functions for the spatial discretization. semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, diff --git a/examples/t8code_2d_dgsem/elixir_euler_free_stream.jl b/examples/t8code_2d_dgsem/elixir_euler_free_stream.jl index 01e0449c67e..37d15f38566 100644 --- a/examples/t8code_2d_dgsem/elixir_euler_free_stream.jl +++ b/examples/t8code_2d_dgsem/elixir_euler_free_stream.jl @@ -40,25 +40,17 @@ isfile(mesh_file) || # we can create a t8code mesh. conn = Trixi.read_inp_p4est(mesh_file, Val(2)) -mesh = T8codeMesh{2}(conn, polydeg = 3, - mapping = mapping, - initial_refinement_level = 1) - -function adapt_callback(forest, - forest_from, - which_tree, - lelement_id, - ts, - is_family, - num_elements, - elements_ptr)::Cint - vertex = Vector{Cdouble}(undef, 3) +mesh = T8codeMesh(conn, polydeg = 3, + mapping = mapping, + initial_refinement_level = 1) - elements = unsafe_wrap(Array, elements_ptr, num_elements) +function adapt_callback(forest, ltreeid, eclass_scheme, lelemntid, elements, is_family, + user_data) + vertex = Vector{Cdouble}(undef, 3) - Trixi.t8_element_vertex_reference_coords(ts, elements[1], 0, pointer(vertex)) + Trixi.t8_element_vertex_reference_coords(eclass_scheme, elements[1], 0, vertex) - level = Trixi.t8_element_level(ts, elements[1]) + level = Trixi.t8_element_level(eclass_scheme, elements[1]) # TODO: Make this condition more general. if vertex[1] < 1e-8 && vertex[2] < 1e-8 && level < 3 @@ -70,26 +62,7 @@ function adapt_callback(forest, end end -Trixi.@T8_ASSERT(Trixi.t8_forest_is_committed(mesh.forest)!=0); - -# Init new forest. -new_forest_ref = Ref{Trixi.t8_forest_t}() -Trixi.t8_forest_init(new_forest_ref); -new_forest = new_forest_ref[] - -# Check out `examples/t8_step4_partition_balance_ghost.jl` in -# https://github.com/DLR-AMR/T8code.jl for detailed explanations. 
-let set_from = C_NULL, recursive = 1, set_for_coarsening = 0, no_repartition = 0 - Trixi.t8_forest_set_user_data(new_forest, C_NULL) - Trixi.t8_forest_set_adapt(new_forest, mesh.forest, - Trixi.@t8_adapt_callback(adapt_callback), recursive) - Trixi.t8_forest_set_balance(new_forest, set_from, no_repartition) - Trixi.t8_forest_set_partition(new_forest, set_from, set_for_coarsening) - Trixi.t8_forest_set_ghost(new_forest, 1, Trixi.T8_GHOST_FACES) - Trixi.t8_forest_commit(new_forest) -end - -mesh.forest = new_forest +Trixi.adapt!(mesh, adapt_callback) semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, boundary_conditions = Dict(:all => BoundaryConditionDirichlet(initial_condition))) diff --git a/examples/t8code_2d_dgsem/elixir_euler_sedov.jl b/examples/t8code_2d_dgsem/elixir_euler_sedov.jl index 965d794f8dc..82770a4050b 100644 --- a/examples/t8code_2d_dgsem/elixir_euler_sedov.jl +++ b/examples/t8code_2d_dgsem/elixir_euler_sedov.jl @@ -58,12 +58,10 @@ solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux, coordinates_min = (-1.0, -1.0) coordinates_max = (1.0, 1.0) -mapping = Trixi.coordinates2mapping(coordinates_min, coordinates_max) - trees_per_dimension = (4, 4) mesh = T8codeMesh(trees_per_dimension, polydeg = 4, - mapping = mapping, + coordinates_min = coordinates_min, coordinates_max = coordinates_max, initial_refinement_level = 2, periodicity = true) semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) diff --git a/examples/t8code_2d_dgsem/elixir_euler_shockcapturing_ec.jl b/examples/t8code_2d_dgsem/elixir_euler_shockcapturing_ec.jl index 55a9063a001..9ebbd1d28c4 100644 --- a/examples/t8code_2d_dgsem/elixir_euler_shockcapturing_ec.jl +++ b/examples/t8code_2d_dgsem/elixir_euler_shockcapturing_ec.jl @@ -29,12 +29,10 @@ solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux, coordinates_min = (-1.0, -1.0) coordinates_max = (1.0, 1.0) -mapping = Trixi.coordinates2mapping(coordinates_min, coordinates_max) - trees_per_dimension = (4, 4) mesh = T8codeMesh(trees_per_dimension, polydeg = 4, - mapping = mapping, + coordinates_min = coordinates_min, coordinates_max = coordinates_max, initial_refinement_level = 2, periodicity = true) semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) diff --git a/examples/t8code_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl b/examples/t8code_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl index 21f26d79ba8..bcc1abc560e 100644 --- a/examples/t8code_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl +++ b/examples/t8code_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl @@ -40,25 +40,17 @@ isfile(mesh_file) || # we can create a t8code mesh. 
conn = Trixi.read_inp_p4est(mesh_file, Val(2)) -mesh = T8codeMesh{2}(conn, polydeg = 3, - mapping = mapping_flag, - initial_refinement_level = 1) - -function adapt_callback(forest, - forest_from, - which_tree, - lelement_id, - ts, - is_family, - num_elements, - elements_ptr)::Cint - vertex = Vector{Cdouble}(undef, 3) +mesh = T8codeMesh(conn, polydeg = 3, + mapping = mapping_flag, + initial_refinement_level = 1) - elements = unsafe_wrap(Array, elements_ptr, num_elements) +function adapt_callback(forest, ltreeid, eclass_scheme, lelemntid, elements, is_family, + user_data) + vertex = Vector{Cdouble}(undef, 3) - Trixi.t8_element_vertex_reference_coords(ts, elements[1], 0, pointer(vertex)) + Trixi.t8_element_vertex_reference_coords(eclass_scheme, elements[1], 0, pointer(vertex)) - level = Trixi.t8_element_level(ts, elements[1]) + level = Trixi.t8_element_level(eclass_scheme, elements[1]) # TODO: Make this condition more general. if vertex[1] < 1e-8 && vertex[2] < 1e-8 && level < 2 @@ -70,26 +62,7 @@ function adapt_callback(forest, end end -@assert(Trixi.t8_forest_is_committed(mesh.forest)!=0); - -# Init new forest. -new_forest_ref = Ref{Trixi.t8_forest_t}() -Trixi.t8_forest_init(new_forest_ref); -new_forest = new_forest_ref[] - -# Check out `examples/t8_step4_partition_balance_ghost.jl` in -# https://github.com/DLR-AMR/T8code.jl for detailed explanations. -let set_from = C_NULL, recursive = 1, set_for_coarsening = 0, no_repartition = 0 - Trixi.t8_forest_set_user_data(new_forest, C_NULL) - Trixi.t8_forest_set_adapt(new_forest, mesh.forest, - Trixi.@t8_adapt_callback(adapt_callback), recursive) - Trixi.t8_forest_set_balance(new_forest, set_from, no_repartition) - Trixi.t8_forest_set_partition(new_forest, set_from, set_for_coarsening) - Trixi.t8_forest_set_ghost(new_forest, 1, Trixi.T8_GHOST_FACES) - Trixi.t8_forest_commit(new_forest) -end - -mesh.forest = new_forest +Trixi.adapt!(mesh, adapt_callback) semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, source_terms = source_terms, diff --git a/examples/t8code_2d_dgsem/elixir_eulergravity_convergence.jl b/examples/t8code_2d_dgsem/elixir_eulergravity_convergence.jl index 6d6bb27e0c3..98a9a5521a9 100644 --- a/examples/t8code_2d_dgsem/elixir_eulergravity_convergence.jl +++ b/examples/t8code_2d_dgsem/elixir_eulergravity_convergence.jl @@ -16,10 +16,8 @@ coordinates_max = (2.0, 2.0) trees_per_dimension = (1, 1) -mapping = Trixi.coordinates2mapping(coordinates_min, coordinates_max) - mesh = T8codeMesh(trees_per_dimension, polydeg = 1, - mapping = mapping, + coordinates_min = coordinates_min, coordinates_max = coordinates_max, initial_refinement_level = 2) semi_euler = SemidiscretizationHyperbolic(mesh, equations_euler, initial_condition, diff --git a/examples/t8code_2d_dgsem/elixir_mhd_alfven_wave.jl b/examples/t8code_2d_dgsem/elixir_mhd_alfven_wave.jl index 1e2362a123c..e184cb3fd05 100644 --- a/examples/t8code_2d_dgsem/elixir_mhd_alfven_wave.jl +++ b/examples/t8code_2d_dgsem/elixir_mhd_alfven_wave.jl @@ -11,18 +11,17 @@ initial_condition = initial_condition_convergence_test # Get the DG approximation space volume_flux = (flux_central, flux_nonconservative_powell) + solver = DGSEM(polydeg = 4, surface_flux = (flux_hlle, flux_nonconservative_powell), volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) coordinates_min = (0.0, 0.0) coordinates_max = (sqrt(2.0), sqrt(2.0)) -mapping = Trixi.coordinates2mapping(coordinates_min, coordinates_max) - trees_per_dimension = (8, 8) mesh = T8codeMesh(trees_per_dimension, polydeg 
= 3, - mapping = mapping, + coordinates_min = coordinates_min, coordinates_max = coordinates_max, initial_refinement_level = 0, periodicity = true) semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) diff --git a/examples/t8code_2d_dgsem/elixir_mhd_rotor.jl b/examples/t8code_2d_dgsem/elixir_mhd_rotor.jl index 9a4bd99e444..adb154948fb 100644 --- a/examples/t8code_2d_dgsem/elixir_mhd_rotor.jl +++ b/examples/t8code_2d_dgsem/elixir_mhd_rotor.jl @@ -78,9 +78,9 @@ isfile(mesh_file) || # we can create a t8code mesh. conn = Trixi.read_inp_p4est(mesh_file, Val(2)) -mesh = T8codeMesh{2}(conn, polydeg = 4, - mapping = mapping_twist, - initial_refinement_level = 1) +mesh = T8codeMesh(conn, polydeg = 4, + mapping = mapping_twist, + initial_refinement_level = 1) boundary_condition = BoundaryConditionDirichlet(initial_condition) boundary_conditions = Dict(:all => boundary_condition) diff --git a/examples/t8code_2d_dgsem/elixir_shallowwater_source_terms.jl b/examples/t8code_2d_dgsem/elixir_shallowwater_source_terms.jl index b2d5097036f..3610639d554 100644 --- a/examples/t8code_2d_dgsem/elixir_shallowwater_source_terms.jl +++ b/examples/t8code_2d_dgsem/elixir_shallowwater_source_terms.jl @@ -22,12 +22,10 @@ solver = DGSEM(polydeg = 3, coordinates_min = (0.0, 0.0) # minimum coordinates (min(x), min(y)) coordinates_max = (sqrt(2.0), sqrt(2.0)) # maximum coordinates (max(x), max(y)) -mapping = Trixi.coordinates2mapping(coordinates_min, coordinates_max) - trees_per_dimension = (8, 8) mesh = T8codeMesh(trees_per_dimension, polydeg = 3, - mapping = mapping, + coordinates_min = coordinates_min, coordinates_max = coordinates_max, initial_refinement_level = 1) # A semidiscretization collects data structures and functions for the spatial discretization diff --git a/examples/t8code_3d_dgsem/elixir_advection_amr.jl b/examples/t8code_3d_dgsem/elixir_advection_amr.jl new file mode 100644 index 00000000000..5a4b2218d57 --- /dev/null +++ b/examples/t8code_3d_dgsem/elixir_advection_amr.jl @@ -0,0 +1,66 @@ +# The same setup as tree_3d_dgsem/elixir_advection_amr.jl +# to verify the T8codeMesh implementation against TreeMesh. + +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the linear advection equation + +advection_velocity = (0.2, -0.7, 0.5) +equations = LinearScalarAdvectionEquation3D(advection_velocity) + +initial_condition = initial_condition_gauss +solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) + +coordinates_min = (-5.0, -5.0, -5.0) +coordinates_max = (5.0, 5.0, 5.0) +trees_per_dimension = (1, 1, 1) + +# Note that it is not necessary to use mesh polydeg lower than the solver polydeg +# on a Cartesian mesh. +# See https://doi.org/10.1007/s10915-018-00897-9, Section 6. +mesh = T8codeMesh(trees_per_dimension, polydeg = 1, + coordinates_min = coordinates_min, coordinates_max = coordinates_max, + initial_refinement_level = 4) + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + +############################################################################### +# ODE solvers, callbacks etc. 
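Throughout these elixirs, the explicit `Trixi.coordinates2mapping` construction is replaced by the new `coordinates_min`/`coordinates_max` keywords of the `T8codeMesh` constructor. A minimal sketch, assuming the helper stays available for this purpose: the call above is expected to be equivalent to

    mapping = Trixi.coordinates2mapping(coordinates_min, coordinates_max)

    mesh = T8codeMesh(trees_per_dimension, polydeg = 1,
                      mapping = mapping,
                      initial_refinement_level = 4)

Exactly one of `mapping`, `faces`, or `coordinates_min`/`coordinates_max` may be passed; see the updated docstring in `src/meshes/t8code_mesh.jl` further down in this patch.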
+ +tspan = (0.0, 0.3) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval, + extra_analysis_integrals = (entropy,)) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +amr_controller = ControllerThreeLevel(semi, IndicatorMax(semi, variable = first), + base_level = 4, + med_level = 5, med_threshold = 0.1, + max_level = 6, max_threshold = 0.6) +amr_callback = AMRCallback(semi, amr_controller, + interval = 5, + adapt_initial_condition = true, + adapt_initial_condition_only_refine = true) + +stepsize_callback = StepsizeCallback(cfl = 1.2) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + alive_callback, + amr_callback, + stepsize_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); +summary_callback() # print the timer summary diff --git a/examples/t8code_3d_dgsem/elixir_advection_amr_unstructured_curved.jl b/examples/t8code_3d_dgsem/elixir_advection_amr_unstructured_curved.jl new file mode 100644 index 00000000000..617736afbdd --- /dev/null +++ b/examples/t8code_3d_dgsem/elixir_advection_amr_unstructured_curved.jl @@ -0,0 +1,105 @@ +using Downloads: download +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the linear advection equation + +advection_velocity = (0.2, -0.7, 0.5) +equations = LinearScalarAdvectionEquation3D(advection_velocity) + +# Solver with polydeg=4 to ensure free stream preservation (FSP) on non-conforming meshes. +# The polydeg of the solver must be at least twice as big as the polydeg of the mesh. +# See https://doi.org/10.1007/s10915-018-00897-9, Section 6. +solver = DGSEM(polydeg = 4, surface_flux = flux_lax_friedrichs) + +initial_condition = initial_condition_gauss +boundary_condition = BoundaryConditionDirichlet(initial_condition) + +boundary_conditions = Dict(:all => boundary_condition) + +# Mapping as described in https://arxiv.org/abs/2012.12040 but with less warping. +# The mapping will be interpolated at tree level, and then refined without changing +# the geometry interpolant. The original mapping applied to this unstructured mesh +# causes some Jacobians to be negative, which makes the mesh invalid. 
+function mapping(xi, eta, zeta) + # Don't transform input variables between -1 and 1 onto [0,3] to obtain curved boundaries + # xi = 1.5 * xi_ + 1.5 + # eta = 1.5 * eta_ + 1.5 + # zeta = 1.5 * zeta_ + 1.5 + + y = eta + + 1 / 4 * (cos(1.5 * pi * (2 * xi - 3) / 3) * + cos(0.5 * pi * (2 * eta - 3) / 3) * + cos(0.5 * pi * (2 * zeta - 3) / 3)) + + x = xi + + 1 / 4 * (cos(0.5 * pi * (2 * xi - 3) / 3) * + cos(2 * pi * (2 * y - 3) / 3) * + cos(0.5 * pi * (2 * zeta - 3) / 3)) + + z = zeta + + 1 / 4 * (cos(0.5 * pi * (2 * x - 3) / 3) * + cos(pi * (2 * y - 3) / 3) * + cos(0.5 * pi * (2 * zeta - 3) / 3)) + + # Transform the weird deformed cube to be approximately the size of [-5,5]^3 to match IC + return SVector(5 * x, 5 * y, 5 * z) +end + +# Unstructured mesh with 48 cells of the cube domain [-1, 1]^3 +mesh_file = joinpath(@__DIR__, "cube_unstructured_2.inp") +isfile(mesh_file) || + download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp", + mesh_file) + +# INP mesh files are only support by p4est. Hence, we +# create a p4est connectivity object first from which +# we can create a t8code mesh. +conn = Trixi.read_inp_p4est(mesh_file, Val(3)) + +mesh = T8codeMesh(conn, polydeg = 2, + mapping = mapping, + initial_refinement_level = 1) + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions) + +############################################################################### +# ODE solvers, callbacks etc. + +tspan = (0.0, 8.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval, + extra_analysis_integrals = (entropy,)) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +amr_controller = ControllerThreeLevel(semi, IndicatorMax(semi, variable = first), + base_level = 1, + med_level = 2, med_threshold = 0.1, + max_level = 3, max_threshold = 0.6) +amr_callback = AMRCallback(semi, amr_controller, + interval = 5, + adapt_initial_condition = true, + adapt_initial_condition_only_refine = true) + +stepsize_callback = StepsizeCallback(cfl = 1.2) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + alive_callback, + amr_callback, + stepsize_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); +summary_callback() # print the timer summary diff --git a/examples/t8code_3d_dgsem/elixir_advection_basic.jl b/examples/t8code_3d_dgsem/elixir_advection_basic.jl new file mode 100644 index 00000000000..f49462035aa --- /dev/null +++ b/examples/t8code_3d_dgsem/elixir_advection_basic.jl @@ -0,0 +1,59 @@ +# The same setup as tree_3d_dgsem/elixir_advection_basic.jl +# to verify the T8codeMesh implementation against TreeMesh + +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the linear advection equation + +advection_velocity = (0.2, -0.7, 0.5) +equations = LinearScalarAdvectionEquation3D(advection_velocity) + +# Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux +solver = DGSEM(polydeg = 3, 
surface_flux = flux_lax_friedrichs) + +coordinates_min = (-1.0, -1.0, -1.0) # minimum coordinates (min(x), min(y), min(z)) +coordinates_max = (1.0, 1.0, 1.0) # maximum coordinates (max(x), max(y), max(z)) + +# Create P4estMesh with 8 x 8 x 8 elements (note `refinement_level=1`) +trees_per_dimension = (4, 4, 4) +mesh = T8codeMesh(trees_per_dimension, polydeg = 3, + coordinates_min = coordinates_min, coordinates_max = coordinates_max, + initial_refinement_level = 1) + +# A semidiscretization collects data structures and functions for the spatial discretization +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition_convergence_test, + solver) + +############################################################################### +# ODE solvers, callbacks etc. + +# Create ODE problem with time span from 0.0 to 1.0 +tspan = (0.0, 1.0) +ode = semidiscretize(semi, tspan) + +# At the beginning of the main loop, the SummaryCallback prints a summary of the simulation setup +# and resets the timers +summary_callback = SummaryCallback() + +# The AnalysisCallback allows to analyse the solution in regular intervals and prints the results +analysis_callback = AnalysisCallback(semi, interval = 100) + +# The StepsizeCallback handles the re-calculation of the maximum Δt after each time step +stepsize_callback = StepsizeCallback(cfl = 1.2) + +# Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver +callbacks = CallbackSet(summary_callback, analysis_callback, + stepsize_callback) + +############################################################################### +# run the simulation + +# OrdinaryDiffEq's `solve` method evolves the solution in time and executes the passed callbacks +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); + +# Print the timer summary +summary_callback() diff --git a/examples/t8code_3d_dgsem/elixir_advection_nonconforming.jl b/examples/t8code_3d_dgsem/elixir_advection_nonconforming.jl new file mode 100644 index 00000000000..8d7a48370f5 --- /dev/null +++ b/examples/t8code_3d_dgsem/elixir_advection_nonconforming.jl @@ -0,0 +1,85 @@ +using OrdinaryDiffEq +using Trixi + +############################################################################### +# Semidiscretization of the linear advection equation. + +advection_velocity = (0.2, -0.7, 0.5) +equations = LinearScalarAdvectionEquation3D(advection_velocity) + +# Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux. +solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) + +coordinates_min = (-1.0, -1.0, -1.0) # minimum coordinates (min(x), min(y), min(z)) +coordinates_max = (1.0, 1.0, 1.0) # maximum coordinates (max(x), max(y), max(z)) +trees_per_dimension = (1, 1, 1) + +# Note that it is not necessary to use mesh polydeg lower than the solver polydeg +# on a Cartesian mesh. +# See https://doi.org/10.1007/s10915-018-00897-9, Section 6. +mesh = T8codeMesh(trees_per_dimension, polydeg = 3, + coordinates_min = coordinates_min, coordinates_max = coordinates_max, + initial_refinement_level = 2) + +# Note: This is actually a `p8est_quadrant_t` which is much bigger than the +# following struct. But we only need the first four fields for our purpose. +struct t8_dhex_t + x::Int32 + y::Int32 + z::Int32 + level::Int8 + # [...] # See `p8est.h` in `p4est` for more info. 
+end + +# Refine bottom left quadrant of each second tree to level 2 +function adapt_callback(forest, ltreeid, eclass_scheme, lelemntid, elements, is_family, + user_data) + el = unsafe_load(Ptr{t8_dhex_t}(elements[1])) + + if iseven(convert(Int, ltreeid)) && el.x == 0 && el.y == 0 && el.z == 0 && + el.level < 3 + # return true (refine) + return 1 + else + # return false (don't refine) + return 0 + end +end + +Trixi.adapt!(mesh, adapt_callback) + +# A semidiscretization collects data structures and functions for the spatial discretization +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition_convergence_test, + solver) + +############################################################################### +# ODE solvers, callbacks etc. + +# Create ODE problem with time span from 0.0 to 1.0 +tspan = (0.0, 1.0) +ode = semidiscretize(semi, tspan) + +# At the beginning of the main loop, the SummaryCallback prints a summary of the simulation setup +# and resets the timers +summary_callback = SummaryCallback() + +# The AnalysisCallback allows to analyse the solution in regular intervals and prints the results +analysis_callback = AnalysisCallback(semi, interval = 100) + +# The StepsizeCallback handles the re-calculation of the maximum Δt after each time step +stepsize_callback = StepsizeCallback(cfl = 1.6) + +# Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver +callbacks = CallbackSet(summary_callback, analysis_callback, + stepsize_callback) + +############################################################################### +# run the simulation + +# OrdinaryDiffEq's `solve` method evolves the solution in time and executes the passed callbacks +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); + +# Print the timer summary +summary_callback() diff --git a/examples/t8code_3d_dgsem/elixir_advection_unstructured_curved.jl b/examples/t8code_3d_dgsem/elixir_advection_unstructured_curved.jl new file mode 100644 index 00000000000..df358435c9a --- /dev/null +++ b/examples/t8code_3d_dgsem/elixir_advection_unstructured_curved.jl @@ -0,0 +1,98 @@ +using Downloads: download +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the linear advection equation + +advection_velocity = (0.2, -0.7, 0.5) +equations = LinearScalarAdvectionEquation3D(advection_velocity) + +# Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux +solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) + +initial_condition = initial_condition_convergence_test + +boundary_condition = BoundaryConditionDirichlet(initial_condition) +boundary_conditions = Dict(:all => boundary_condition) + +# Mapping as described in https://arxiv.org/abs/2012.12040 but with less warping. +# The mapping will be interpolated at tree level, and then refined without changing +# the geometry interpolant. The original mapping applied to this unstructured mesh +# causes some Jacobians to be negative, which makes the mesh invalid. 
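A related sanity check lives in the updated `T8codeMesh` constructor later in this patch: for every tree of an imported `cmesh`, the signed volume spanned by the corner vertices is computed and a warning is emitted if it is negative. A minimal sketch of that check for a single 2D tree, adapted from `src/meshes/t8code_mesh.jl` (`verts` is assumed to hold the tree corner coordinates in z-order):

    using LinearAlgebra: cross, dot

    u = verts[:, 2] - verts[:, 1]
    v = verts[:, 3] - verts[:, 1]
    w = [0.0, 0.0, 1.0]

    # The triple product gives the signed volume of the spanned parallelepiped;
    # a negative value indicates an inverted (left-handed) tree.
    vol = dot(cross(u, v), w)
    vol < 0.0 && @warn "Discovered negative volumes in `cmesh`: vol = $vol"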
+function mapping(xi, eta, zeta) + # Don't transform input variables between -1 and 1 onto [0,3] to obtain curved boundaries + # xi = 1.5 * xi_ + 1.5 + # eta = 1.5 * eta_ + 1.5 + # zeta = 1.5 * zeta_ + 1.5 + + y = eta + + 1 / 6 * (cos(1.5 * pi * (2 * xi - 3) / 3) * + cos(0.5 * pi * (2 * eta - 3) / 3) * + cos(0.5 * pi * (2 * zeta - 3) / 3)) + + x = xi + + 1 / 6 * (cos(0.5 * pi * (2 * xi - 3) / 3) * + cos(2 * pi * (2 * y - 3) / 3) * + cos(0.5 * pi * (2 * zeta - 3) / 3)) + + z = zeta + + 1 / 6 * (cos(0.5 * pi * (2 * x - 3) / 3) * + cos(pi * (2 * y - 3) / 3) * + cos(0.5 * pi * (2 * zeta - 3) / 3)) + + return SVector(x, y, z) +end + +# Unstructured mesh with 68 cells of the cube domain [-1, 1]^3 +mesh_file = joinpath(@__DIR__, "cube_unstructured_1.inp") +isfile(mesh_file) || + download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp", + mesh_file) + +# INP mesh files are only support by p4est. Hence, we +# create a p4est connectivity object first from which +# we can create a t8code mesh. +conn = Trixi.read_inp_p4est(mesh_file, Val(3)) + +mesh = T8codeMesh(conn, polydeg = 3, + mapping = mapping, + initial_refinement_level = 2) + +# A semidiscretization collects data structures and functions for the spatial discretization +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions) + +############################################################################### +# ODE solvers, callbacks etc. + +# Create ODE problem with time span from 0.0 to 0.1 +ode = semidiscretize(semi, (0.0, 0.1)); + +# At the beginning of the main loop, the SummaryCallback prints a summary of the simulation setup +# and resets the timers +summary_callback = SummaryCallback() + +# The AnalysisCallback allows to analyse the solution in regular intervals and prints the results +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +# The StepsizeCallback handles the re-calculation of the maximum Δt after each time step +stepsize_callback = StepsizeCallback(cfl = 1.2) + +# Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver +callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, + stepsize_callback) + +############################################################################### +# run the simulation + +# OrdinaryDiffEq's `solve` method evolves the solution in time and executes the passed callbacks +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); + +# Print the timer summary +summary_callback() diff --git a/examples/t8code_3d_dgsem/elixir_euler_ec.jl b/examples/t8code_3d_dgsem/elixir_euler_ec.jl new file mode 100644 index 00000000000..07745c3ac56 --- /dev/null +++ b/examples/t8code_3d_dgsem/elixir_euler_ec.jl @@ -0,0 +1,92 @@ +using Downloads: download +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the compressible Euler equations + +equations = CompressibleEulerEquations3D(5 / 3) + +initial_condition = initial_condition_weak_blast_wave + +boundary_conditions = Dict(:all => boundary_condition_slip_wall) + +# Get the DG 
approximation space + +volume_flux = flux_ranocha +solver = DGSEM(polydeg = 5, surface_flux = flux_ranocha, + volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) + +# Get the curved quad mesh from a file + +# Mapping as described in https://arxiv.org/abs/2012.12040 +function mapping(xi_, eta_, zeta_) + # Transform input variables between -1 and 1 onto [0,3] + xi = 1.5 * xi_ + 1.5 + eta = 1.5 * eta_ + 1.5 + zeta = 1.5 * zeta_ + 1.5 + + y = eta + + 3 / 8 * (cos(1.5 * pi * (2 * xi - 3) / 3) * + cos(0.5 * pi * (2 * eta - 3) / 3) * + cos(0.5 * pi * (2 * zeta - 3) / 3)) + + x = xi + + 3 / 8 * (cos(0.5 * pi * (2 * xi - 3) / 3) * + cos(2 * pi * (2 * y - 3) / 3) * + cos(0.5 * pi * (2 * zeta - 3) / 3)) + + z = zeta + + 3 / 8 * (cos(0.5 * pi * (2 * x - 3) / 3) * + cos(pi * (2 * y - 3) / 3) * + cos(0.5 * pi * (2 * zeta - 3) / 3)) + + return SVector(x, y, z) +end + +# Unstructured mesh with 48 cells of the cube domain [-1, 1]^3 +mesh_file = joinpath(@__DIR__, "cube_unstructured_2.inp") +isfile(mesh_file) || + download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp", + mesh_file) + +# INP mesh files are only support by p4est. Hence, we +# create a p4est connectivity object first from which +# we can create a t8code mesh. +conn = Trixi.read_inp_p4est(mesh_file, Val(3)) + +mesh = T8codeMesh(conn, polydeg = 5, + mapping = mapping, + initial_refinement_level = 0) + +# Create the semidiscretization object. +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions) + +############################################################################### +# ODE solvers, callbacks etc. + +tspan = (0.0, 2.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +stepsize_callback = StepsizeCallback(cfl = 1.0) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + alive_callback, + stepsize_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); +summary_callback() # print the timer summary diff --git a/examples/t8code_3d_dgsem/elixir_euler_free_stream.jl b/examples/t8code_3d_dgsem/elixir_euler_free_stream.jl new file mode 100644 index 00000000000..e135d464810 --- /dev/null +++ b/examples/t8code_3d_dgsem/elixir_euler_free_stream.jl @@ -0,0 +1,118 @@ +using Downloads: download +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the compressible Euler equations + +equations = CompressibleEulerEquations3D(1.4) + +initial_condition = initial_condition_constant + +boundary_conditions = Dict(:all => BoundaryConditionDirichlet(initial_condition)) + +# Solver with polydeg=4 to ensure free stream preservation (FSP) on non-conforming meshes. +# The polydeg of the solver must be at least twice as big as the polydeg of the mesh. +# See https://doi.org/10.1007/s10915-018-00897-9, Section 6. 
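Concretely, the solver defined next uses `polydeg = 4`, while the mesh created further below uses `polydeg = 2`. A sketch of just the mesh call (the complete version with all arguments follows below):

    mesh = T8codeMesh(conn, polydeg = 2,    # half the solver polydeg, sufficient for FSP
                      mapping = mapping,
                      initial_refinement_level = 0)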
+solver = DGSEM(polydeg = 4, surface_flux = flux_lax_friedrichs, + volume_integral = VolumeIntegralWeakForm()) + +# Mapping as described in https://arxiv.org/abs/2012.12040 but with less warping. +# The mapping will be interpolated at tree level, and then refined without changing +# the geometry interpolant. This can yield problematic geometries if the unrefined mesh +# is not fine enough. +function mapping(xi_, eta_, zeta_) + # Transform input variables between -1 and 1 onto [0,3] + xi = 1.5 * xi_ + 1.5 + eta = 1.5 * eta_ + 1.5 + zeta = 1.5 * zeta_ + 1.5 + + y = eta + + 1 / 6 * (cos(1.5 * pi * (2 * xi - 3) / 3) * + cos(0.5 * pi * (2 * eta - 3) / 3) * + cos(0.5 * pi * (2 * zeta - 3) / 3)) + + x = xi + + 1 / 6 * (cos(0.5 * pi * (2 * xi - 3) / 3) * + cos(2 * pi * (2 * y - 3) / 3) * + cos(0.5 * pi * (2 * zeta - 3) / 3)) + + z = zeta + + 1 / 6 * (cos(0.5 * pi * (2 * x - 3) / 3) * + cos(pi * (2 * y - 3) / 3) * + cos(0.5 * pi * (2 * zeta - 3) / 3)) + + return SVector(x, y, z) +end + +# Unstructured mesh with 68 cells of the cube domain [-1, 1]^3 +mesh_file = joinpath(@__DIR__, "cube_unstructured_1.inp") +isfile(mesh_file) || + download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp", + mesh_file) + +# INP mesh files are only support by p4est. Hence, we +# create a p4est connectivity object first from which +# we can create a t8code mesh. +conn = Trixi.read_inp_p4est(mesh_file, Val(3)) + +mesh = T8codeMesh(conn, polydeg = 2, + mapping = mapping, + initial_refinement_level = 0) + +# Note: This is actually a `p8est_quadrant_t` which is much bigger than the +# following struct. But we only need the first four fields for our purpose. +struct t8_dhex_t + x::Int32 + y::Int32 + z::Int32 + level::Int8 + # [...] # See `p8est.h` in `p4est` for more info. +end + +# Refine bottom left quadrant of each second tree to level 2 +function adapt_callback(forest, ltreeid, eclass_scheme, lelemntid, elements, is_family, + user_data) + el = unsafe_load(Ptr{t8_dhex_t}(elements[1])) + + if iseven(convert(Int, ltreeid)) && el.x == 0 && el.y == 0 && el.z == 0 && + el.level < 2 + # return true (refine) + return 1 + else + # return false (don't refine) + return 0 + end +end + +Trixi.adapt!(mesh, adapt_callback) + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions) + +############################################################################### +# ODE solvers, callbacks etc. 
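The adaptation step above follows the t8code callback convention: the callback receives the forest, the local tree id, the element class scheme, the local element id, the element array, and a family flag, and returns `1` to refine an element or `0` to keep it (t8code itself also accepts a negative return value to coarsen a complete family, which these elixirs do not use). A minimal sketch of a callback with the same shape that refines everything up to a fixed level, reusing the element-level query already employed in the 2D elixirs:

    function refine_to_level_2(forest, ltreeid, eclass_scheme, lelementid, elements,
                               is_family, user_data)
        level = Trixi.t8_element_level(eclass_scheme, elements[1])
        return level < 2 ? 1 : 0  # 1 = refine, 0 = keep
    end

    Trixi.adapt!(mesh, refine_to_level_2)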
+ +tspan = (0.0, 1.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +stepsize_callback = StepsizeCallback(cfl = 1.2) + +callbacks = CallbackSet(summary_callback, + analysis_callback, alive_callback, + stepsize_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); +summary_callback() # print the timer summary diff --git a/examples/t8code_3d_dgsem/elixir_euler_free_stream_extruded.jl b/examples/t8code_3d_dgsem/elixir_euler_free_stream_extruded.jl new file mode 100644 index 00000000000..d129b59826e --- /dev/null +++ b/examples/t8code_3d_dgsem/elixir_euler_free_stream_extruded.jl @@ -0,0 +1,106 @@ +using Downloads: download +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the compressible Euler equations + +equations = CompressibleEulerEquations3D(1.4) + +initial_condition = initial_condition_constant + +boundary_conditions = Dict(:all => BoundaryConditionDirichlet(initial_condition)) + +solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs, + volume_integral = VolumeIntegralWeakForm()) + +# Mapping as described in https://arxiv.org/abs/2012.12040 but reduced to 2D. +# This particular mesh is unstructured in the yz-plane, but extruded in x-direction. +# Apply the warping mapping in the yz-plane to get a curved 2D mesh that is extruded +# in x-direction to ensure free stream preservation on a non-conforming mesh. +# See https://doi.org/10.1007/s10915-018-00897-9, Section 6. +function mapping(xi, eta_, zeta_) + # Transform input variables between -1 and 1 onto [0,3] + eta = 1.5 * eta_ + 1.5 + zeta = 1.5 * zeta_ + 1.5 + + z = zeta + + 1 / 6 * (cos(1.5 * pi * (2 * eta - 3) / 3) * + cos(0.5 * pi * (2 * zeta - 3) / 3)) + + y = eta + 1 / 6 * (cos(0.5 * pi * (2 * eta - 3) / 3) * + cos(2 * pi * (2 * z - 3) / 3)) + + return SVector(xi, y, z) +end + +# Unstructured mesh with 48 cells of the cube domain [-1, 1]^3 +mesh_file = joinpath(@__DIR__, "cube_unstructured_2.inp") +isfile(mesh_file) || + download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp", + mesh_file) + +# INP mesh files are only support by p4est. Hence, we +# create a p4est connecvity object first from which +# we can create a t8code mesh. +conn = Trixi.read_inp_p4est(mesh_file, Val(3)) + +mesh = T8codeMesh(conn, polydeg = 3, + mapping = mapping, + initial_refinement_level = 0) + +# Note: This is actually a `p8est_quadrant_t` which is much bigger than the +# following struct. But we only need the first four fields for our purpose. +struct t8_dhex_t + x::Int32 + y::Int32 + z::Int32 + level::Int8 + # [...] # See `p8est.h` in `p4est` for more info. 
+end + +# Refine quadrants in y-direction of each tree at one edge to level 2 +function adapt_callback(forest, ltreeid, eclass_scheme, lelemntid, elements, is_family, + user_data) + el = unsafe_load(Ptr{t8_dhex_t}(elements[1])) + + if convert(Int, ltreeid) < 4 && el.x == 0 && el.y == 0 && el.level < 2 + # return true (refine) + return 1 + else + # return false (don't refine) + return 0 + end +end + +Trixi.adapt!(mesh, adapt_callback) + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions) + +############################################################################### +# ODE solvers, callbacks etc. + +tspan = (0.0, 1.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +stepsize_callback = StepsizeCallback(cfl = 1.2) + +callbacks = CallbackSet(summary_callback, + analysis_callback, alive_callback, + stepsize_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), #maxiters=1, + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); +summary_callback() # print the timer summary diff --git a/examples/t8code_3d_dgsem/elixir_euler_sedov.jl b/examples/t8code_3d_dgsem/elixir_euler_sedov.jl new file mode 100644 index 00000000000..618b170b661 --- /dev/null +++ b/examples/t8code_3d_dgsem/elixir_euler_sedov.jl @@ -0,0 +1,97 @@ +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the compressible Euler equations + +equations = CompressibleEulerEquations3D(1.4) + +""" + initial_condition_medium_sedov_blast_wave(x, t, equations::CompressibleEulerEquations3D) + +The Sedov blast wave setup based on Flash +- https://flash.rochester.edu/site/flashcode/user_support/flash_ug_devel/node187.html#SECTION010114000000000000000 +with smaller strength of the initial discontinuity. +""" +function initial_condition_medium_sedov_blast_wave(x, t, + equations::CompressibleEulerEquations3D) + # Set up polar coordinates + inicenter = SVector(0.0, 0.0, 0.0) + x_norm = x[1] - inicenter[1] + y_norm = x[2] - inicenter[2] + z_norm = x[3] - inicenter[3] + r = sqrt(x_norm^2 + y_norm^2 + z_norm^2) + + # Setup based on https://flash.rochester.edu/site/flashcode/user_support/flash_ug_devel/node187.html#SECTION010114000000000000000 + r0 = 0.21875 # = 3.5 * smallest dx (for domain length=4 and max-ref=6) + E = 1.0 + p0_inner = 3 * (equations.gamma - 1) * E / (4 * pi * r0^2) + p0_outer = 1.0e-3 + + # Calculate primitive variables + rho = 1.0 + v1 = 0.0 + v2 = 0.0 + v3 = 0.0 + p = r > r0 ? 
p0_outer : p0_inner + + return prim2cons(SVector(rho, v1, v2, v3, p), equations) +end + +initial_condition = initial_condition_medium_sedov_blast_wave + +surface_flux = flux_lax_friedrichs +volume_flux = flux_ranocha +polydeg = 5 +basis = LobattoLegendreBasis(polydeg) +indicator_sc = IndicatorHennemannGassner(equations, basis, + alpha_max = 1.0, + alpha_min = 0.001, + alpha_smooth = true, + variable = density_pressure) +volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; + volume_flux_dg = volume_flux, + volume_flux_fv = surface_flux) + +solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux, + volume_integral = volume_integral) + +coordinates_min = (-1.0, -1.0, -1.0) +coordinates_max = (1.0, 1.0, 1.0) + +trees_per_dimension = (4, 4, 4) +mesh = T8codeMesh(trees_per_dimension, + polydeg = 4, initial_refinement_level = 0, + coordinates_min = coordinates_min, coordinates_max = coordinates_max, + periodicity = true) + +# create the semi discretization object +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + +############################################################################### +# ODE solvers, callbacks etc. + +tspan = (0.0, 12.5) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +stepsize_callback = StepsizeCallback(cfl = 0.5) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + alive_callback, + stepsize_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); +summary_callback() # print the timer summary diff --git a/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl b/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl new file mode 100644 index 00000000000..d4664522bea --- /dev/null +++ b/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl @@ -0,0 +1,120 @@ +using Downloads: download +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the compressible Euler equations + +equations = CompressibleEulerEquations3D(1.4) + +initial_condition = initial_condition_convergence_test + +boundary_condition = BoundaryConditionDirichlet(initial_condition) +boundary_conditions = Dict(:all => boundary_condition) + +# Solver with polydeg=4 to ensure free stream preservation (FSP) on non-conforming meshes. +# The polydeg of the solver must be at least twice as big as the polydeg of the mesh. +# See https://doi.org/10.1007/s10915-018-00897-9, Section 6. +solver = DGSEM(polydeg = 4, surface_flux = flux_lax_friedrichs, + volume_integral = VolumeIntegralWeakForm()) + +# Mapping as described in https://arxiv.org/abs/2012.12040 but with less warping. +# The mapping will be interpolated at tree level, and then refined without changing +# the geometry interpolant. The original mapping applied to this unstructured mesh +# causes some Jacobians to be negative, which makes the mesh invalid. 
+function mapping(xi, eta, zeta) + # Don't transform input variables between -1 and 1 onto [0,3] to obtain curved boundaries + # xi = 1.5 * xi_ + 1.5 + # eta = 1.5 * eta_ + 1.5 + # zeta = 1.5 * zeta_ + 1.5 + + y = eta + + 1 / 6 * (cos(1.5 * pi * (2 * xi - 3) / 3) * + cos(0.5 * pi * (2 * eta - 3) / 3) * + cos(0.5 * pi * (2 * zeta - 3) / 3)) + + x = xi + + 1 / 6 * (cos(0.5 * pi * (2 * xi - 3) / 3) * + cos(2 * pi * (2 * y - 3) / 3) * + cos(0.5 * pi * (2 * zeta - 3) / 3)) + + z = zeta + + 1 / 6 * (cos(0.5 * pi * (2 * x - 3) / 3) * + cos(pi * (2 * y - 3) / 3) * + cos(0.5 * pi * (2 * zeta - 3) / 3)) + + # Transform the weird deformed cube to be approximately the cube [0,2]^3 + return SVector(x + 1, y + 1, z + 1) +end + +# Unstructured mesh with 68 cells of the cube domain [-1, 1]^3 +mesh_file = joinpath(@__DIR__, "cube_unstructured_1.inp") +isfile(mesh_file) || + download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp", + mesh_file) + +# INP mesh files are only support by p4est. Hence, we +# create a p4est connecvity object first from which +# we can create a t8code mesh. +conn = Trixi.read_inp_p4est(mesh_file, Val(3)) + +# Mesh polydeg of 2 (half the solver polydeg) to ensure FSP (see above). +mesh = T8codeMesh(conn, polydeg = 2, + mapping = mapping, + initial_refinement_level = 0) + +# Note: This is actually a `p8est_quadrant_t` which is much bigger than the +# following struct. But we only need the first four fields for our purpose. +struct t8_dhex_t + x::Int32 + y::Int32 + z::Int32 + level::Int8 + # [...] # See `p8est.h` in `p4est` for more info. +end + +function adapt_callback(forest, ltreeid, eclass_scheme, lelemntid, elements, is_family, + user_data) + el = unsafe_load(Ptr{t8_dhex_t}(elements[1])) + + if el.x == 0 && el.y == 0 && el.z == 0 && el.level < 2 + # return true (refine) + return 1 + else + # return false (don't refine) + return 0 + end +end + +Trixi.adapt!(mesh, adapt_callback) + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + source_terms = source_terms_convergence_test, + boundary_conditions = boundary_conditions) + +############################################################################### +# ODE solvers, callbacks etc. 
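Here the `:all` key assigns the same Dirichlet condition to all boundaries of the imported mesh. For the brick constructor used in other elixirs of this patch, non-periodic boundaries are instead named per coordinate direction (`:x_neg`, `:x_pos`, `:y_neg`, `:y_pos`, `:z_neg`, `:z_pos`) and periodicity can be chosen per dimension, so a hypothetical mixed setup could look like

    trees_per_dimension = (2, 2, 2)
    mesh = T8codeMesh(trees_per_dimension, polydeg = 1,
                      coordinates_min = (0.0, 0.0, 0.0), coordinates_max = (2.0, 2.0, 2.0),
                      periodicity = (true, true, false),  # periodic in x and y only
                      initial_refinement_level = 1)
    boundary_conditions = Dict(:z_neg => boundary_condition,
                               :z_pos => boundary_condition)

See the updated `T8codeMesh` docstring and constructor in `src/meshes/t8code_mesh.jl` below for the naming convention.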
+ +tspan = (0.0, 0.045) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +stepsize_callback = StepsizeCallback(cfl = 0.6) + +callbacks = CallbackSet(summary_callback, + analysis_callback, alive_callback, + stepsize_callback); + +############################################################################### +# run the simulation + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); +summary_callback() # print the timer summary diff --git a/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonperiodic.jl b/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonperiodic.jl new file mode 100644 index 00000000000..7cb03bb312d --- /dev/null +++ b/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonperiodic.jl @@ -0,0 +1,62 @@ +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the compressible Euler equations + +equations = CompressibleEulerEquations3D(1.4) + +initial_condition = initial_condition_convergence_test + +boundary_condition = BoundaryConditionDirichlet(initial_condition) +boundary_conditions = Dict(:x_neg => boundary_condition, + :x_pos => boundary_condition, + :y_neg => boundary_condition, + :y_pos => boundary_condition, + :z_neg => boundary_condition, + :z_pos => boundary_condition) + +solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs, + volume_integral = VolumeIntegralWeakForm()) + +coordinates_min = (0.0, 0.0, 0.0) +coordinates_max = (2.0, 2.0, 2.0) + +trees_per_dimension = (2, 2, 2) + +mapping = Trixi.coordinates2mapping(coordinates_min, coordinates_max) + +mesh = T8codeMesh(trees_per_dimension, polydeg = 1, + mapping = mapping, + periodicity = false, initial_refinement_level = 1) + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + source_terms = source_terms_convergence_test, + boundary_conditions = boundary_conditions) + +############################################################################### +# ODE solvers, callbacks etc. + +tspan = (0.0, 5.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +stepsize_callback = StepsizeCallback(cfl = 0.6) + +callbacks = CallbackSet(summary_callback, + analysis_callback, alive_callback, + stepsize_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); +summary_callback() # print the timer summary diff --git a/src/auxiliary/t8code.jl b/src/auxiliary/t8code.jl index bd781b21c1e..db01476bb86 100644 --- a/src/auxiliary/t8code.jl +++ b/src/auxiliary/t8code.jl @@ -35,7 +35,7 @@ function init_t8code() # production runs this is not mandatory, but is helpful during # development. Hence, this option is only activated when environment # variable TRIXI_T8CODE_SC_FINALIZE exists. 
- @warn "T8code.jl: sc_finalize will be called during shutdown of Trixi.jl." + @info "T8code.jl: `sc_finalize` will be called during shutdown of Trixi.jl." MPI.add_finalize_hook!(T8code.Libt8.sc_finalize) end else @@ -116,7 +116,6 @@ function trixi_t8_count_interfaces(forest) elseif level < neighbor_level local_num_mortars += 1 end - else local_num_boundary += 1 end @@ -219,38 +218,9 @@ function trixi_t8_fill_mesh_info(forest, elements, interfaces, mortars, boundari interfaces.neighbor_ids[1, interface_id] = current_index + 1 interfaces.neighbor_ids[2, interface_id] = neighbor_ielements[1] + 1 - # Iterate over primary and secondary element. - for side in 1:2 - # Align interface in positive coordinate direction of primary element. - # For orientation == 1, the secondary element needs to be indexed backwards - # relative to the interface. - if side == 1 || orientation == 0 - # Forward indexing - indexing = :i_forward - else - # Backward indexing - indexing = :i_backward - end - - if faces[side] == 0 - # Index face in negative x-direction - interfaces.node_indices[side, interface_id] = (:begin, - indexing) - elseif faces[side] == 1 - # Index face in positive x-direction - interfaces.node_indices[side, interface_id] = (:end, - indexing) - elseif faces[side] == 2 - # Index face in negative y-direction - interfaces.node_indices[side, interface_id] = (indexing, - :begin) - else # faces[side] == 3 - # Index face in positive y-direction - interfaces.node_indices[side, interface_id] = (indexing, - :end) - end - end - + # Save interfaces.node_indices dimension specific in containers_3d.jl. + init_interface_node_indices!(interfaces, faces, orientation, + interface_id) # Non-conforming interface. elseif level < neighbor_level local_num_mortars += 1 @@ -262,42 +232,13 @@ function trixi_t8_fill_mesh_info(forest, elements, interfaces, mortars, boundari # Last entry is the large element. mortars.neighbor_ids[end, mortar_id] = current_index + 1 - # First `1:end-1` entries are the smaller elements. - mortars.neighbor_ids[1:(end - 1), mortar_id] .= neighbor_ielements .+ - 1 - - for side in 1:2 - # Align mortar in positive coordinate direction of small side. - # For orientation == 1, the large side needs to be indexed backwards - # relative to the mortar. - if side == 1 || orientation == 0 - # Forward indexing for small side or orientation == 0. - indexing = :i_forward - else - # Backward indexing for large side with reversed orientation. - indexing = :i_backward - # Since the orientation is reversed we have to account for this - # when filling the `neighbor_ids` array. - mortars.neighbor_ids[1, mortar_id] = neighbor_ielements[2] + - 1 - mortars.neighbor_ids[2, mortar_id] = neighbor_ielements[1] + - 1 - end - - if faces[side] == 0 - # Index face in negative x-direction - mortars.node_indices[side, mortar_id] = (:begin, indexing) - elseif faces[side] == 1 - # Index face in positive x-direction - mortars.node_indices[side, mortar_id] = (:end, indexing) - elseif faces[side] == 2 - # Index face in negative y-direction - mortars.node_indices[side, mortar_id] = (indexing, :begin) - else # faces[side] == 3 - # Index face in positive y-direction - mortars.node_indices[side, mortar_id] = (indexing, :end) - end - end + # Fill in the `mortars.neighbor_ids` array and reorder if necessary. + init_mortar_neighbor_ids!(mortars, faces[2], faces[1], + orientation, neighbor_ielements, + mortar_id) + + # Fill in the `mortars.node_indices` array. 
+ init_mortar_node_indices!(mortars, faces, orientation, mortar_id) # else: "level > neighbor_level" is skipped since we visit the mortar interface only once. end @@ -309,19 +250,7 @@ function trixi_t8_fill_mesh_info(forest, elements, interfaces, mortars, boundari boundaries.neighbor_ids[boundary_id] = current_index + 1 - if iface == 0 - # Index face in negative x-direction. - boundaries.node_indices[boundary_id] = (:begin, :i_forward) - elseif iface == 1 - # Index face in positive x-direction. - boundaries.node_indices[boundary_id] = (:end, :i_forward) - elseif iface == 2 - # Index face in negative y-direction. - boundaries.node_indices[boundary_id] = (:i_forward, :begin) - else # iface == 3 - # Index face in positive y-direction. - boundaries.node_indices[boundary_id] = (:i_forward, :end) - end + init_boundary_node_indices!(boundaries, iface, boundary_id) # One-based indexing. boundaries.name[boundary_id] = boundary_names[iface + 1, itree + 1] @@ -420,13 +349,15 @@ function trixi_t8_adapt_new(old_forest, indicators) t8_forest_init(new_forest_ref) new_forest = new_forest_ref[] - let set_from = C_NULL, recursive = 0, set_for_coarsening = 0, no_repartition = 0 + let set_from = C_NULL, recursive = 0, set_for_coarsening = 0, no_repartition = 0, + do_ghost = 1 + t8_forest_set_user_data(new_forest, pointer(indicators)) t8_forest_set_adapt(new_forest, old_forest, @t8_adapt_callback(adapt_callback), recursive) t8_forest_set_balance(new_forest, set_from, no_repartition) t8_forest_set_partition(new_forest, set_from, set_for_coarsening) - t8_forest_set_ghost(new_forest, 1, T8_GHOST_FACES) # Note: MPI support not available yet so it is a dummy call. + t8_forest_set_ghost(new_forest, do_ghost, T8_GHOST_FACES) # Note: MPI support not available yet so it is a dummy call. t8_forest_commit(new_forest) end diff --git a/src/callbacks_step/amr_dg2d.jl b/src/callbacks_step/amr_dg2d.jl index 98e531295b7..b816bc06e65 100644 --- a/src/callbacks_step/amr_dg2d.jl +++ b/src/callbacks_step/amr_dg2d.jl @@ -396,7 +396,7 @@ function adapt!(u_ode::AbstractVector, adaptor, mesh::T8codeMesh{2}, equations, old_index = 1 new_index = 1 - # Note: This is true for `quads` only. + # Note: This is true for `quads`. T8_CHILDREN = 4 # Retain current solution data. diff --git a/src/callbacks_step/amr_dg3d.jl b/src/callbacks_step/amr_dg3d.jl index c8abe6fdb05..392cbba9e28 100644 --- a/src/callbacks_step/amr_dg3d.jl +++ b/src/callbacks_step/amr_dg3d.jl @@ -304,9 +304,84 @@ end # this method is called when an `ControllerThreeLevel` is constructed function create_cache(::Type{ControllerThreeLevel}, - mesh::Union{TreeMesh{3}, P4estMesh{3}}, + mesh::Union{TreeMesh{3}, P4estMesh{3}, T8codeMesh{3}}, equations, dg::DG, cache) controller_value = Vector{Int}(undef, nelements(dg, cache)) return (; controller_value) end + +# Coarsen and refine elements in the DG solver based on a difference list. +function adapt!(u_ode::AbstractVector, adaptor, mesh::T8codeMesh{3}, equations, + dg::DGSEM, cache, difference) + + # Return early if there is nothing to do. + if !any(difference .!= 0) + return nothing + end + + # Number of (local) cells/elements. + old_nelems = nelements(dg, cache) + new_nelems = ncells(mesh) + + # Local element indices. + old_index = 1 + new_index = 1 + + # Note: This is only true for `hexs`. + T8_CHILDREN = 8 + + # Retain current solution data. 
+ old_u_ode = copy(u_ode) + + GC.@preserve old_u_ode begin + old_u = wrap_array(old_u_ode, mesh, equations, dg, cache) + + reinitialize_containers!(mesh, equations, dg, cache) + + resize!(u_ode, + nvariables(equations) * ndofs(mesh, dg, cache)) + u = wrap_array(u_ode, mesh, equations, dg, cache) + + u_tmp1 = Array{eltype(u), 4}(undef, nvariables(equations), nnodes(dg), + nnodes(dg), nnodes(dg)) + u_tmp2 = Array{eltype(u), 4}(undef, nvariables(equations), nnodes(dg), + nnodes(dg), nnodes(dg)) + + while old_index <= old_nelems && new_index <= new_nelems + if difference[old_index] > 0 # Refine. + + # Refine element and store solution directly in new data structure. + refine_element!(u, new_index, old_u, old_index, adaptor, equations, dg, + u_tmp1, u_tmp2) + + old_index += 1 + new_index += T8_CHILDREN + + elseif difference[old_index] < 0 # Coarsen. + + # If an element is to be removed, sanity check if the following elements + # are also marked - otherwise there would be an error in the way the + # cells/elements are sorted. + @assert all(difference[old_index:(old_index + T8_CHILDREN - 1)] .< 0) "bad cell/element order" + + # Coarsen elements and store solution directly in new data structure. + coarsen_elements!(u, new_index, old_u, old_index, adaptor, equations, + dg, u_tmp1, u_tmp2) + + old_index += T8_CHILDREN + new_index += 1 + + else # No changes. + + # Copy old element data to new element container. + @views u[:, .., new_index] .= old_u[:, .., old_index] + + old_index += 1 + new_index += 1 + end + end # while + end # GC.@preserve old_u_ode + + return nothing +end end # @muladd diff --git a/src/callbacks_step/analysis_dg3d.jl b/src/callbacks_step/analysis_dg3d.jl index 81d0795a159..27e8a2b722f 100644 --- a/src/callbacks_step/analysis_dg3d.jl +++ b/src/callbacks_step/analysis_dg3d.jl @@ -35,7 +35,9 @@ function create_cache_analysis(analyzer, mesh::TreeMesh{3}, return (; u_local, u_tmp1, u_tmp2, x_local, x_tmp1, x_tmp2) end -function create_cache_analysis(analyzer, mesh::Union{StructuredMesh{3}, P4estMesh{3}}, +function create_cache_analysis(analyzer, + mesh::Union{StructuredMesh{3}, P4estMesh{3}, + T8codeMesh{3}}, equations, dg::DG, cache, RealT, uEltype) @@ -118,7 +120,7 @@ function calc_error_norms(func, u, t, analyzer, end function calc_error_norms(func, u, t, analyzer, - mesh::Union{StructuredMesh{3}, P4estMesh{3}}, + mesh::Union{StructuredMesh{3}, P4estMesh{3}, T8codeMesh{3}}, equations, initial_condition, dg::DGSEM, cache, cache_analysis) @unpack vandermonde, weights = analyzer @@ -190,7 +192,8 @@ function integrate_via_indices(func::Func, u, end function integrate_via_indices(func::Func, u, - mesh::Union{StructuredMesh{3}, P4estMesh{3}}, + mesh::Union{StructuredMesh{3}, P4estMesh{3}, + T8codeMesh{3}}, equations, dg::DGSEM, cache, args...; normalize = true) where {Func} @unpack weights = dg.basis @@ -218,7 +221,8 @@ function integrate_via_indices(func::Func, u, end function integrate(func::Func, u, - mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3}}, + mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3}, + T8codeMesh{3}}, equations, dg::DG, cache; normalize = true) where {Func} integrate_via_indices(u, mesh, equations, dg, cache; normalize = normalize) do u, i, j, k, element, equations, dg @@ -248,7 +252,8 @@ function integrate(func::Func, u, end function analyze(::typeof(entropy_timederivative), du, u, t, - mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3}}, + mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3}, + T8codeMesh{3}}, equations, dg::DG, cache) # 
Calculate ∫(∂S/∂u ⋅ ∂u/∂t)dΩ integrate_via_indices(u, mesh, equations, dg, cache, @@ -277,7 +282,7 @@ function analyze(::Val{:l2_divb}, du, u, t, end function analyze(::Val{:l2_divb}, du, u, t, - mesh::Union{StructuredMesh{3}, P4estMesh{3}}, + mesh::Union{StructuredMesh{3}, P4estMesh{3}, T8codeMesh{3}}, equations::IdealGlmMhdEquations3D, dg::DGSEM, cache) @unpack contravariant_vectors = cache.elements @@ -333,7 +338,7 @@ function analyze(::Val{:linf_divb}, du, u, t, end function analyze(::Val{:linf_divb}, du, u, t, - mesh::Union{StructuredMesh{3}, P4estMesh{3}}, + mesh::Union{StructuredMesh{3}, P4estMesh{3}, T8codeMesh{3}}, equations::IdealGlmMhdEquations3D, dg::DGSEM, cache) @unpack derivative_matrix, weights = dg.basis diff --git a/src/callbacks_step/stepsize_dg3d.jl b/src/callbacks_step/stepsize_dg3d.jl index c9ab7c478a8..822ab2f87ec 100644 --- a/src/callbacks_step/stepsize_dg3d.jl +++ b/src/callbacks_step/stepsize_dg3d.jl @@ -44,7 +44,7 @@ function max_dt(u, t, mesh::TreeMesh{3}, return 2 / (nnodes(dg) * max_scaled_speed) end -function max_dt(u, t, mesh::Union{StructuredMesh{3}, P4estMesh{3}}, +function max_dt(u, t, mesh::Union{StructuredMesh{3}, P4estMesh{3}, T8codeMesh{3}}, constant_speed::False, equations, dg::DG, cache) # to avoid a division by zero if the speed vanishes everywhere, # e.g. for steady-state linear advection @@ -82,7 +82,7 @@ function max_dt(u, t, mesh::Union{StructuredMesh{3}, P4estMesh{3}}, return 2 / (nnodes(dg) * max_scaled_speed) end -function max_dt(u, t, mesh::Union{StructuredMesh{3}, P4estMesh{3}}, +function max_dt(u, t, mesh::Union{StructuredMesh{3}, P4estMesh{3}, T8codeMesh{3}}, constant_speed::True, equations, dg::DG, cache) # to avoid a division by zero if the speed vanishes everywhere, # e.g. for steady-state linear advection diff --git a/src/meshes/t8code_mesh.jl b/src/meshes/t8code_mesh.jl index 13edcc29711..c9665a22af9 100644 --- a/src/meshes/t8code_mesh.jl +++ b/src/meshes/t8code_mesh.jl @@ -115,19 +115,49 @@ Non-periodic boundaries will be called ':x_neg', ':x_pos', ':y_neg', ':y_pos', ' - 'polydeg::Integer': polynomial degree used to store the geometry of the mesh. The mapping will be approximated by an interpolation polynomial of the specified degree for each tree. -- 'mapping': a function of 'NDIMS' variables to describe the mapping that transforms - the reference mesh ('[-1, 1]^n') to the physical domain. +- `mapping`: a function of `NDIMS` variables to describe the mapping that transforms + the reference mesh (`[-1, 1]^n`) to the physical domain. + Use only one of `mapping`, `faces` and `coordinates_min`/`coordinates_max`. +- `faces::NTuple{2*NDIMS}`: a tuple of `2 * NDIMS` functions that describe the faces of the domain. + Each function must take `NDIMS-1` arguments. + `faces[1]` describes the face onto which the face in negative x-direction + of the unit hypercube is mapped. The face in positive x-direction of + the unit hypercube will be mapped onto the face described by `faces[2]`. + `faces[3:4]` describe the faces in positive and negative y-direction respectively + (in 2D and 3D). + `faces[5:6]` describe the faces in positive and negative z-direction respectively (in 3D). + Use only one of `mapping`, `faces` and `coordinates_min`/`coordinates_max`. +- `coordinates_min`: vector or tuple of the coordinates of the corner in the negative direction of each dimension + to create a rectangular mesh. + Use only one of `mapping`, `faces` and `coordinates_min`/`coordinates_max`. 
+- `coordinates_max`: vector or tuple of the coordinates of the corner in the positive direction of each dimension + to create a rectangular mesh. + Use only one of `mapping`, `faces` and `coordinates_min`/`coordinates_max`. - 'RealT::Type': the type that should be used for coordinates. - 'initial_refinement_level::Integer': refine the mesh uniformly to this level before the simulation starts. - 'periodicity': either a 'Bool' deciding if all of the boundaries are periodic or an 'NTuple{NDIMS, Bool}' deciding for each dimension if the boundaries in this dimension are periodic. """ -function T8codeMesh(trees_per_dimension; polydeg, - mapping = coordinates2mapping((-1.0, -1.0), (1.0, 1.0)), - RealT = Float64, initial_refinement_level = 0, periodicity = true) - NDIMS = length(trees_per_dimension) +function T8codeMesh(trees_per_dimension; polydeg = 1, + mapping = nothing, faces = nothing, coordinates_min = nothing, + coordinates_max = nothing, + RealT = Float64, initial_refinement_level = 0, + periodicity = true) + @assert ((coordinates_min === nothing)===(coordinates_max === nothing)) "Either both or none of coordinates_min and coordinates_max must be specified" + + @assert count(i -> i !== nothing, + (mapping, faces, coordinates_min))==1 "Exactly one of mapping, faces and coordinates_min/max must be specified" + + # Extract mapping + if faces !== nothing + validate_faces(faces) + mapping = transfinite_mapping(faces) + elseif coordinates_min !== nothing + mapping = coordinates2mapping(coordinates_min, coordinates_max) + end - @assert NDIMS == 2 # Only support for NDIMS = 2 yet. + NDIMS = length(trees_per_dimension) + @assert (NDIMS == 2||NDIMS == 3) "NDIMS should be 2 or 3." # Convert periodicity to a Tuple of a Bool for every dimension if all(periodicity) @@ -141,10 +171,18 @@ function T8codeMesh(trees_per_dimension; polydeg, periodicity = Tuple(periodicity) end - conn = T8code.Libt8.p4est_connectivity_new_brick(trees_per_dimension..., periodicity...) do_partition = 0 - cmesh = t8_cmesh_new_from_p4est(conn, mpi_comm(), do_partition) - T8code.Libt8.p4est_connectivity_destroy(conn) + if NDIMS == 2 + conn = T8code.Libt8.p4est_connectivity_new_brick(trees_per_dimension..., + periodicity...) + cmesh = t8_cmesh_new_from_p4est(conn, mpi_comm(), do_partition) + T8code.Libt8.p4est_connectivity_destroy(conn) + elseif NDIMS == 3 + conn = T8code.Libt8.p8est_connectivity_new_brick(trees_per_dimension..., + periodicity...) + cmesh = t8_cmesh_new_from_p8est(conn, mpi_comm(), do_partition) + T8code.Libt8.p8est_connectivity_destroy(conn) + end scheme = t8_scheme_new_default_cxx() forest = t8_forest_new_uniform(cmesh, scheme, initial_refinement_level, 0, mpi_comm()) @@ -156,28 +194,48 @@ function T8codeMesh(trees_per_dimension; polydeg, ntuple(_ -> length(nodes), NDIMS)..., prod(trees_per_dimension)) - # Get cell length in reference mesh: Omega_ref = [-1,1]^2. - dx = 2 / trees_per_dimension[1] - dy = 2 / trees_per_dimension[2] + # Get cell length in reference mesh: Omega_ref = [-1,1]^NDIMS. + dx = [2 / n for n in trees_per_dimension] num_local_trees = t8_cmesh_get_num_local_trees(cmesh) # Non-periodic boundaries. boundary_names = fill(Symbol("---"), 2 * NDIMS, prod(trees_per_dimension)) + if mapping === nothing + mapping_ = coordinates2mapping(ntuple(_ -> -1.0, NDIMS), ntuple(_ -> 1.0, NDIMS)) + else + mapping_ = mapping + end + for itree in 1:num_local_trees veptr = t8_cmesh_get_tree_vertices(cmesh, itree - 1) verts = unsafe_wrap(Array, veptr, (3, 1 << NDIMS)) # Calculate node coordinates of reference mesh. 
- cell_x_offset = (verts[1, 1] - 1 / 2 * (trees_per_dimension[1] - 1)) * dx - cell_y_offset = (verts[2, 1] - 1 / 2 * (trees_per_dimension[2] - 1)) * dy - - for j in eachindex(nodes), i in eachindex(nodes) - tree_node_coordinates[:, i, j, itree] .= mapping(cell_x_offset + - dx * nodes[i] / 2, - cell_y_offset + - dy * nodes[j] / 2) + if NDIMS == 2 + cell_x_offset = (verts[1, 1] - 0.5 * (trees_per_dimension[1] - 1)) * dx[1] + cell_y_offset = (verts[2, 1] - 0.5 * (trees_per_dimension[2] - 1)) * dx[2] + + for j in eachindex(nodes), i in eachindex(nodes) + tree_node_coordinates[:, i, j, itree] .= mapping_(cell_x_offset + + dx[1] * nodes[i] / 2, + cell_y_offset + + dx[2] * nodes[j] / 2) + end + elseif NDIMS == 3 + cell_x_offset = (verts[1, 1] - 0.5 * (trees_per_dimension[1] - 1)) * dx[1] + cell_y_offset = (verts[2, 1] - 0.5 * (trees_per_dimension[2] - 1)) * dx[2] + cell_z_offset = (verts[3, 1] - 0.5 * (trees_per_dimension[3] - 1)) * dx[3] + + for k in eachindex(nodes), j in eachindex(nodes), i in eachindex(nodes) + tree_node_coordinates[:, i, j, k, itree] .= mapping_(cell_x_offset + + dx[1] * nodes[i] / 2, + cell_y_offset + + dx[2] * nodes[j] / 2, + cell_z_offset + + dx[3] * nodes[k] / 2) + end end if !periodicity[1] @@ -189,6 +247,13 @@ function T8codeMesh(trees_per_dimension; polydeg, boundary_names[3, itree] = :y_neg boundary_names[4, itree] = :y_pos end + + if NDIMS > 2 + if !periodicity[3] + boundary_names[5, itree] = :z_neg + boundary_names[6, itree] = :z_pos + end + end end return T8codeMesh{NDIMS}(cmesh, scheme, forest, tree_node_coordinates, nodes, @@ -196,9 +261,9 @@ function T8codeMesh(trees_per_dimension; polydeg, end """ - T8codeMesh{NDIMS}(cmesh::Ptr{t8_cmesh}, - mapping=nothing, polydeg=1, RealT=Float64, - initial_refinement_level=0) + T8codeMesh(cmesh::Ptr{t8_cmesh}, + mapping=nothing, polydeg=1, RealT=Float64, + initial_refinement_level=0) Main mesh constructor for the `T8codeMesh` that imports an unstructured, conforming mesh from a `t8_cmesh` data structure. @@ -215,10 +280,15 @@ conforming mesh from a `t8_cmesh` data structure. - `RealT::Type`: the type that should be used for coordinates. - `initial_refinement_level::Integer`: refine the mesh uniformly to this level before the simulation starts. """ -function T8codeMesh{NDIMS}(cmesh::Ptr{t8_cmesh}; - mapping = nothing, polydeg = 1, RealT = Float64, - initial_refinement_level = 0) where {NDIMS} - @assert NDIMS == 2 # Only support for NDIMS = 2 yet. +function T8codeMesh(cmesh::Ptr{t8_cmesh}; + mapping = nothing, polydeg = 1, RealT = Float64, + initial_refinement_level = 0) + @assert (t8_cmesh_get_num_trees(cmesh)>0) "Given `cmesh` does not contain any trees." + + # Infer NDIMS from the geometry of the first tree. + NDIMS = Int(t8_geom_get_dimension(t8_cmesh_get_tree_geometry(cmesh, 0))) + + @assert (NDIMS == 2||NDIMS == 3) "NDIMS should be 2 or 3." 
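Non-periodic tree faces are labeled `:x_neg`, `:x_pos`, `:y_neg`, `:y_pos` and, in 3D, `:z_neg`, `:z_pos`, while periodic faces keep the placeholder `Symbol("---")`. A minimal sketch of that per-tree layout (the helper name is hypothetical, not part of Trixi):

```julia
# Hypothetical helper mirroring the boundary-name layout used above: one column
# per tree, 2 * NDIMS rows ordered (x_neg, x_pos, y_neg, y_pos[, z_neg, z_pos]);
# periodic directions keep the placeholder name "---".
function brick_boundary_names(NDIMS, num_trees, periodicity)
    names = fill(Symbol("---"), 2 * NDIMS, num_trees)
    for itree in 1:num_trees, dim in 1:NDIMS
        if !periodicity[dim]
            names[2 * dim - 1, itree] = Symbol(("x", "y", "z")[dim], "_neg")
            names[2 * dim, itree]     = Symbol(("x", "y", "z")[dim], "_pos")
        end
    end
    return names
end

println(brick_boundary_names(3, 2, (false, true, false)))
```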
scheme = t8_scheme_new_default_cxx() forest = t8_forest_new_uniform(cmesh, scheme, initial_refinement_level, 0, mpi_comm()) @@ -234,34 +304,84 @@ function T8codeMesh{NDIMS}(cmesh::Ptr{t8_cmesh}; nodes_in = [-1.0, 1.0] matrix = polynomial_interpolation_matrix(nodes_in, nodes) - data_in = Array{RealT, 3}(undef, 2, 2, 2) - tmp1 = zeros(RealT, 2, length(nodes), length(nodes_in)) - for itree in 0:(num_local_trees - 1) - veptr = t8_cmesh_get_tree_vertices(cmesh, itree) - verts = unsafe_wrap(Array, veptr, (3, 1 << NDIMS)) + if NDIMS == 2 + data_in = Array{RealT, 3}(undef, 2, 2, 2) + tmp1 = zeros(RealT, 2, length(nodes), length(nodes_in)) + verts = zeros(3, 4) - u = verts[:, 2] - verts[:, 1] - v = verts[:, 3] - verts[:, 1] - w = [0.0, 0.0, 1.0] + for itree in 0:(num_local_trees - 1) + veptr = t8_cmesh_get_tree_vertices(cmesh, itree) - vol = dot(cross(u, v), w) + # Note, `verts = unsafe_wrap(Array, veptr, (3, 1 << NDIMS))` + # sometimes does not work since `veptr` is not necessarily properly + # aligned to 8 bytes. + for icorner in 1:4 + verts[1, icorner] = unsafe_load(veptr, (icorner - 1) * 3 + 1) + verts[2, icorner] = unsafe_load(veptr, (icorner - 1) * 3 + 2) + end - if vol < 0.0 - @warn "Discovered negative volumes in `cmesh`: vol = $vol" + # Check if tree's node ordering is right-handed or print a warning. + let z = zero(eltype(verts)), o = one(eltype(verts)) + u = verts[:, 2] - verts[:, 1] + v = verts[:, 3] - verts[:, 1] + w = [z, z, o] + + # Triple product gives signed volume of spanned parallelepiped. + vol = dot(cross(u, v), w) + + if vol < z + @warn "Discovered negative volumes in `cmesh`: vol = $vol" + end + end + + # Tree vertices are stored in z-order. + @views data_in[:, 1, 1] .= verts[1:2, 1] + @views data_in[:, 2, 1] .= verts[1:2, 2] + @views data_in[:, 1, 2] .= verts[1:2, 3] + @views data_in[:, 2, 2] .= verts[1:2, 4] + + # Interpolate corner coordinates to specified nodes. + multiply_dimensionwise!(view(tree_node_coordinates, :, :, :, itree + 1), + matrix, matrix, + data_in, + tmp1) end - # Tree vertices are stored in z-order. - @views data_in[:, 1, 1] .= verts[1:2, 1] - @views data_in[:, 2, 1] .= verts[1:2, 2] - @views data_in[:, 1, 2] .= verts[1:2, 3] - @views data_in[:, 2, 2] .= verts[1:2, 4] - - # Interpolate corner coordinates to specified nodes. - multiply_dimensionwise!(view(tree_node_coordinates, :, :, :, itree + 1), - matrix, matrix, - data_in, - tmp1) + elseif NDIMS == 3 + data_in = Array{RealT, 4}(undef, 3, 2, 2, 2) + tmp1 = zeros(RealT, 3, length(nodes), length(nodes_in), length(nodes_in)) + verts = zeros(3, 8) + + for itree in 0:(num_local_trees - 1) + veptr = t8_cmesh_get_tree_vertices(cmesh, itree) + + # Note, `verts = unsafe_wrap(Array, veptr, (3, 1 << NDIMS))` + # sometimes does not work since `veptr` is not necessarily properly + # aligned to 8 bytes. + for icorner in 1:8 + verts[1, icorner] = unsafe_load(veptr, (icorner - 1) * 3 + 1) + verts[2, icorner] = unsafe_load(veptr, (icorner - 1) * 3 + 2) + verts[3, icorner] = unsafe_load(veptr, (icorner - 1) * 3 + 3) + end + + # Tree vertices are stored in z-order. + @views data_in[:, 1, 1, 1] .= verts[1:3, 1] + @views data_in[:, 2, 1, 1] .= verts[1:3, 2] + @views data_in[:, 1, 2, 1] .= verts[1:3, 3] + @views data_in[:, 2, 2, 1] .= verts[1:3, 4] + + @views data_in[:, 1, 1, 2] .= verts[1:3, 5] + @views data_in[:, 2, 1, 2] .= verts[1:3, 6] + @views data_in[:, 1, 2, 2] .= verts[1:3, 7] + @views data_in[:, 2, 2, 2] .= verts[1:3, 8] + + # Interpolate corner coordinates to specified nodes. 
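The right-handedness check above reduces to the scalar triple product of two edge vectors of a tree with the out-of-plane unit vector; a negative value means the tree's vertices are ordered left-handed and triggers the warning. In isolation:

```julia
using LinearAlgebra: cross, dot

# Signed-volume test as in the constructor above: `corners` holds the first three
# tree vertices (one per column); a negative result indicates left-handed ordering.
function signed_volume(corners)
    u = corners[:, 2] - corners[:, 1]
    v = corners[:, 3] - corners[:, 1]
    w = [0.0, 0.0, 1.0]  # out-of-plane direction for a 2D tree
    return dot(cross(u, v), w)
end

right_handed = [0.0 1.0 0.0; 0.0 0.0 1.0; 0.0 0.0 0.0]  # corners (0,0), (1,0), (0,1)
left_handed  = right_handed[:, [1, 3, 2]]                # swap two corners
println(signed_volume(right_handed))  # 1.0  (> 0, fine)
println(signed_volume(left_handed))   # -1.0 (< 0, warn)
```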
+ multiply_dimensionwise!(view(tree_node_coordinates, :, :, :, :, itree + 1), + matrix, matrix, matrix, + data_in, + tmp1) + end end map_node_coordinates!(tree_node_coordinates, mapping) @@ -274,9 +394,7 @@ function T8codeMesh{NDIMS}(cmesh::Ptr{t8_cmesh}; end """ - T8codeMesh{NDIMS}(conn::Ptr{p4est_connectivity}, - mapping=nothing, polydeg=1, RealT=Float64, - initial_refinement_level=0) + T8codeMesh(conn::Ptr{p4est_connectivity}; kwargs...) Main mesh constructor for the `T8codeMesh` that imports an unstructured, conforming mesh from a `p4est_connectivity` data structure. @@ -293,24 +411,45 @@ conforming mesh from a `p4est_connectivity` data structure. - `RealT::Type`: the type that should be used for coordinates. - `initial_refinement_level::Integer`: refine the mesh uniformly to this level before the simulation starts. """ -function T8codeMesh{NDIMS}(conn::Ptr{p4est_connectivity}; kwargs...) where {NDIMS} - @assert NDIMS == 2 # Only support for NDIMS = 2 yet. - +function T8codeMesh(conn::Ptr{p4est_connectivity}; kwargs...) cmesh = t8_cmesh_new_from_p4est(conn, mpi_comm(), 0) - return T8codeMesh{NDIMS}(cmesh; kwargs...) + return T8codeMesh(cmesh; kwargs...) +end + +""" + T8codeMesh(conn::Ptr{p8est_connectivity}; kwargs...) + +Main mesh constructor for the `T8codeMesh` that imports an unstructured, +conforming mesh from a `p4est_connectivity` data structure. + +# Arguments +- `conn::Ptr{p4est_connectivity}`: Pointer to a P4est connectivity object. +- `mapping`: a function of `NDIMS` variables to describe the mapping that transforms + the imported mesh to the physical domain. Use `nothing` for the identity map. +- `polydeg::Integer`: polynomial degree used to store the geometry of the mesh. + The mapping will be approximated by an interpolation polynomial + of the specified degree for each tree. + The default of `1` creates an uncurved geometry. Use a higher value if the mapping + will curve the imported uncurved mesh. +- `RealT::Type`: the type that should be used for coordinates. +- `initial_refinement_level::Integer`: refine the mesh uniformly to this level before the simulation starts. +""" +function T8codeMesh(conn::Ptr{p8est_connectivity}; kwargs...) + cmesh = t8_cmesh_new_from_p8est(conn, mpi_comm(), 0) + + return T8codeMesh(cmesh; kwargs...) end """ - T8codeMesh{NDIMS}(meshfile::String; - mapping=nothing, polydeg=1, RealT=Float64, - initial_refinement_level=0) + T8codeMesh{NDIMS}(meshfile::String; kwargs...) Main mesh constructor for the `T8codeMesh` that imports an unstructured, conforming mesh from a Gmsh mesh file (`.msh`). # Arguments - `meshfile::String`: path to a Gmsh mesh file. +- `ndims`: Mesh file dimension: `2` or `3`. - `mapping`: a function of `NDIMS` variables to describe the mapping that transforms the imported mesh to the physical domain. Use `nothing` for the identity map. - `polydeg::Integer`: polynomial degree used to store the geometry of the mesh. @@ -321,17 +460,130 @@ mesh from a Gmsh mesh file (`.msh`). - `RealT::Type`: the type that should be used for coordinates. - `initial_refinement_level::Integer`: refine the mesh uniformly to this level before the simulation starts. """ -function T8codeMesh{NDIMS}(meshfile::String; kwargs...) where {NDIMS} - @assert NDIMS == 2 # Only support for NDIMS = 2 yet. +function T8codeMesh(meshfile::String, ndims; kwargs...) # Prevent `t8code` from crashing Julia if the file doesn't exist. 
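The `data_in` assignments above rely on the z-order (Morton) corner numbering used by p4est/t8code: bit `d` of the corner index selects the positive side in direction `d`. A tiny sketch decoding that numbering (assuming the usual p4est/t8code corner convention):

```julia
# Decode a z-order (Morton) corner index into per-direction offsets (0 or 1).
# Corner 0 is the (0,0,0) corner of the hexahedron, corner 7 the (1,1,1) corner.
corner_offsets(corner) = ((corner >> 0) & 1, (corner >> 1) & 1, (corner >> 2) & 1)

for corner in 0:7
    println("corner ", corner, " -> (i, j, k) = ", corner_offsets(corner))
end
```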
@assert isfile(meshfile) meshfile_prefix, meshfile_suffix = splitext(meshfile) - cmesh = t8_cmesh_from_msh_file(meshfile_prefix, 0, mpi_comm(), NDIMS, 0, 0) + cmesh = t8_cmesh_from_msh_file(meshfile_prefix, 0, mpi_comm(), ndims, 0, 0) + + return T8codeMesh(cmesh; kwargs...) +end + +struct adapt_callback_passthrough + adapt_callback::Function + user_data::Any +end - return T8codeMesh{NDIMS}(cmesh; kwargs...) +# Callback function prototype to decide for refining and coarsening. +# If `is_family` equals 1, the first `num_elements` in elements +# form a family and we decide whether this family should be coarsened +# or only the first element should be refined. +# Otherwise `is_family` must equal zero and we consider the first entry +# of the element array for refinement. +# Entries of the element array beyond the first `num_elements` are undefined. +# \param [in] forest the forest to which the new elements belong +# \param [in] forest_from the forest that is adapted. +# \param [in] which_tree the local tree containing `elements` +# \param [in] lelement_id the local element id in `forest_old` in the tree of the current element +# \param [in] ts the eclass scheme of the tree +# \param [in] is_family if 1, the first `num_elements` entries in `elements` form a family. If 0, they do not. +# \param [in] num_elements the number of entries in `elements` that are defined +# \param [in] elements Pointers to a family or, if `is_family` is zero, +# pointer to one element. +# \return greater zero if the first entry in `elements` should be refined, +# smaller zero if the family `elements` shall be coarsened, +# zero else. +function adapt_callback_wrapper(forest, + forest_from, + which_tree, + lelement_id, + ts, + is_family, + num_elements, + elements_ptr)::Cint + passthrough = unsafe_pointer_to_objref(t8_forest_get_user_data(forest))[] + + elements = unsafe_wrap(Array, elements_ptr, num_elements) + + return passthrough.adapt_callback(forest_from, which_tree, ts, lelement_id, elements, + Bool(is_family), passthrough.user_data) +end + +""" + Trixi.adapt!(mesh::T8codeMesh, adapt_callback; kwargs...) + +Adapt a `T8codeMesh` according to a user-defined `adapt_callback`. + +# Arguments +- `mesh::T8codeMesh`: Initialized mesh object. +- `adapt_callback`: A user-defined callback which tells the adaption routines + if an element should be refined, coarsened or stay unchanged. + + The expected callback signature is as follows: + + `adapt_callback(forest, ltreeid, eclass_scheme, lelemntid, elements, is_family, user_data)` + # Arguments + - `forest`: Pointer to the analyzed forest. + - `ltreeid`: Local index of the current tree where the analyzed elements are part of. + - `eclass_scheme`: Element class of `elements`. + - `lelemntid`: Local index of the first element in `elements`. + - `elements`: Array of elements. If consecutive elements form a family + they are passed together, otherwise `elements` consists of just one element. + - `is_family`: Boolean signifying if `elements` represents a family or not. + - `user_data`: Void pointer to some arbitrary user data. Default value is `C_NULL`. + # Returns + -1 : Coarsen family of elements. + 0 : Stay unchanged. + 1 : Refine element. + +- `kwargs`: + - `recursive = true`: Adapt the forest recursively. If true the caller must ensure that the callback + returns 0 for every analyzed element at some point to stop the recursion. + - `balance = true`: Make sure the adapted forest is 2^(NDIMS-1):1 balanced. 
+ - `partition = true`: Partition the forest to redistribute elements evenly among MPI ranks. + - `ghost = true`: Create a ghost layer for MPI data exchange. + - `user_data = C_NULL`: Pointer to some arbitrary user-defined data. +""" +function adapt!(mesh::T8codeMesh, adapt_callback; recursive = true, balance = true, + partition = true, ghost = true, user_data = C_NULL) + # Check that forest is a committed, that is valid and usable, forest. + @assert t8_forest_is_committed(mesh.forest) != 0 + + # Init new forest. + new_forest_ref = Ref{t8_forest_t}() + t8_forest_init(new_forest_ref) + new_forest = new_forest_ref[] + + # Check out `examples/t8_step4_partition_balance_ghost.jl` in + # https://github.com/DLR-AMR/T8code.jl for detailed explanations. + let set_from = C_NULL, set_for_coarsening = 0, no_repartition = !partition + t8_forest_set_user_data(new_forest, + pointer_from_objref(Ref(adapt_callback_passthrough(adapt_callback, + user_data)))) + t8_forest_set_adapt(new_forest, mesh.forest, + @t8_adapt_callback(adapt_callback_wrapper), + recursive) + if balance + t8_forest_set_balance(new_forest, set_from, no_repartition) + end + + if partition + t8_forest_set_partition(new_forest, set_from, set_for_coarsening) + end + + t8_forest_set_ghost(new_forest, ghost, T8_GHOST_FACES) # Note: MPI support not available yet so it is a dummy call. + + # The old forest is destroyed here. + # Call `t8_forest_ref(Ref(mesh.forest))` to keep it. + t8_forest_commit(new_forest) + end + + mesh.forest = new_forest + + return nothing end # TODO: Just a placeholder. Will be implemented later when MPI is supported. @@ -343,3 +595,10 @@ end function partition!(mesh::T8codeMesh; allow_coarsening = true, weight_fn = C_NULL) return nothing end + +#! format: off +@deprecate T8codeMesh{2}(conn::Ptr{p4est_connectivity}; kwargs...) T8codeMesh(conn::Ptr{p4est_connectivity}; kwargs...) +@deprecate T8codeMesh{3}(conn::Ptr{p8est_connectivity}; kwargs...) T8codeMesh(conn::Ptr{p8est_connectivity}; kwargs...) +@deprecate T8codeMesh{2}(meshfile::String; kwargs...) T8codeMesh(meshfile::String, 2; kwargs...) +@deprecate T8codeMesh{3}(meshfile::String; kwargs...) T8codeMesh(meshfile::String, 3; kwargs...) +#! format: on diff --git a/src/solvers/dgsem_p4est/containers_3d.jl b/src/solvers/dgsem_p4est/containers_3d.jl index e9994fe4569..7e383924ba7 100644 --- a/src/solvers/dgsem_p4est/containers_3d.jl +++ b/src/solvers/dgsem_p4est/containers_3d.jl @@ -6,7 +6,8 @@ #! format: noindent # Initialize data structures in element container -function init_elements!(elements, mesh::P4estMesh{3}, basis::LobattoLegendreBasis) +function init_elements!(elements, mesh::Union{P4estMesh{3}, T8codeMesh{3}}, + basis::LobattoLegendreBasis) @unpack node_coordinates, jacobian_matrix, contravariant_vectors, inverse_jacobian = elements @@ -26,7 +27,7 @@ end # Interpolate tree_node_coordinates to each quadrant at the nodes of the specified basis function calc_node_coordinates!(node_coordinates, - mesh::P4estMesh{3}, + mesh::Union{P4estMesh{3}, T8codeMesh{3}}, basis::LobattoLegendreBasis) # Hanging nodes will cause holes in the mesh if its polydeg is higher # than the polydeg of the solver. diff --git a/src/solvers/dgsem_p4est/dg_3d.jl b/src/solvers/dgsem_p4est/dg_3d.jl index 4c0845ba9af..5b3c5ae5ca8 100644 --- a/src/solvers/dgsem_p4est/dg_3d.jl +++ b/src/solvers/dgsem_p4est/dg_3d.jl @@ -7,8 +7,8 @@ # The methods below are specialized on the mortar type # and called from the basic `create_cache` method at the top. 
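The `adapt!` docstring above fixes the callback return convention: 1 refines the element, -1 coarsens the family, 0 keeps it. A hedged sketch of a callback obeying that contract — `target_level` and the function name are made up for illustration, and `t8_element_level` is assumed to behave as in the node-coordinate routines elsewhere in this patch:

```julia
# Sketch of a user callback for Trixi.adapt! (return 1 = refine, 0 = keep, -1 = coarsen).
# `target_level` is an illustrative constant; `t8_element_level` is the t8code binding
# already used in this patch to query an element's refinement level.
function refine_to_level_callback(forest, ltreeid, eclass_scheme, lelementid,
                                  elements, is_family, user_data)
    target_level = 3
    level = t8_element_level(eclass_scheme, elements[1])

    if level < target_level
        return 1   # refine this element
    elseif is_family && level > target_level
        return -1  # coarsen the whole family
    else
        return 0   # leave unchanged
    end
end

# Possible usage (sketch): Trixi.adapt!(mesh, refine_to_level_callback; recursive = true)
```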
-function create_cache(mesh::P4estMesh{3}, equations, mortar_l2::LobattoLegendreMortarL2, - uEltype) +function create_cache(mesh::Union{P4estMesh{3}, T8codeMesh{3}}, equations, + mortar_l2::LobattoLegendreMortarL2, uEltype) # TODO: Taal compare performance of different types fstar_threaded = [Array{uEltype, 4}(undef, nvariables(equations), nnodes(mortar_l2), nnodes(mortar_l2), 4) @@ -88,7 +88,7 @@ end # We pass the `surface_integral` argument solely for dispatch function prolong2interfaces!(cache, u, - mesh::P4estMesh{3}, + mesh::Union{P4estMesh{3}, T8codeMesh{3}}, equations, surface_integral, dg::DG) @unpack interfaces = cache index_range = eachnode(dg) @@ -163,7 +163,7 @@ function prolong2interfaces!(cache, u, end function calc_interface_flux!(surface_flux_values, - mesh::P4estMesh{3}, + mesh::Union{P4estMesh{3}, T8codeMesh{3}}, nonconservative_terms, equations, surface_integral, dg::DG, cache) @unpack neighbor_ids, node_indices = cache.interfaces @@ -244,7 +244,7 @@ end # Inlined function for interface flux computation for conservative flux terms @inline function calc_interface_flux!(surface_flux_values, - mesh::P4estMesh{3}, + mesh::Union{P4estMesh{3}, T8codeMesh{3}}, nonconservative_terms::False, equations, surface_integral, dg::DG, cache, interface_index, normal_direction, @@ -271,7 +271,7 @@ end # Inlined function for interface flux computation for flux + nonconservative terms @inline function calc_interface_flux!(surface_flux_values, - mesh::P4estMesh{3}, + mesh::Union{P4estMesh{3}, T8codeMesh{3}}, nonconservative_terms::True, equations, surface_integral, dg::DG, cache, interface_index, normal_direction, @@ -314,7 +314,7 @@ end end function prolong2boundaries!(cache, u, - mesh::P4estMesh{3}, + mesh::Union{P4estMesh{3}, T8codeMesh{3}}, equations, surface_integral, dg::DG) @unpack boundaries = cache index_range = eachnode(dg) @@ -355,7 +355,7 @@ function prolong2boundaries!(cache, u, end function calc_boundary_flux!(cache, t, boundary_condition, boundary_indexing, - mesh::P4estMesh{3}, + mesh::Union{P4estMesh{3}, T8codeMesh{3}}, equations, surface_integral, dg::DG) @unpack boundaries = cache @unpack surface_flux_values, node_coordinates, contravariant_vectors = cache.elements @@ -417,7 +417,7 @@ function calc_boundary_flux!(cache, t, boundary_condition, boundary_indexing, end function prolong2mortars!(cache, u, - mesh::P4estMesh{3}, equations, + mesh::Union{P4estMesh{3}, T8codeMesh{3}}, equations, mortar_l2::LobattoLegendreMortarL2, surface_integral, dg::DGSEM) @unpack fstar_tmp_threaded = cache @@ -521,7 +521,7 @@ function prolong2mortars!(cache, u, end function calc_mortar_flux!(surface_flux_values, - mesh::P4estMesh{3}, + mesh::Union{P4estMesh{3}, T8codeMesh{3}}, nonconservative_terms, equations, mortar_l2::LobattoLegendreMortarL2, surface_integral, dg::DG, cache) @@ -595,7 +595,7 @@ end # Inlined version of the mortar flux computation on small elements for conservation fluxes @inline function calc_mortar_flux!(fstar, - mesh::P4estMesh{3}, + mesh::Union{P4estMesh{3}, T8codeMesh{3}}, nonconservative_terms::False, equations, surface_integral, dg::DG, cache, mortar_index, position_index, normal_direction, @@ -616,7 +616,7 @@ end # Inlined version of the mortar flux computation on small elements for conservation fluxes # with nonconservative terms @inline function calc_mortar_flux!(fstar, - mesh::P4estMesh{3}, + mesh::Union{P4estMesh{3}, T8codeMesh{3}}, nonconservative_terms::True, equations, surface_integral, dg::DG, cache, mortar_index, position_index, normal_direction, @@ -643,7 
+643,8 @@ end end @inline function mortar_fluxes_to_elements!(surface_flux_values, - mesh::P4estMesh{3}, equations, + mesh::Union{P4estMesh{3}, T8codeMesh{3}}, + equations, mortar_l2::LobattoLegendreMortarL2, dg::DGSEM, cache, mortar, fstar, u_buffer, fstar_tmp) @@ -727,7 +728,7 @@ end end function calc_surface_integral!(du, u, - mesh::P4estMesh{3}, + mesh::Union{P4estMesh{3}, T8codeMesh{3}}, equations, surface_integral::SurfaceIntegralWeakForm, dg::DGSEM, cache) diff --git a/src/solvers/dgsem_structured/dg_3d.jl b/src/solvers/dgsem_structured/dg_3d.jl index cdb085e9008..1df9f408895 100644 --- a/src/solvers/dgsem_structured/dg_3d.jl +++ b/src/solvers/dgsem_structured/dg_3d.jl @@ -58,7 +58,8 @@ See also https://github.com/trixi-framework/Trixi.jl/issues/1671#issuecomment-17 =# @inline function weak_form_kernel!(du, u, element, - mesh::Union{StructuredMesh{3}, P4estMesh{3}}, + mesh::Union{StructuredMesh{3}, P4estMesh{3}, + T8codeMesh{3}}, nonconservative_terms::False, equations, dg::DGSEM, cache, alpha = true) # true * [some floating point value] == [exactly the same floating point value] @@ -115,7 +116,8 @@ end # the physical fluxes in each Cartesian direction @inline function flux_differencing_kernel!(du, u, element, - mesh::Union{StructuredMesh{3}, P4estMesh{3}}, + mesh::Union{StructuredMesh{3}, P4estMesh{3}, + T8codeMesh{3}}, nonconservative_terms::False, equations, volume_flux, dg::DGSEM, cache, alpha = true) # true * [some floating point value] == [exactly the same floating point value] @@ -189,7 +191,8 @@ end @inline function flux_differencing_kernel!(du, u, element, - mesh::Union{StructuredMesh{3}, P4estMesh{3}}, + mesh::Union{StructuredMesh{3}, P4estMesh{3}, + T8codeMesh{3}}, nonconservative_terms::True, equations, volume_flux, dg::DGSEM, cache, alpha = true) @unpack derivative_split = dg.basis @@ -274,7 +277,8 @@ end # [arXiv: 2008.12044v2](https://arxiv.org/pdf/2008.12044) @inline function calcflux_fv!(fstar1_L, fstar1_R, fstar2_L, fstar2_R, fstar3_L, fstar3_R, u, - mesh::Union{StructuredMesh{3}, P4estMesh{3}}, + mesh::Union{StructuredMesh{3}, P4estMesh{3}, + T8codeMesh{3}}, nonconservative_terms::False, equations, volume_flux_fv, dg::DGSEM, element, cache) @unpack contravariant_vectors = cache.elements @@ -369,7 +373,8 @@ end # # Calculate the finite volume fluxes inside curvilinear elements (**with non-conservative terms**). @inline function calcflux_fv!(fstar1_L, fstar1_R, fstar2_L, fstar2_R, fstar3_L, fstar3_R, u, - mesh::Union{StructuredMesh{3}, P4estMesh{3}}, + mesh::Union{StructuredMesh{3}, P4estMesh{3}, + T8codeMesh{3}}, nonconservative_terms::True, equations, volume_flux_fv, dg::DGSEM, element, cache) @unpack contravariant_vectors = cache.elements @@ -783,7 +788,7 @@ function calc_boundary_flux!(cache, u, t, boundary_conditions::NamedTuple, end function apply_jacobian!(du, - mesh::Union{StructuredMesh{3}, P4estMesh{3}}, + mesh::Union{StructuredMesh{3}, P4estMesh{3}, T8codeMesh{3}}, equations, dg::DG, cache) @threaded for element in eachelement(dg, cache) for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg) diff --git a/src/solvers/dgsem_t8code/containers_2d.jl b/src/solvers/dgsem_t8code/containers_2d.jl index 029e6674afb..bf77826a34b 100644 --- a/src/solvers/dgsem_t8code/containers_2d.jl +++ b/src/solvers/dgsem_t8code/containers_2d.jl @@ -1,3 +1,7 @@ +# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). +# Since these FMAs can increase the performance of many numerical algorithms, +# we need to opt-in explicitly. 
+# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. @muladd begin #! format: noindent @@ -27,6 +31,9 @@ function calc_node_coordinates!(node_coordinates, element = t8_forest_get_element_in_tree(mesh.forest, itree, ielement) element_level = t8_element_level(eclass_scheme, element) + # Note, `t8_quad_len` is encoded as an integer (Morton encoding) in + # relation to `t8_quad_root_len`. This line transforms the + # "integer" length to a float in relation to the unit interval [0,1]. element_length = t8_quad_len(element_level) / t8_quad_root_len element_coords = Array{Float64}(undef, 3) @@ -55,4 +62,16 @@ function calc_node_coordinates!(node_coordinates, return node_coordinates end + +function init_mortar_neighbor_ids!(mortars::P4estMortarContainer{2}, my_face, + other_face, orientation, neighbor_ielements, + mortar_id) + if orientation == 0 + mortars.neighbor_ids[1, mortar_id] = neighbor_ielements[1] + 1 + mortars.neighbor_ids[2, mortar_id] = neighbor_ielements[2] + 1 + else + mortars.neighbor_ids[1, mortar_id] = neighbor_ielements[2] + 1 + mortars.neighbor_ids[2, mortar_id] = neighbor_ielements[1] + 1 + end +end end # @muladd diff --git a/src/solvers/dgsem_t8code/containers_3d.jl b/src/solvers/dgsem_t8code/containers_3d.jl new file mode 100644 index 00000000000..f2d54ff07da --- /dev/null +++ b/src/solvers/dgsem_t8code/containers_3d.jl @@ -0,0 +1,235 @@ +# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). +# Since these FMAs can increase the performance of many numerical algorithms, +# we need to opt-in explicitly. +# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. +@muladd begin +#! format: noindent + +# Interpolate tree_node_coordinates to each quadrant at the specified nodes +function calc_node_coordinates!(node_coordinates, + mesh::T8codeMesh{3}, + nodes::AbstractVector) + # We use `StrideArray`s here since these buffers are used in performance-critical + # places and the additional information passed to the compiler makes them faster + # than native `Array`s. + tmp1 = StrideArray(undef, real(mesh), + StaticInt(3), static_length(nodes), static_length(mesh.nodes), + static_length(mesh.nodes)) + matrix1 = StrideArray(undef, real(mesh), + static_length(nodes), static_length(mesh.nodes)) + matrix2 = similar(matrix1) + matrix3 = similar(matrix1) + baryweights_in = barycentric_weights(mesh.nodes) + + num_local_trees = t8_forest_get_num_local_trees(mesh.forest) + + current_index = 0 + for itree in 0:(num_local_trees - 1) + tree_class = t8_forest_get_tree_class(mesh.forest, itree) + eclass_scheme = t8_forest_get_eclass_scheme(mesh.forest, tree_class) + num_elements_in_tree = t8_forest_get_tree_num_elements(mesh.forest, itree) + + for ielement in 0:(num_elements_in_tree - 1) + element = t8_forest_get_element_in_tree(mesh.forest, itree, ielement) + element_level = t8_element_level(eclass_scheme, element) + + # Note, `t8_hex_len` is encoded as an integer (Morton encoding) in + # relation to `t8_hex_root_len`. This line transforms the + # "integer" length to a float in relation to the unit interval [0,1]. 
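The node-coordinate routine below first converts the Morton-encoded element length into a fraction of the tree's unit interval [0, 1] and then places the solver nodes inside that sub-cell, re-expressed on [-1, 1]. The same arithmetic as a standalone sketch (pure Julia, no t8code calls):

```julia
# `level` is the element's refinement level, `coord` its lower corner in the tree's
# unit interval [0, 1] (as returned by the vertex-reference-coordinate query);
# `nodes` are solver nodes on [-1, 1].
element_length(level) = 1 / 2^level
nodes_in_reference(nodes, level, coord) =
    2 .* (element_length(level) .* 0.5 .* (nodes .+ 1) .+ coord) .- 1

nodes = [-1.0, 0.0, 1.0]
println(nodes_in_reference(nodes, 0, 0.0))  # root element: [-1.0, 0.0, 1.0]
println(nodes_in_reference(nodes, 1, 0.5))  # upper half:   [0.0, 0.5, 1.0]
```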
+ element_length = t8_hex_len(element_level) / t8_hex_root_len + + element_coords = Vector{Float64}(undef, 3) + t8_element_vertex_reference_coords(eclass_scheme, element, 0, + pointer(element_coords)) + + nodes_out_x = (2 * + (element_length * 0.5 * (nodes .+ 1) .+ element_coords[1]) .- + 1) + nodes_out_y = (2 * + (element_length * 0.5 * (nodes .+ 1) .+ element_coords[2]) .- + 1) + nodes_out_z = (2 * + (element_length * 0.5 * (nodes .+ 1) .+ element_coords[3]) .- + 1) + + polynomial_interpolation_matrix!(matrix1, mesh.nodes, nodes_out_x, + baryweights_in) + polynomial_interpolation_matrix!(matrix2, mesh.nodes, nodes_out_y, + baryweights_in) + polynomial_interpolation_matrix!(matrix3, mesh.nodes, nodes_out_z, + baryweights_in) + + multiply_dimensionwise!(view(node_coordinates, :, :, :, :, + current_index += 1), + matrix1, matrix2, matrix3, + view(mesh.tree_node_coordinates, :, :, :, :, + itree + 1), + tmp1) + end + end + + return node_coordinates +end + +# This routine was copied and adapted from `src/dgsem_p4est/containers_3d.jl`: `orientation_to_indices_p4est`. +function init_mortar_neighbor_ids!(mortars::P4estMortarContainer{3}, my_face, + other_face, orientation, neighbor_ielements, + mortar_id) + # my_face and other_face are the face directions (zero-based) + # of "my side" and "other side" respectively. + # Face corner 0 of the face with the lower face direction connects to a corner of the other face. + # The number of this corner is the orientation code in `p4est`. + lower = my_face <= other_face + + # x_pos, y_neg, and z_pos are the directions in which the face has right-handed coordinates + # when looked at from the outside. + my_right_handed = my_face in (1, 2, 5) + other_right_handed = other_face in (1, 2, 5) + + # If both or none are right-handed when looked at from the outside, they will have different + # orientations when looked at from the same side of the interface. + flipped = my_right_handed == other_right_handed + + # In the following illustrations, the face corner numbering of `p4est` is shown. + # ξ and η are the local coordinates of the respective face. + # We're looking at both faces from the same side of the interface, so that "other side" + # (in the illustrations on the left) has right-handed coordinates. 
+ if !flipped + if orientation == 0 + # Corner 0 of other side matches corner 0 of my side + # 2┌──────┐3 2┌──────┐3 + # │ │ │ │ + # │ │ │ │ + # 0└──────┘1 0└──────┘1 + # η η + # ↑ ↑ + # │ │ + # └───> ξ └───> ξ + + mortars.neighbor_ids[1, mortar_id] = neighbor_ielements[1] + 1 + mortars.neighbor_ids[2, mortar_id] = neighbor_ielements[2] + 1 + mortars.neighbor_ids[3, mortar_id] = neighbor_ielements[3] + 1 + mortars.neighbor_ids[4, mortar_id] = neighbor_ielements[4] + 1 + + elseif ((lower && orientation == 2) # Corner 0 of my side matches corner 2 of other side + || + (!lower && orientation == 1)) # Corner 0 of other side matches corner 1 of my side + # 2┌──────┐3 0┌──────┐2 + # │ │ │ │ + # │ │ │ │ + # 0└──────┘1 1└──────┘3 + # η ┌───> η + # ↑ │ + # │ ↓ + # └───> ξ ξ + + mortars.neighbor_ids[1, mortar_id] = neighbor_ielements[2] + 1 + mortars.neighbor_ids[2, mortar_id] = neighbor_ielements[4] + 1 + mortars.neighbor_ids[3, mortar_id] = neighbor_ielements[1] + 1 + mortars.neighbor_ids[4, mortar_id] = neighbor_ielements[3] + 1 + + elseif ((lower && orientation == 1) # Corner 0 of my side matches corner 1 of other side + || + (!lower && orientation == 2)) # Corner 0 of other side matches corner 2 of my side + # 2┌──────┐3 3┌──────┐1 + # │ │ │ │ + # │ │ │ │ + # 0└──────┘1 2└──────┘0 + # η ξ + # ↑ ↑ + # │ │ + # └───> ξ η <───┘ + + mortars.neighbor_ids[1, mortar_id] = neighbor_ielements[3] + 1 + mortars.neighbor_ids[2, mortar_id] = neighbor_ielements[1] + 1 + mortars.neighbor_ids[3, mortar_id] = neighbor_ielements[4] + 1 + mortars.neighbor_ids[4, mortar_id] = neighbor_ielements[2] + 1 + + else # orientation == 3 + # Corner 0 of my side matches corner 3 of other side and + # corner 0 of other side matches corner 3 of my side. + # 2┌──────┐3 1┌──────┐0 + # │ │ │ │ + # │ │ │ │ + # 0└──────┘1 3└──────┘2 + # η ξ <───┐ + # ↑ │ + # │ ↓ + # └───> ξ η + + mortars.neighbor_ids[1, mortar_id] = neighbor_ielements[4] + 1 + mortars.neighbor_ids[2, mortar_id] = neighbor_ielements[3] + 1 + mortars.neighbor_ids[3, mortar_id] = neighbor_ielements[2] + 1 + mortars.neighbor_ids[4, mortar_id] = neighbor_ielements[1] + 1 + end + else # flipped + if orientation == 0 + # Corner 0 of other side matches corner 0 of my side + # 2┌──────┐3 1┌──────┐3 + # │ │ │ │ + # │ │ │ │ + # 0└──────┘1 0└──────┘2 + # η ξ + # ↑ ↑ + # │ │ + # └───> ξ └───> η + + mortars.neighbor_ids[1, mortar_id] = neighbor_ielements[1] + 1 + mortars.neighbor_ids[2, mortar_id] = neighbor_ielements[3] + 1 + mortars.neighbor_ids[3, mortar_id] = neighbor_ielements[2] + 1 + mortars.neighbor_ids[4, mortar_id] = neighbor_ielements[4] + 1 + + elseif orientation == 2 + # Corner 0 of my side matches corner 2 of other side and + # corner 0 of other side matches corner 2 of my side. + # 2┌──────┐3 0┌──────┐1 + # │ │ │ │ + # │ │ │ │ + # 0└──────┘1 2└──────┘3 + # η ┌───> ξ + # ↑ │ + # │ ↓ + # └───> ξ η + + mortars.neighbor_ids[1, mortar_id] = neighbor_ielements[3] + 1 + mortars.neighbor_ids[2, mortar_id] = neighbor_ielements[4] + 1 + mortars.neighbor_ids[3, mortar_id] = neighbor_ielements[1] + 1 + mortars.neighbor_ids[4, mortar_id] = neighbor_ielements[2] + 1 + + elseif orientation == 1 + # Corner 0 of my side matches corner 1 of other side and + # corner 0 of other side matches corner 1 of my side. 
+ # 2┌──────┐3 3┌──────┐2 + # │ │ │ │ + # │ │ │ │ + # 0└──────┘1 1└──────┘0 + # η η + # ↑ ↑ + # │ │ + # └───> ξ ξ <───┘ + + mortars.neighbor_ids[1, mortar_id] = neighbor_ielements[2] + 1 + mortars.neighbor_ids[2, mortar_id] = neighbor_ielements[1] + 1 + mortars.neighbor_ids[3, mortar_id] = neighbor_ielements[4] + 1 + mortars.neighbor_ids[4, mortar_id] = neighbor_ielements[3] + 1 + + else # orientation == 3 + # Corner 0 of my side matches corner 3 of other side and + # corner 0 of other side matches corner 3 of my side. + # 2┌──────┐3 2┌──────┐0 + # │ │ │ │ + # │ │ │ │ + # 0└──────┘1 3└──────┘1 + # η η <───┐ + # ↑ │ + # │ ↓ + # └───> ξ ξ + + mortars.neighbor_ids[1, mortar_id] = neighbor_ielements[4] + 1 + mortars.neighbor_ids[2, mortar_id] = neighbor_ielements[2] + 1 + mortars.neighbor_ids[3, mortar_id] = neighbor_ielements[3] + 1 + mortars.neighbor_ids[4, mortar_id] = neighbor_ielements[1] + 1 + end + end +end +end # @muladd diff --git a/src/solvers/dgsem_t8code/dg.jl b/src/solvers/dgsem_t8code/dg.jl index 16a9d7d35b1..6e9660c917d 100644 --- a/src/solvers/dgsem_t8code/dg.jl +++ b/src/solvers/dgsem_t8code/dg.jl @@ -28,4 +28,5 @@ end include("containers.jl") include("containers_2d.jl") +include("containers_3d.jl") end # @muladd diff --git a/src/solvers/dgsem_tree/dg_3d.jl b/src/solvers/dgsem_tree/dg_3d.jl index 0955dc38655..02ff338e912 100644 --- a/src/solvers/dgsem_tree/dg_3d.jl +++ b/src/solvers/dgsem_tree/dg_3d.jl @@ -36,13 +36,15 @@ end # The methods below are specialized on the volume integral type # and called from the basic `create_cache` method at the top. -function create_cache(mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3}}, +function create_cache(mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3}, + T8codeMesh{3}}, equations, volume_integral::VolumeIntegralFluxDifferencing, dg::DG, uEltype) NamedTuple() end -function create_cache(mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3}}, +function create_cache(mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3}, + T8codeMesh{3}}, equations, volume_integral::VolumeIntegralShockCapturingHG, dg::DG, uEltype) element_ids_dg = Int[] @@ -79,8 +81,8 @@ function create_cache(mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3}}, fstar2_L_threaded, fstar2_R_threaded, fstar3_L_threaded, fstar3_R_threaded) end -function create_cache(mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3}}, - equations, +function create_cache(mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3}, + T8codeMesh{3}}, equations, volume_integral::VolumeIntegralPureLGLFiniteVolume, dg::DG, uEltype) A4dp1_x = Array{uEltype, 4} @@ -112,7 +114,8 @@ end # The methods below are specialized on the mortar type # and called from the basic `create_cache` method at the top. -function create_cache(mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3}}, +function create_cache(mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3}, + T8codeMesh{3}}, equations, mortar_l2::LobattoLegendreMortarL2, uEltype) # TODO: Taal compare performance of different types A3d = Array{uEltype, 3} @@ -140,7 +143,7 @@ end # TODO: Taal discuss/refactor timer, allowing users to pass a custom timer? 
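Most solver hunks in this patch extend existing `P4estMesh{3}` methods to the new mesh type simply by widening the dispatch signature to a `Union`, so a single method body serves both mesh types. A minimal sketch of that pattern with stand-in types (not Trixi's actual types):

```julia
# Stand-in types only; the point is the dispatch pattern, not the real containers.
struct FakeP4estMesh{NDIMS} end
struct FakeT8codeMesh{NDIMS} end

# One body serves both curved 3D mesh types, mirroring the
# `mesh::Union{P4estMesh{3}, T8codeMesh{3}}` signatures introduced above.
solver_kernel(mesh::Union{FakeP4estMesh{3}, FakeT8codeMesh{3}}) = "shared curved 3D code path"
solver_kernel(mesh::FakeP4estMesh{2}) = "2D-specific code path"

println(solver_kernel(FakeT8codeMesh{3}()))  # -> "shared curved 3D code path"
println(solver_kernel(FakeP4estMesh{3}()))   # -> "shared curved 3D code path"
```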
function rhs!(du, u, t, - mesh::Union{TreeMesh{3}, P4estMesh{3}}, equations, + mesh::Union{TreeMesh{3}, P4estMesh{3}, T8codeMesh{3}}, equations, initial_condition, boundary_conditions, source_terms::Source, dg::DG, cache) where {Source} # Reset du @@ -209,8 +212,8 @@ function rhs!(du, u, t, end function calc_volume_integral!(du, u, - mesh::Union{TreeMesh{3}, StructuredMesh{3}, - P4estMesh{3}}, + mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3}, + T8codeMesh{3}}, nonconservative_terms, equations, volume_integral::VolumeIntegralWeakForm, dg::DGSEM, cache) @@ -264,8 +267,8 @@ See also https://github.com/trixi-framework/Trixi.jl/issues/1671#issuecomment-17 end function calc_volume_integral!(du, u, - mesh::Union{TreeMesh{3}, StructuredMesh{3}, - P4estMesh{3}}, + mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3}, + T8codeMesh{3}}, nonconservative_terms, equations, volume_integral::VolumeIntegralFluxDifferencing, dg::DGSEM, cache) @@ -378,8 +381,8 @@ end # TODO: Taal dimension agnostic function calc_volume_integral!(du, u, - mesh::Union{TreeMesh{3}, StructuredMesh{3}, - P4estMesh{3}}, + mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3}, + T8codeMesh{3}}, nonconservative_terms, equations, volume_integral::VolumeIntegralShockCapturingHG, dg::DGSEM, cache) @@ -437,7 +440,8 @@ function calc_volume_integral!(du, u, end @inline function fv_kernel!(du, u, - mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3}}, + mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3}, + T8codeMesh{3}}, nonconservative_terms, equations, volume_flux_fv, dg::DGSEM, cache, element, alpha = true) @unpack fstar1_L_threaded, fstar1_R_threaded, fstar2_L_threaded, fstar2_R_threaded, fstar3_L_threaded, fstar3_R_threaded = cache diff --git a/src/solvers/dgsem_tree/indicators_3d.jl b/src/solvers/dgsem_tree/indicators_3d.jl index 40362889397..a11a8e06e4b 100644 --- a/src/solvers/dgsem_tree/indicators_3d.jl +++ b/src/solvers/dgsem_tree/indicators_3d.jl @@ -101,7 +101,8 @@ end alpha[element] = min(alpha_max, alpha_element) end -function apply_smoothing!(mesh::Union{TreeMesh{3}, P4estMesh{3}}, alpha, alpha_tmp, dg, +function apply_smoothing!(mesh::Union{TreeMesh{3}, P4estMesh{3}, T8codeMesh{3}}, alpha, + alpha_tmp, dg, cache) # Diffuse alpha values by setting each alpha to at least 50% of neighboring elements' alpha diff --git a/test/runtests.jl b/test/runtests.jl index 7e195fe7402..49f0977bb70 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -89,6 +89,10 @@ const TRIXI_NTHREADS = clamp(Sys.CPU_THREADS, 2, 3) include("test_t8code_2d.jl") end + @time if TRIXI_TEST == "all" || TRIXI_TEST == "t8code_part2" + include("test_t8code_3d.jl") + end + @time if TRIXI_TEST == "all" || TRIXI_TEST == "unstructured_dgmulti" include("test_unstructured_2d.jl") include("test_dgmulti_1d.jl") diff --git a/test/test_t8code_2d.jl b/test/test_t8code_2d.jl index b3e19471323..ab95e068d02 100644 --- a/test/test_t8code_2d.jl +++ b/test/test_t8code_2d.jl @@ -30,7 +30,21 @@ mkdir(outdir) end end +@trixi_testset "test check_for_negative_volumes" begin + @test_warn "Discovered negative volumes" begin + # Unstructured mesh with six cells which have left-handed node ordering. + mesh_file = joinpath(EXAMPLES_DIR, "rectangle_with_negative_volumes.msh") + isfile(mesh_file) || + download("https://gist.githubusercontent.com/jmark/bfe0d45f8e369298d6cc637733819013/raw/cecf86edecc736e8b3e06e354c494b2052d41f7a/rectangle_with_negative_volumes.msh", + mesh_file) + + # This call should throw a warning about negative volumes detected. 
+ mesh = T8codeMesh(mesh_file, 2) + end +end + @trixi_testset "elixir_advection_basic.jl" begin + # This test is identical to the one in `test_p4est_2d.jl`. @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_basic.jl"), # Expected errors are exactly the same as with TreeMesh! l2=[8.311947673061856e-6], @@ -46,6 +60,7 @@ end end @trixi_testset "elixir_advection_nonconforming_flag.jl" begin + # This test is identical to the one in `test_p4est_2d.jl`. @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_nonconforming_flag.jl"), l2=[3.198940059144588e-5], @@ -61,6 +76,7 @@ end end @trixi_testset "elixir_advection_unstructured_flag.jl" begin + # This test is identical to the one in `test_p4est_2d.jl`. @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_unstructured_flag.jl"), l2=[0.0005379687442422346], linf=[0.007438525029884735]) @@ -91,6 +107,7 @@ end end @trixi_testset "elixir_advection_amr_solution_independent.jl" begin + # This test is identical to the one in `test_p4est_2d.jl`. @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_amr_solution_independent.jl"), # Expected errors are exactly the same as with StructuredMesh! @@ -108,6 +125,7 @@ end end @trixi_testset "elixir_euler_source_terms_nonconforming_unstructured_flag.jl" begin + # This test is identical to the one in `test_p4est_2d.jl`. @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_source_terms_nonconforming_unstructured_flag.jl"), l2=[ @@ -133,6 +151,7 @@ end end @trixi_testset "elixir_euler_free_stream.jl" begin + # This test is identical to the one in `test_p4est_2d.jl`. @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_free_stream.jl"), l2=[ 2.063350241405049e-15, @@ -153,6 +172,7 @@ end end @trixi_testset "elixir_euler_shockcapturing_ec.jl" begin + # This test is identical to the one in `test_p4est_2d.jl`. @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_shockcapturing_ec.jl"), l2=[ 9.53984675e-02, @@ -178,6 +198,8 @@ end end @trixi_testset "elixir_euler_sedov.jl" begin + # This test is identical to the one in `test_p4est_2d.jl` besides minor + # deviations in the expected error norms. @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_sedov.jl"), l2=[ 3.76149952e-01, @@ -203,6 +225,7 @@ end end @trixi_testset "elixir_shallowwater_source_terms.jl" begin + # This test is identical to the one in `test_p4est_2d.jl`. @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_source_terms.jl"), l2=[ 9.168126407325352e-5, @@ -228,6 +251,7 @@ end end @trixi_testset "elixir_mhd_alfven_wave.jl" begin + # This test is identical to the one in `test_p4est_2d.jl`. @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_mhd_alfven_wave.jl"), l2=[1.0513414461545583e-5, 1.0517900957166411e-6, 1.0517900957304043e-6, 1.511816606372376e-6, @@ -250,6 +274,8 @@ end end @trixi_testset "elixir_mhd_rotor.jl" begin + # This test is identical to the one in `test_p4est_2d.jl` besides minor + # deviations in the expected error norms. 
@test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_mhd_rotor.jl"), l2=[0.44211360369891683, 0.8805178316216257, 0.8262710688468049, 0.0, diff --git a/test/test_t8code_3d.jl b/test/test_t8code_3d.jl new file mode 100644 index 00000000000..4232cf04094 --- /dev/null +++ b/test/test_t8code_3d.jl @@ -0,0 +1,279 @@ +module TestExamplesT8codeMesh3D + +using Test +using Trixi + +include("test_trixi.jl") + +EXAMPLES_DIR = joinpath(examples_dir(), "t8code_3d_dgsem") + +# Start with a clean environment: remove Trixi.jl output directory if it exists +outdir = "out" +isdir(outdir) && rm(outdir, recursive = true) +mkdir(outdir) + +@testset "T8codeMesh3D" begin + # This test is identical to the one in `test_p4est_3d.jl`. + @trixi_testset "elixir_advection_basic.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_basic.jl"), + # Expected errors are exactly the same as with TreeMesh! + l2=[0.00016263963870641478], + linf=[0.0014537194925779984]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + # This test is identical to the one in `test_p4est_3d.jl`. + @trixi_testset "elixir_advection_unstructured_curved.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_advection_unstructured_curved.jl"), + l2=[0.0004750004258546538], + linf=[0.026527551737137167]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + # This test is identical to the one in `test_p4est_3d.jl`. + @trixi_testset "elixir_advection_nonconforming.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_nonconforming.jl"), + l2=[0.00253595715323843], + linf=[0.016486952252155795]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + # This test is identical to the one in `test_p4est_3d.jl` besides minor + # deviations from the expected error norms. + @trixi_testset "elixir_advection_amr.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_amr.jl"), + # Expected errors are exactly the same as with TreeMesh! + l2=[1.1302812803902801e-5], + linf=[0.0007889950196294793], + coverage_override=(maxiters = 6, initial_refinement_level = 1, + base_level = 1, med_level = 2, max_level = 3)) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + # This test is identical to the one in `test_p4est_3d.jl` besides minor + # deviations from the expected error norms. 
+ @trixi_testset "elixir_advection_amr_unstructured_curved.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_advection_amr_unstructured_curved.jl"), + l2=[2.0556575425846923e-5], + linf=[0.00105682693484822], + tspan=(0.0, 1.0), + coverage_override=(maxiters = 6, initial_refinement_level = 0, + base_level = 0, med_level = 1, max_level = 2)) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + # This test is identical to the one in `test_p4est_3d.jl`. + @trixi_testset "elixir_euler_source_terms_nonconforming_unstructured_curved.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_euler_source_terms_nonconforming_unstructured_curved.jl"), + l2=[ + 4.070355207909268e-5, + 4.4993257426833716e-5, + 5.10588457841744e-5, + 5.102840924036687e-5, + 0.00019986264001630542, + ], + linf=[ + 0.0016987332417202072, + 0.003622956808262634, + 0.002029576258317789, + 0.0024206977281964193, + 0.008526972236273522, + ], + tspan=(0.0, 0.01)) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + # This test is identical to the one in `test_p4est_3d.jl`. + @trixi_testset "elixir_euler_source_terms_nonperiodic.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_euler_source_terms_nonperiodic.jl"), + l2=[ + 0.0015106060984283647, + 0.0014733349038567685, + 0.00147333490385685, + 0.001473334903856929, + 0.0028149479453087093, + ], + linf=[ + 0.008070806335238156, + 0.009007245083113125, + 0.009007245083121784, + 0.009007245083102688, + 0.01562861968368434, + ], + tspan=(0.0, 1.0)) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + # This test is identical to the one in `test_p4est_3d.jl`. + @trixi_testset "elixir_euler_free_stream.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_free_stream.jl"), + l2=[ + 5.162664597942288e-15, + 1.941857343642486e-14, + 2.0232366394187278e-14, + 2.3381518645408552e-14, + 7.083114561232324e-14, + ], + linf=[ + 7.269740365245525e-13, + 3.289868377720495e-12, + 4.440087186807773e-12, + 3.8686831516088205e-12, + 9.412914891981927e-12, + ], + tspan=(0.0, 0.03)) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + # This test is identical to the one in `test_p4est_3d.jl`. 
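Each of these test sets ends with the same allocation check: after the solve, one more `rhs!` evaluation should allocate essentially nothing, which serves as a cheap proxy for type stability. The pattern in isolation, on a toy kernel:

```julia
# Toy in-place kernel standing in for Trixi.rhs!: once compiled, a type-stable
# kernel should report (near) zero allocations, well below the 1000-byte bound
# used in the tests above.
function toy_rhs!(du, u)
    @. du = -2.0 * u
    return nothing
end

u = rand(1000)
du = similar(u)
toy_rhs!(du, u)                      # warm-up call triggers compilation
println(@allocated toy_rhs!(du, u))  # expected: 0
```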
+ @trixi_testset "elixir_euler_free_stream_extruded.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_free_stream_extruded.jl"), + l2=[ + 8.444868392439035e-16, + 4.889826056731442e-15, + 2.2921260987087585e-15, + 4.268460455702414e-15, + 1.1356712092620279e-14, + ], + linf=[ + 7.749356711883593e-14, + 2.8792246364872653e-13, + 1.1121659149182506e-13, + 3.3228975127030935e-13, + 9.592326932761353e-13, + ], + tspan=(0.0, 0.1)) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + # This test is identical to the one in `test_p4est_3d.jl`. + @trixi_testset "elixir_euler_ec.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_ec.jl"), + l2=[ + 0.010380390326164493, + 0.006192950051354618, + 0.005970674274073704, + 0.005965831290564327, + 0.02628875593094754, + ], + linf=[ + 0.3326911600075694, + 0.2824952141320467, + 0.41401037398065543, + 0.45574161423218573, + 0.8099577682187109, + ], + tspan=(0.0, 0.2), + coverage_override=(polydeg = 3,)) # Prevent long compile time in CI + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + # This test is identical to the one in `test_p4est_3d.jl` besides minor + # deviations in the expected error norms. + @trixi_testset "elixir_euler_sedov.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_sedov.jl"), + l2=[ + 7.82070951e-02, + 4.33260474e-02, + 4.33260474e-02, + 4.33260474e-02, + 3.75260911e-01, + ], + linf=[ + 7.45329845e-01, + 3.21754792e-01, + 3.21754792e-01, + 3.21754792e-01, + 4.76151527e+00, + ], + tspan=(0.0, 0.3), + coverage_override=(polydeg = 3,)) # Prevent long compile time in CI + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end +end + +# Clean up afterwards: delete Trixi.jl output directory +@test_nowarn rm(outdir, recursive = true) + +end # module From 43dac2c68f255d2e1aa209da4b441fc9f4bae6ac Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Mon, 22 Jan 2024 17:33:19 +0100 Subject: [PATCH 076/166] Remove duplicate code (`rhs_parabolic!`) for 2D, 3D (#1810) * Remove duplicate code * comment --- src/solvers/dgsem_p4est/dg_2d_parabolic.jl | 26 ++++- src/solvers/dgsem_p4est/dg_3d_parabolic.jl | 112 --------------------- src/solvers/dgsem_tree/dg_2d_parabolic.jl | 2 +- src/solvers/dgsem_tree/dg_3d_parabolic.jl | 108 -------------------- 4 files changed, 24 insertions(+), 224 deletions(-) diff --git a/src/solvers/dgsem_p4est/dg_2d_parabolic.jl b/src/solvers/dgsem_p4est/dg_2d_parabolic.jl index a7f3345168f..299f2f6140a 100644 --- a/src/solvers/dgsem_p4est/dg_2d_parabolic.jl +++ b/src/solvers/dgsem_p4est/dg_2d_parabolic.jl @@ -19,9 +19,29 @@ function create_cache_parabolic(mesh::P4estMesh{2}, equations_hyperbolic::Abstra return cache end -# TODO: Remove in favor of the implementation for the TreeMesh -# once the P4estMesh can handle mortars as well -function rhs_parabolic!(du, u, t, mesh::P4estMesh{2}, +#= +Reusing `rhs_parabolic!` for `TreeMesh`es is not easily possible as +for `P4estMesh`es we call + + ``` + 
prolong2mortars_divergence!(cache, flux_viscous, mesh, equations_parabolic, + dg.mortar, dg.surface_integral, dg) + + calc_mortar_flux_divergence!(cache_parabolic.elements.surface_flux_values, + mesh, equations_parabolic, dg.mortar, + dg.surface_integral, dg, cache) + ``` +instead of + ``` + prolong2mortars!(cache, flux_viscous, mesh, equations_parabolic, + dg.mortar, dg.surface_integral, dg) + + calc_mortar_flux!(cache_parabolic.elements.surface_flux_values, mesh, + equations_parabolic, + dg.mortar, dg.surface_integral, dg, cache) + ``` +=# +function rhs_parabolic!(du, u, t, mesh::Union{P4estMesh{2}, P4estMesh{3}}, equations_parabolic::AbstractEquationsParabolic, initial_condition, boundary_conditions_parabolic, source_terms, dg::DG, parabolic_scheme, cache, cache_parabolic) diff --git a/src/solvers/dgsem_p4est/dg_3d_parabolic.jl b/src/solvers/dgsem_p4est/dg_3d_parabolic.jl index 0bb97c7af02..83d663809a7 100644 --- a/src/solvers/dgsem_p4est/dg_3d_parabolic.jl +++ b/src/solvers/dgsem_p4est/dg_3d_parabolic.jl @@ -19,118 +19,6 @@ function create_cache_parabolic(mesh::P4estMesh{3}, equations_hyperbolic::Abstra return cache end -# This file collects all methods that have been updated to work with parabolic systems of equations -# -# assumptions: parabolic terms are of the form div(f(u, grad(u))) and -# will be discretized first order form as follows: -# 1. compute grad(u) -# 2. compute f(u, grad(u)) -# 3. compute div(f(u, grad(u))) (i.e., the "regular" rhs! call) -# boundary conditions will be applied to both grad(u) and div(f(u, grad(u))). -# TODO: Remove in favor of the implementation for the TreeMesh -# once the P4estMesh can handle mortars as well -function rhs_parabolic!(du, u, t, mesh::P4estMesh{3}, - equations_parabolic::AbstractEquationsParabolic, - initial_condition, boundary_conditions_parabolic, source_terms, - dg::DG, parabolic_scheme, cache, cache_parabolic) - @unpack viscous_container = cache_parabolic - @unpack u_transformed, gradients, flux_viscous = viscous_container - - # Convert conservative variables to a form more suitable for viscous flux calculations - @trixi_timeit timer() "transform variables" begin - transform_variables!(u_transformed, u, mesh, equations_parabolic, - dg, parabolic_scheme, cache, cache_parabolic) - end - - # Compute the gradients of the transformed variables - @trixi_timeit timer() "calculate gradient" begin - calc_gradient!(gradients, u_transformed, t, mesh, equations_parabolic, - boundary_conditions_parabolic, dg, cache, cache_parabolic) - end - - # Compute and store the viscous fluxes - @trixi_timeit timer() "calculate viscous fluxes" begin - calc_viscous_fluxes!(flux_viscous, gradients, u_transformed, mesh, - equations_parabolic, dg, cache, cache_parabolic) - end - - # The remainder of this function is essentially a regular rhs! for parabolic - # equations (i.e., it computes the divergence of the viscous fluxes) - # - # OBS! In `calc_viscous_fluxes!`, the viscous flux values at the volume nodes of each element have - # been computed and stored in `fluxes_viscous`. In the following, we *reuse* (abuse) the - # `interfaces` and `boundaries` containers in `cache_parabolic` to interpolate and store the - # *fluxes* at the element surfaces, as opposed to interpolating and storing the *solution* (as it - # is done in the hyperbolic operator). That is, `interfaces.u`/`boundaries.u` store *viscous flux values* - # and *not the solution*. 
The advantage is that a) we do not need to allocate more storage, b) we - # do not need to recreate the existing data structure only with a different name, and c) we do not - # need to interpolate solutions *and* gradients to the surfaces. - - # TODO: parabolic; reconsider current data structure reuse strategy - - # Reset du - @trixi_timeit timer() "reset ∂u/∂t" reset_du!(du, dg, cache) - - # Calculate volume integral - @trixi_timeit timer() "volume integral" begin - calc_volume_integral!(du, flux_viscous, mesh, equations_parabolic, dg, cache) - end - - # Prolong solution to interfaces - @trixi_timeit timer() "prolong2interfaces" begin - prolong2interfaces!(cache_parabolic, flux_viscous, mesh, equations_parabolic, - dg.surface_integral, dg, cache) - end - - # Calculate interface fluxes - @trixi_timeit timer() "interface flux" begin - calc_interface_flux!(cache_parabolic.elements.surface_flux_values, mesh, - equations_parabolic, dg, cache_parabolic) - end - - # Prolong solution to boundaries - @trixi_timeit timer() "prolong2boundaries" begin - prolong2boundaries!(cache_parabolic, flux_viscous, mesh, equations_parabolic, - dg.surface_integral, dg, cache) - end - - # Calculate boundary fluxes - @trixi_timeit timer() "boundary flux" begin - calc_boundary_flux_divergence!(cache_parabolic, t, - boundary_conditions_parabolic, - mesh, equations_parabolic, - dg.surface_integral, dg) - end - - # Prolong solution to mortars (specialized for AbstractEquationsParabolic) - # !!! NOTE: we reuse the hyperbolic cache here since it contains "mortars" and "u_threaded" - # !!! Is this OK? - @trixi_timeit timer() "prolong2mortars" begin - prolong2mortars_divergence!(cache, flux_viscous, mesh, equations_parabolic, - dg.mortar, dg.surface_integral, dg) - end - - # Calculate mortar fluxes (specialized for AbstractEquationsParabolic) - @trixi_timeit timer() "mortar flux" begin - calc_mortar_flux_divergence!(cache_parabolic.elements.surface_flux_values, - mesh, equations_parabolic, dg.mortar, - dg.surface_integral, dg, cache) - end - - # Calculate surface integrals - @trixi_timeit timer() "surface integral" begin - calc_surface_integral!(du, u, mesh, equations_parabolic, - dg.surface_integral, dg, cache_parabolic) - end - - # Apply Jacobian from mapping to reference element - @trixi_timeit timer() "Jacobian" begin - apply_jacobian_parabolic!(du, mesh, equations_parabolic, dg, cache_parabolic) - end - - return nothing -end - function calc_gradient!(gradients, u_transformed, t, mesh::P4estMesh{3}, equations_parabolic, boundary_conditions_parabolic, dg::DG, diff --git a/src/solvers/dgsem_tree/dg_2d_parabolic.jl b/src/solvers/dgsem_tree/dg_2d_parabolic.jl index 3083ae30680..b1c27343999 100644 --- a/src/solvers/dgsem_tree/dg_2d_parabolic.jl +++ b/src/solvers/dgsem_tree/dg_2d_parabolic.jl @@ -13,7 +13,7 @@ # 2. compute f(u, grad(u)) # 3. compute div(f(u, grad(u))) (i.e., the "regular" rhs! call) # boundary conditions will be applied to both grad(u) and div(f(u, grad(u))). -function rhs_parabolic!(du, u, t, mesh::TreeMesh{2}, +function rhs_parabolic!(du, u, t, mesh::Union{TreeMesh{2}, TreeMesh{3}}, equations_parabolic::AbstractEquationsParabolic, initial_condition, boundary_conditions_parabolic, source_terms, dg::DG, parabolic_scheme, cache, cache_parabolic) diff --git a/src/solvers/dgsem_tree/dg_3d_parabolic.jl b/src/solvers/dgsem_tree/dg_3d_parabolic.jl index 9ad28c6aa8e..ee0e7c6b069 100644 --- a/src/solvers/dgsem_tree/dg_3d_parabolic.jl +++ b/src/solvers/dgsem_tree/dg_3d_parabolic.jl @@ -5,114 +5,6 @@ @muladd begin #! 
format: noindent -# This file collects all methods that have been updated to work with parabolic systems of equations -# -# assumptions: parabolic terms are of the form div(f(u, grad(u))) and -# will be discretized first order form as follows: -# 1. compute grad(u) -# 2. compute f(u, grad(u)) -# 3. compute div(f(u, grad(u))) (i.e., the "regular" rhs! call) -# boundary conditions will be applied to both grad(u) and div(f(u, grad(u))). -function rhs_parabolic!(du, u, t, mesh::TreeMesh{3}, - equations_parabolic::AbstractEquationsParabolic, - initial_condition, boundary_conditions_parabolic, source_terms, - dg::DG, parabolic_scheme, cache, cache_parabolic) - @unpack viscous_container = cache_parabolic - @unpack u_transformed, gradients, flux_viscous = viscous_container - - # Convert conservative variables to a form more suitable for viscous flux calculations - @trixi_timeit timer() "transform variables" begin - transform_variables!(u_transformed, u, mesh, equations_parabolic, - dg, parabolic_scheme, cache, cache_parabolic) - end - - # Compute the gradients of the transformed variables - @trixi_timeit timer() "calculate gradient" begin - calc_gradient!(gradients, u_transformed, t, mesh, equations_parabolic, - boundary_conditions_parabolic, dg, cache, cache_parabolic) - end - - # Compute and store the viscous fluxes - @trixi_timeit timer() "calculate viscous fluxes" begin - calc_viscous_fluxes!(flux_viscous, gradients, u_transformed, mesh, - equations_parabolic, dg, cache, cache_parabolic) - end - - # The remainder of this function is essentially a regular rhs! for parabolic - # equations (i.e., it computes the divergence of the viscous fluxes) - # - # OBS! In `calc_viscous_fluxes!`, the viscous flux values at the volume nodes of each element have - # been computed and stored in `fluxes_viscous`. In the following, we *reuse* (abuse) the - # `interfaces` and `boundaries` containers in `cache_parabolic` to interpolate and store the - # *fluxes* at the element surfaces, as opposed to interpolating and storing the *solution* (as it - # is done in the hyperbolic operator). That is, `interfaces.u`/`boundaries.u` store *viscous flux values* - # and *not the solution*. The advantage is that a) we do not need to allocate more storage, b) we - # do not need to recreate the existing data structure only with a different name, and c) we do not - # need to interpolate solutions *and* gradients to the surfaces. 
- - # TODO: parabolic; reconsider current data structure reuse strategy - - # Reset du - @trixi_timeit timer() "reset ∂u/∂t" reset_du!(du, dg, cache) - - # Calculate volume integral - @trixi_timeit timer() "volume integral" begin - calc_volume_integral!(du, flux_viscous, mesh, equations_parabolic, dg, cache) - end - - # Prolong solution to interfaces - @trixi_timeit timer() "prolong2interfaces" begin - prolong2interfaces!(cache_parabolic, flux_viscous, mesh, equations_parabolic, - dg.surface_integral, dg, cache) - end - - # Calculate interface fluxes - @trixi_timeit timer() "interface flux" begin - calc_interface_flux!(cache_parabolic.elements.surface_flux_values, mesh, - equations_parabolic, dg, cache_parabolic) - end - - # Prolong solution to boundaries - @trixi_timeit timer() "prolong2boundaries" begin - prolong2boundaries!(cache_parabolic, flux_viscous, mesh, equations_parabolic, - dg.surface_integral, dg, cache) - end - - # Calculate boundary fluxes - @trixi_timeit timer() "boundary flux" begin - calc_boundary_flux_divergence!(cache_parabolic, t, - boundary_conditions_parabolic, - mesh, equations_parabolic, - dg.surface_integral, dg) - end - - # Prolong solution to mortars - @trixi_timeit timer() "prolong2mortars" begin - prolong2mortars!(cache, flux_viscous, mesh, equations_parabolic, - dg.mortar, dg.surface_integral, dg) - end - - # Calculate mortar fluxes - @trixi_timeit timer() "mortar flux" begin - calc_mortar_flux!(cache_parabolic.elements.surface_flux_values, mesh, - equations_parabolic, - dg.mortar, dg.surface_integral, dg, cache) - end - - # Calculate surface integrals - @trixi_timeit timer() "surface integral" begin - calc_surface_integral!(du, u, mesh, equations_parabolic, - dg.surface_integral, dg, cache_parabolic) - end - - # Apply Jacobian from mapping to reference element - @trixi_timeit timer() "Jacobian" begin - apply_jacobian_parabolic!(du, mesh, equations_parabolic, dg, cache_parabolic) - end - - return nothing -end - # Transform solution variables prior to taking the gradient # (e.g., conservative to primitive variables). Defaults to doing nothing. # TODO: can we avoid copying data? From ba812be5ae6fb7170a85b9207253c8c89168ea76 Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Tue, 23 Jan 2024 16:04:58 +0100 Subject: [PATCH 077/166] Add Link to Paper for Weakly Enforced BCs to Docs (#1811) * Add Paper for Weakly Enforced BCs to Docs * spelling --- docs/literate/src/files/non_periodic_boundaries.jl | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/literate/src/files/non_periodic_boundaries.jl b/docs/literate/src/files/non_periodic_boundaries.jl index 7ed6324ff99..3b238ad4533 100644 --- a/docs/literate/src/files/non_periodic_boundaries.jl +++ b/docs/literate/src/files/non_periodic_boundaries.jl @@ -16,6 +16,8 @@ # state as arguments, and solves an approximate Riemann problem to introduce dissipation (and # hence stabilization) at the boundary. Hence, the performance of the Dirichlet BC depends on the # fidelity of the numerical surface flux. +# An easy-to read introductory reference on this topic is the paper by +# [Mengaldo et al.](https://doi.org/10.2514/6.2014-2923). # The passed boundary value function is called with the same arguments as an initial condition # function, i.e. 
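The documentation change above describes how a Dirichlet boundary state is imposed only weakly, through the approximate Riemann solver at the surface. As a brief illustration (not part of any patch in this series; the function name `boundary_values_sine` and the scalar advection setup are chosen freely for the sketch), a boundary value function in Trixi.jl has the same signature as an initial condition and is wrapped in `BoundaryConditionDirichlet`:

```julia
using Trixi

# Purely illustrative choice of equation: 1D scalar advection with velocity 1.
advection_velocity = 1.0
equations = LinearScalarAdvectionEquation1D(advection_velocity)

# Called with the same arguments as an initial condition: position `x`, time `t`, equations.
function boundary_values_sine(x, t, equations::LinearScalarAdvectionEquation1D)
    return SVector(sinpi(2 * (x[1] - t)))  # prescribed external state
end

# The boundary state enters only through the numerical surface flux chosen below,
# which supplies the dissipation that stabilizes the weakly enforced condition.
boundary_condition = BoundaryConditionDirichlet(boundary_values_sine)
solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs)
```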
From 585fb93cfba5712401fee00d06aeeedf8db5fe7e Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Thu, 25 Jan 2024 10:13:00 +0100 Subject: [PATCH 078/166] Different Boundary Conditions for Standard Abaqus (#1799) * first take on BCs for standard Abaqus * simplify * Couple comments, example * comments * shorten code * refactor * fmt * refactor * 3D * tests and fmt * comment * news * comment * stick to polydeg * polydeg 3 * polydeg * Update examples/p4est_2d_dgsem/elixir_euler_airfoil_mach2.jl Co-authored-by: Andrew Winters * Update examples/p4est_2d_dgsem/elixir_euler_airfoil_mach2.jl Co-authored-by: Andrew Winters * Update src/meshes/p4est_mesh.jl Co-authored-by: Andrew Winters * Update NEWS.md Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> * Update examples/p4est_3d_dgsem/elixir_euler_free_stream_boundaries.jl * improve (?) formatting * fmt * split constructor * comment * two boundaries * comment * comment * rename elixir, change mesh * comments * add doc * avoid unicode * improve doc * Add tutorial * typos * Update docs/src/meshes/p4est_mesh.md Co-authored-by: Andrew Winters * Update docs/literate/src/files/p4est_from_gmsh.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/p4est_from_gmsh.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/p4est_from_gmsh.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/src/meshes/p4est_mesh.md Co-authored-by: Andrew Winters * Update docs/src/meshes/p4est_mesh.md Co-authored-by: Andrew Winters * Update docs/src/meshes/p4est_mesh.md Co-authored-by: Andrew Winters * Update docs/src/meshes/p4est_mesh.md Co-authored-by: Andrew Winters * Update docs/src/meshes/p4est_mesh.md Co-authored-by: Andrew Winters * Update docs/literate/src/files/p4est_from_gmsh.jl Co-authored-by: Andrew Winters * Update docs/literate/src/files/p4est_from_gmsh.jl Co-authored-by: Andrew Winters * Update docs/src/meshes/p4est_mesh.md Co-authored-by: Andrew Winters * Update docs/src/meshes/p4est_mesh.md * Update docs/literate/src/files/p4est_from_gmsh.jl * Update docs/literate/src/files/p4est_from_gmsh.jl * Update docs/literate/src/files/p4est_from_gmsh.jl * Update docs/literate/src/files/p4est_from_gmsh.jl * Update docs/literate/src/files/p4est_from_gmsh.jl * Update docs/literate/src/files/p4est_from_gmsh.jl * Apply suggestions from code review Polish * Update docs/literate/src/files/p4est_from_gmsh.jl Co-authored-by: Andrew Winters * Comments * Update p4est_mesh.jl * Update src/meshes/p4est_mesh.jl Co-authored-by: Johannes Markert <10619309+jmark@users.noreply.github.com> * Update docs/src/meshes/p4est_mesh.md Co-authored-by: Johannes Markert <10619309+jmark@users.noreply.github.com> * Update docs/src/meshes/p4est_mesh.md Co-authored-by: Johannes Markert <10619309+jmark@users.noreply.github.com> * Update docs/src/meshes/p4est_mesh.md Co-authored-by: Johannes Markert <10619309+jmark@users.noreply.github.com> * Update docs/src/meshes/p4est_mesh.md Co-authored-by: Johannes Markert <10619309+jmark@users.noreply.github.com> * more robust parsing * print warning if there has been a bc name supplied for which no nodes have been found * Update docs/literate/src/files/p4est_from_gmsh.jl Co-authored-by: Hendrik Ranocha * Update docs/literate/src/files/p4est_from_gmsh.jl Co-authored-by: Hendrik Ranocha * Update src/meshes/p4est_mesh.jl Co-authored-by: Hendrik Ranocha * Update 
docs/literate/src/files/p4est_from_gmsh.jl --------- Co-authored-by: Andrew Winters Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> Co-authored-by: Johannes Markert <10619309+jmark@users.noreply.github.com> Co-authored-by: Hendrik Ranocha --- NEWS.md | 2 + docs/literate/src/files/p4est_from_gmsh.jl | 459 ++++++++++++++++++ docs/make.jl | 1 + docs/src/meshes/p4est_mesh.md | 256 +++++++++- .../elixir_euler_NACA6412airfoil_mach2.jl | 108 +++++ .../elixir_euler_free_stream_boundaries.jl | 60 +++ src/meshes/p4est_mesh.jl | 226 ++++++++- test/test_p4est_2d.jl | 21 + test/test_p4est_3d.jl | 23 + 9 files changed, 1145 insertions(+), 11 deletions(-) create mode 100644 docs/literate/src/files/p4est_from_gmsh.jl create mode 100644 examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl create mode 100644 examples/p4est_3d_dgsem/elixir_euler_free_stream_boundaries.jl diff --git a/NEWS.md b/NEWS.md index cf695912ed7..3a3a504a911 100644 --- a/NEWS.md +++ b/NEWS.md @@ -9,6 +9,8 @@ for human readability. #### Added - AMR for hyperbolic-parabolic equations on 3D `P4estMesh` - `flux_hllc` on non-cartesian meshes for `CompressibleEulerEquations{2,3}D` +- Different boundary conditions for quad/hex meshes in Abaqus format, even if not generated by HOHQMesh, + can now be digested by Trixi in 2D and 3D. ## Changes when updating to v0.6 from v0.5.x diff --git a/docs/literate/src/files/p4est_from_gmsh.jl b/docs/literate/src/files/p4est_from_gmsh.jl new file mode 100644 index 00000000000..356339cdd47 --- /dev/null +++ b/docs/literate/src/files/p4est_from_gmsh.jl @@ -0,0 +1,459 @@ +#src # `P4estMesh` from [`gmsh`](https://gmsh.info/) + +# Trixi.jl supports numerical approximations from structured and unstructured quadrilateral meshes +# with the [`P4estMesh`](@ref) mesh type. + +# The purpose of this tutorial is to demonstrate how to use the `P4estMesh` +# functionality of Trixi.jl for existing meshes with straight-sided (bilinear) elements/cells. +# This begins by running and visualizing an available unstructured quadrilateral mesh example. +# Then, the tutorial will cover how to use existing meshes generated by [`gmsh`](https://gmsh.info/) +# or any other meshing software that can export to the Abaqus input `.inp` format. + +# ## Running the simulation of a near-field flow around an airfoil + +# Trixi.jl supports solving hyperbolic-parabolic problems on several mesh types. +# A somewhat complex example that employs the `P4estMesh` is the near-field simulation of a +# Mach 2 flow around the NACA6412 airfoil. + +using Trixi +redirect_stdio(stdout=devnull, stderr=devnull) do # code that prints annoying stuff we don't want to see here #hide #md +trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", "elixir_euler_NACA6412airfoil_mach2.jl"), tspan=(0.0, 0.5)) +end #hide #md + +# Conveniently, we use the Plots package to have a first look at the results: +# ```julia +# using Plots +# pd = PlotData2D(sol) +# plot(pd["rho"]) +# plot!(getmesh(pd)) +# ``` + +# ## Creating a mesh using `gmsh` + +# The creation of an unstructured quadrilateral mesh using `gmsh` is driven by a **geometry file**. +# There are plenty of possibilities for the user, see the [documentation](https://gmsh.info/doc/texinfo/gmsh.html) and [tutorials](https://gitlab.onelab.info/gmsh/gmsh/tree/master/tutorials). + +# To begin, we provide a complete geometry file for the NACA6412 airfoil bounded by a rectangular box. 
After this we give a breakdown +# of the most important parts required for successful mesh generation that can later be used by the `p4est` library +# and Trixi.jl. +# We emphasize that this near-field mesh should only be used for instructive purposes and not for actual production runs. + +# The associated `NACA6412.geo` file is given below: +# ```c++ +# // GMSH geometry script for a NACA 6412 airfoil with 11 degree angle of attack +# // in a box (near-field mesh). +# // see https://github.com/cfsengineering/GMSH-Airfoil-2D +# // for software to generate gmsh `.geo` geometry files for NACA airfoils. +# +# // outer bounding box +# Point(1) = {-1.25, -0.5, 0, 1.0}; +# Point(2) = {1.25, -0.5, 0, 1.0}; +# Point(3) = {1.25, 0.5, 0, 1.0}; +# Point(4) = {-1.25, 0.5, 0, 1.0}; +# +# // lines of the bounding box +# Line(1) = {1, 2}; +# Line(2) = {2, 3}; +# Line(3) = {3, 4}; +# Line(4) = {4, 1}; +# // outer box +# Line Loop(8) = {1, 2, 3, 4}; +# +# // Settings +# // This value gives the global element size factor (lower -> finer mesh) +# Mesh.CharacteristicLengthFactor = 1.0 * 2^(-3); +# // Insist on quads instead of default triangles +# Mesh.RecombineAll = 1; +# // Violet instead of green base color for better visibility +# Mesh.ColorCarousel = 0; +# +# // points of the airfoil contour +# // Format: {x, y, z, DesiredCellSize}. See the documentation: https://gmsh.info/doc/texinfo/gmsh.html#Points +# // These concrete points are generated using the tool from https://github.com/cfsengineering/GMSH-Airfoil-2D +# Point(5) = {-0.4900332889206208, 0.09933466539753061, 0, 0.125}; +# Point(6) = {-0.4900274857651495, 0.1021542752054094, 0, 0.125}; +# Point(7) = {-0.4894921489729144, 0.1049830248247787, 0, 0.125}; +# Point(8) = {-0.4884253336670712, 0.1078191282319664, 0, 0.125}; +# Point(9) = {-0.4868257975566199, 0.1106599068424483, 0, 0.125}; +# Point(10) = {-0.4846930063965668, 0.1135018003016681, 0, 0.125}; +# Point(11) = {-0.4820271400142729, 0.1163403835785654, 0, 0.125}; +# Point(12) = {-0.4788290988083472, 0.1191703902233889, 0, 0.125}; +# Point(13) = {-0.4751005105908123, 0.1219857416089041, 0, 0.125}; +# Point(14) = {-0.4708437376101668, 0.1247795819332056, 0, 0.125}; +# Point(15) = {-0.4660618835629463, 0.1275443187232316, 0, 0.125}; +# Point(16) = {-0.4607588003749649, 0.1302716685409717, 0, 0.125}; +# Point(17) = {-0.4549390945110529, 0.132952707559475, 0, 0.125}; +# Point(18) = {-0.448608132554204, 0.1355779266432996, 0, 0.125}; +# Point(19) = {-0.4417720457819508, 0.138137290538182, 0, 0.125}; +# Point(20) = {-0.4344377334597768, 0.140620300747629, 0, 0.125}; +# Point(21) = {-0.4266128645686593, 0.1430160616500159, 0, 0.125}; +# Point(22) = {-0.4183058776865576, 0.1453133493887722, 0, 0.125}; +# Point(23) = {-0.4095259787518715, 0.147500683050503, 0, 0.125}; +# Point(24) = {-0.4002831364505879, 0.1495663976315875, 0, 0.125}; +# Point(25) = {-0.3905880749878933, 0.1514987182830453, 0, 0.125}; +# Point(26) = {-0.3804522640292948, 0.1532858353164163, 0, 0.125}; +# Point(27) = {-0.3698879056254708, 0.1549159794501833, 0, 0.125}; +# Point(28) = {-0.3589079179688306, 0.1563774967770029, 0, 0.125}; +# Point(29) = {-0.3475259158676376, 0.1576589229368209, 0, 0.125}; +# Point(30) = {-0.3357561878650377, 0.158749055989923, 0, 0.125}; +# Point(31) = {-0.3236136699747923, 0.1596370274972017, 0, 0.125}; +# Point(32) = {-0.3111139160522804, 0.1603123713324616, 0, 0.125}; +# Point(33) = {-0.298273064867608, 0.160765089773461, 0, 0.125}; +# Point(34) = {-0.2851078039966239, 0.1609857164445887, 0, 0.125}; +# 
Point(35) = {-0.2716353306943914, 0.160965375714529, 0, 0.125}; +# Point(36) = {-0.2578733099632437, 0.1606958381868515, 0, 0.125}; +# Point(37) = {-0.2438398300730194, 0.1601695719599709, 0, 0.125}; +# Point(38) = {-0.2295533558334121, 0.1593797893750759, 0, 0.125}; +# Point(39) = {-0.2150326799566391, 0.1583204890160489, 0, 0.125}; +# Point(40) = {-0.2002968728818922, 0.1569864927736143, 0, 0.125}; +# Point(41) = {-0.18536523146042, 0.1553734778363979, 0, 0.125}; +# Point(42) = {-0.1702572269208345, 0.1534780035235666, 0, 0.125}; +# Point(43) = {-0.1549924525477129, 0.1512975329264932, 0, 0.125}; +# Point(44) = {-0.1395905715122586, 0.1488304493795921, 0, 0.125}; +# Point(45) = {-0.1240712652914332, 0.1460760678321895, 0, 0.125}; +# Point(46) = {-0.1084541831014299, 0.1430346412430583, 0, 0.125}; +# Point(47) = {-0.09275889275279087, 0.1397073621660917, 0, 0.125}; +# Point(48) = {-0.07700483330818747, 0.1360963597385416, 0, 0.125}; +# Point(49) = {-0.06151286635366404, 0.1323050298149023, 0, 0.125}; +# Point(50) = {-0.04602933219022032, 0.1283521764905442, 0, 0.125}; +# Point(51) = {-0.03051345534800332, 0.1242331665904082, 0, 0.125}; +# Point(52) = {-0.01498163190522334, 0.1199540932779839, 0, 0.125}; +# Point(53) = {0.0005498526140696458, 0.1155214539466913, 0, 0.125}; +# Point(54) = {0.01606484191716884, 0.1109421303284033, 0, 0.125}; +# Point(55) = {0.03154732664394777, 0.106223368423828, 0, 0.125}; +# Point(56) = {0.0469814611314705, 0.1013727584299359, 0, 0.125}; +# Point(57) = {0.06235157928986135, 0.09639821481480275, 0, 0.125}; +# Point(58) = {0.07764220964363855, 0.09130795666388933, 0, 0.125}; +# Point(59) = {0.09283808959671735, 0.08611048839446452, 0, 0.125}; +# Point(60) = {0.1079241789809607, 0.08081458090718853, 0, 0.125}; +# Point(61) = {0.1228856729475325, 0.07542925321638272, 0, 0.125}; +# Point(62) = {0.1377080142575372, 0.06996375457378261, 0, 0.125}; +# Point(63) = {0.1523769050236616, 0.06442754707512513, 0, 0.125}; +# Point(64) = {0.1668783179480157, 0.05883028871526293, 0, 0.125}; +# Point(65) = {0.1811985070933818, 0.05318181683604975, 0, 0.125}; +# Point(66) = {0.1953240182159306, 0.04749213189240609, 0, 0.125}; +# Point(67) = {0.2092416986775084, 0.04177138144606024, 0, 0.125}; +# Point(68) = {0.2229387069452062, 0.03602984428372727, 0, 0.125}; +# Point(69) = {0.2364025216754475, 0.03027791454712048, 0, 0.125}; +# Point(70) = {0.2496209503696738, 0.02452608575629232, 0, 0.125}; +# Point(71) = {0.2625821375791982, 0.01878493460541621, 0, 0.125}; +# Point(72) = {0.2752745726282818, 0.01306510441121807, 0, 0.125}; +# Point(73) = {0.28768709681727, 0.007377288098728577, 0, 0.125}; +# Point(74) = {0.2998089100619555, 0.001732210616722449, 0, 0.125}; +# Point(75) = {0.3116295769214332, -0.003859389314124759, 0, 0.125}; +# Point(76) = {0.3231390319647309, -0.009386778203927332, 0, 0.125}; +# Point(77) = {0.3343275844265582, -0.01483924761490708, 0, 0.125}; +# Point(78) = {0.3451859221046181, -0.02020613485126957, 0, 0.125}; +# Point(79) = {0.3557051144551212, -0.02547684454806881, 0, 0.125}; +# Point(80) = {0.3658766148492779, -0.03064087116872238, 0, 0.125}; +# Point(81) = {0.3756922619615632, -0.0356878223992288, 0, 0.125}; +# Point(82) = {0.3851442802702071, -0.0406074434050937, 0, 0.125}; +# Point(83) = {0.394225279661484, -0.04538964189492445, 0, 0.125}; +# Point(84) = {0.4029282541416501, -0.05002451391298904, 0, 0.125}; +# Point(85) = {0.4112465796735204, -0.05450237026215737, 0, 0.125}; +# Point(86) = {0.4191740111683733, -0.05881376343890812, 0, 0.125}; +# 
Point(87) = {0.4267046786777481, -0.06294951494382847, 0, 0.125}; +# Point(88) = {0.4338330828434404, -0.06690074281456823, 0, 0.125}; +# Point(89) = {0.4405540896772232, -0.07065888921378868, 0, 0.125}; +# Point(90) = {0.4468629247542237, -0.07421574789251445, 0, 0.125}; +# Point(91) = {0.4527551669150955, -0.0775634913396257, 0, 0.125}; +# Point(92) = {0.4582267415819197, -0.08069469742118066, 0, 0.125}; +# Point(93) = {0.4632739138007936, -0.08360237530891265, 0, 0.125}; +# Point(94) = {0.4678932811302005, -0.08627999049569551, 0, 0.125}; +# Point(95) = {0.4720817664982195, -0.08872148869699745, 0, 0.125}; +# Point(96) = {0.4758366111533843, -0.09092131844134463, 0, 0.125}; +# Point(97) = {0.4791553678333992, -0.09287445215953141, 0, 0.125}; +# Point(98) = {0.4820358942729613, -0.09457640559161551, 0, 0.125}; +# Point(99) = {0.4844763471666588, -0.09602325534252773, 0, 0.125}; +# Point(100) = {0.4864751766953637, -0.09721165443119822, 0, 0.125}; +# Point(101) = {0.4880311217148797, -0.09813884569428721, 0, 0.125}; +# Point(102) = {0.4891432056939881, -0.09880267292366274, 0, 0.125}; +# Point(103) = {0.4898107334756874, -0.09920158963645126, 0, 0.125}; +# Point(104) = {0.4900332889206208, -0.09933466539753058, 0, 0.125}; +# Point(105) = {0.4897824225031319, -0.09926905587549506, 0, 0.125}; +# Point(106) = {0.4890301110661922, -0.09907236506934192, 0, 0.125}; +# Point(107) = {0.4877772173496635, -0.09874500608402761, 0, 0.125}; +# Point(108) = {0.48602517690576, -0.09828766683852558, 0, 0.125}; +# Point(109) = {0.4837759946062035, -0.09770130916007558, 0, 0.125}; +# Point(110) = {0.4810322398085871, -0.09698716747297723, 0, 0.125}; +# Point(111) = {0.4777970402368822, -0.09614674703990023, 0, 0.125}; +# Point(112) = {0.4740740746447117, -0.09518182170326678, 0, 0.125}; +# Point(113) = {0.4698675643422793, -0.09409443106501386, 0, 0.125}; +# Point(114) = {0.4651822636784212, -0.09288687703518478, 0, 0.125}; +# Point(115) = {0.460023449577924, -0.09156171967354482, 0, 0.125}; +# Point(116) = {0.4543969102408585, -0.09012177224394632, 0, 0.125}; +# Point(117) = {0.4483089331151018, -0.08857009539864649, 0, 0.125}; +# Point(118) = {0.4417662922553667, -0.08690999040934186, 0, 0.125}; +# Point(119) = {0.4347762351819332, -0.0851449913634191, 0, 0.125}; +# Point(120) = {0.4273464693498908, -0.08327885624791403, 0, 0.125}; +# Point(121) = {0.419485148335155, -0.08131555684993674, 0, 0.125}; +# Point(122) = {0.411200857836944, -0.07925926741086739, 0, 0.125}; +# Point(123) = {0.4025026015879757, -0.07711435198240155, 0, 0.125}; +# Point(124) = {0.3933997872536054, -0.07488535044544484, 0, 0.125}; +# Point(125) = {0.3839022123897198, -0.07257696316779733, 0, 0.125}; +# Point(126) = {0.3740200505167618, -0.07019403429336624, 0, 0.125}; +# Point(127) = {0.3637638373540689, -0.06774153367408606, 0, 0.125}; +# Point(128) = {0.3531444572451353, -0.06522453747557577, 0, 0.125}; +# Point(129) = {0.3421731297908021, -0.06264820750853495, 0, 0.125}; +# Point(130) = {0.3308613966940724, -0.06001776935966011, 0, 0.125}; +# Point(131) = {0.3192211088076166, -0.05733848941811218, 0, 0.125}; +# Point(132) = {0.3072644133633567, -0.05461565091590426, 0, 0.125}; +# Point(133) = {0.2950037413531683, -0.05185452912263369, 0, 0.125}; +# Point(134) = {0.2824517950208982, -0.04906036585632723, 0, 0.125}; +# Point(135) = {0.2696215354188702, -0.04623834349241404, 0, 0.125}; +# Point(136) = {0.2565261699769623, -0.04339355867155523, 0, 0.125}; +# Point(137) = {0.2431791400293651, -0.04053099592384862, 0, 0.125}; +# 
Point(138) = {0.2295941082432855, -0.03765550144139543, 0, 0.125}; +# Point(139) = {0.2157849458952252, -0.03477175724299444, 0, 0.125}; +# Point(140) = {0.2017657199439165, -0.03188425598348005, 0, 0.125}; +# Point(141) = {0.187550679854507, -0.02899727666564914, 0, 0.125}; +# Point(142) = {0.1731542441359161, -0.02611486151457043, 0, 0.125}; +# Point(143) = {0.1585909865622793, -0.02324079427214604, 0, 0.125}; +# Point(144) = {0.1438756220597465, -0.02037858016395433, 0, 0.125}; +# Point(145) = {0.129022992251319, -0.0175314277805827, 0, 0.125}; +# Point(146) = {0.1140480506645569, -0.01470223310184333, 0, 0.125}; +# Point(147) = {0.09896584761949168, -0.01189356587453844, 0, 0.125}; +# Point(148) = {0.08379151482656089, -0.009107658532933174, 0, 0.125}; +# Point(149) = {0.06854024973648176, -0.006346397826038436, 0, 0.125}; +# Point(150) = {0.05322729969528361, -0.003611319287478529, 0, 0.125}; +# Point(151) = {0.03786794596792287, -0.00090360465249055, 0, 0.125}; +# Point(152) = {0.0224774877026287, 0.00177591770710904, 0, 0.125}; +# Point(153) = {0.007071225915134205, 0.004426769294862437, 0, 0.125}; +# Point(154) = {-0.00833555242305456, 0.007048814950562587, 0, 0.125}; +# Point(155) = {-0.02372759010533726, 0.009642253300220296, 0, 0.125}; +# Point(156) = {-0.03908967513210498, 0.01220760427359278, 0, 0.125}; +# Point(157) = {-0.05440665578848514, 0.01474569380579989, 0, 0.125}; +# Point(158) = {-0.06966345527617318, 0.01725763587663899, 0, 0.125}; +# Point(159) = {-0.08484508582421563, 0.01974481207672138, 0, 0.125}; +# Point(160) = {-0.09987987792382108, 0.02219618763023203, 0, 0.125}; +# Point(161) = {-0.1145078729404739, 0.02450371976411331, 0, 0.125}; +# Point(162) = {-0.1290321771824579, 0.0267015185742735, 0, 0.125}; +# Point(163) = {-0.143440065923266, 0.02879471001709845, 0, 0.125}; +# Point(164) = {-0.1577189448447794, 0.03078883518202784, 0, 0.125}; +# Point(165) = {-0.1718563428491159, 0.03268980457290044, 0, 0.125}; +# Point(166) = {-0.1858399037768357, 0.03450385196323842, 0, 0.125}; +# Point(167) = {-0.1996573773370766, 0.03623748825421298, 0, 0.125}; +# Point(168) = {-0.2132966095779342, 0.03789745574015834, 0, 0.125}; +# Point(169) = {-0.2267455332406906, 0.0394906831577609, 0, 0.125}; +# Point(170) = {-0.2399921583489679, 0.04102424186233269, 0, 0.125}; +# Point(171) = {-0.2530245633834605, 0.04250530343879837, 0, 0.125}; +# Point(172) = {-0.2658308873846617, 0.04394109901707172, 0, 0.125}; +# Point(173) = {-0.2783993233102972, 0.04533888052223981, 0, 0.125}; +# Point(174) = {-0.2907181129514687, 0.04670588405019788, 0, 0.125}; +# Point(175) = {-0.3027755436824813, 0.0480492955198111, 0, 0.125}; +# Point(176) = {-0.3145599472847223, 0.04937621871394801, 0, 0.125}; +# Point(177) = {-0.3260597010456697, 0.05069364578437131, 0, 0.125}; +# Point(178) = {-0.337263231291058, 0.05200843025992359, 0, 0.125}; +# Point(179) = {-0.3481590194623916, 0.05332726256406103, 0, 0.125}; +# Point(180) = {-0.3587356108043638, 0.05465664801682354, 0, 0.125}; +# Point(181) = {-0.3689816256782782, 0.0560028872679817, 0, 0.125}; +# Point(182) = {-0.3788857734692287, 0.05737205908247899, 0, 0.125}; +# Point(183) = {-0.3884368690074614, 0.05877000537646382, 0, 0.125}; +# Point(184) = {-0.3976238513788748, 0.06020231838219783, 0, 0.125}; +# Point(185) = {-0.40643580495675, 0.06167432980291591, 0, 0.125}; +# Point(186) = {-0.4148619824472646, 0.06319110180426264, 0, 0.125}; +# Point(187) = {-0.4228918297057104, 0.06475741967717524, 0, 0.125}; +# Point(188) = {-0.43051501204915, 
0.06637778599795482, 0, 0.125}; +# Point(189) = {-0.4377214417649294, 0.06805641610468524, 0, 0.125}; +# Point(190) = {-0.4445013064933708, 0.06979723470503821, 0, 0.125}; +# Point(191) = {-0.4508450981473512, 0.07160387342876083, 0, 0.125}; +# Point(192) = {-0.4567436420215075, 0.073479669138689, 0, 0.125}; +# Point(193) = {-0.4621881257395756, 0.07542766281688272, 0, 0.125}; +# Point(194) = {-0.4671701276898881, 0.07745059884734995, 0, 0.125}; +# Point(195) = {-0.471681644606229, 0.07955092452372269, 0, 0.125}; +# Point(196) = {-0.4757151179639407, 0.0817307896190848, 0, 0.125}; +# Point(197) = {-0.4792634588791559, 0.0839920458658267, 0, 0.125}; +# Point(198) = {-0.4823200712220043, 0.08633624620581726, 0, 0.125}; +# Point(199) = {-0.4848788726822436, 0.08876464368523246, 0, 0.125}; +# Point(200) = {-0.4869343135575803, 0.09127818988394577, 0, 0.125}; +# Point(201) = {-0.4884813930704814, 0.09387753278635144, 0, 0.125}; +# Point(202) = {-0.4895156730580155, 0.09656301401871749, 0, 0.125}; +# +# // splines of the airfoil +# Spline(5) = {5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104}; +# Spline(6) = {104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,5}; +# +# // airfoil +# Line Loop(9) = {5, 6}; +# // complete domain +# Plane Surface(1) = {8, 9}; +# +# // labeling of the boundary parts +# Physical Line(1) = {4}; // inflow +# Physical Line(2) = {2}; // outflow +# Physical Line(3) = {1, 3}; // airfoil +# Physical Line(4) = {5, 6}; // upper/lower wall +# Physical Surface(1) = {10}; +# ``` +# From which we can construct a mesh like this: +# ![mesh_screenshot](https://github.com/trixi-framework/Trixi.jl/assets/75639095/67adfe3d-d403-4cd3-acaa-971a34df0709) +# +# The first four points define the bounding box = (near-field) domain: +# ```c++ +# // outer bounding box +# Point(1) = {-1.25, -0.5, 0, 1.0}; +# Point(2) = {1.25, -0.5, 0, 1.0}; +# Point(3) = {1.25, 0.5, 0, 1.0}; +# Point(4) = {-1.25, 0.5, 0, 1.0}; +# ``` +# which is constructed from connecting the points in lines: +# ```c++ +# // outer box +# Line(1) = {1, 2}; +# Line(2) = {2, 3}; +# Line(3) = {3, 4}; +# Line(4) = {4, 1}; +# // outer box +# Line Loop(8) = {1, 2, 3, 4}; +# ``` +# +# This is followed by a couple (in principle optional) settings where the most important one is +# ```c++ +# // Insist on quads instead of default triangles +# Mesh.RecombineAll = 1; +# ``` +# which forces `gmsh` to generate quadrilateral elements instead of the default triangles. +# This is strictly required to be able to use the mesh later with `p4est`, which supports only straight-sided quads, +# i.e., `C2D4, CPS4, S4` in 2D and `C3D` in 3D. +# See for more details the (short) [documentation](https://p4est.github.io/p4est-howto.pdf) on the interaction of `p4est` with `.inp` files. 
+# In principle, it should also be possible to use the `recombine` function of `gmsh` to convert the triangles to quads, +# but this is observed to be less robust than enforcing quads from the beginning. +# +# Then the airfoil is defined by a set of points: +# ```c++ +# // points of the airfoil contour +# Point(5) = {-0.4900332889206208, 0.09933466539753061, 0, 0.125}; +# Point(6) = {-0.4900274857651495, 0.1021542752054094, 0, 0.125}; +# ... +# ``` +# which are connected by splines for the upper and lower part of the airfoil: +# ```c++ +# // splines of the airfoil +# Spline(5) = {5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20, +# ... +# 96,97,98,99,100,101,102,103,104}; +# Spline(6) = {104,105,106,107,108,109,110,111,112,113,114,115, +# ... +# 200,201,202,5}; +# ``` +# which are then connected to form a single line loop for easy physical group assignment: +# ```c++ +# // airfoil +# Line Loop(9) = {5, 6}; +# ``` +# +# At the end of the file the physical groups are defined: +# ```c++ +# // labeling of the boundary parts +# Physical Line(1) = {4}; // Inflow. Label in Abaqus .inp file: PhysicalLine1 +# Physical Line(2) = {2}; // Outflow. Label in Abaqus .inp file: PhysicalLine2 +# Physical Line(3) = {1, 3}; // Airfoil. Label in Abaqus .inp file: PhysicalLine3 +# Physical Line(4) = {5, 6}; //Upper and lower wall/farfield/... Label in Abaqus .inp file: PhysicalLine4 +# ``` +# which are crucial for the correct assignment of boundary conditions in `Trixi.jl`. +# In particular, it is the responsibility of a user to keep track on the physical boundary names between the mesh generation and assignment of boundary condition functions in an elixir. +# +# After opening this file in `gmsh`, meshing the geometry and exporting to Abaqus `.inp` format, +# we can have a look at the input file: +# ``` +# *Heading +# +# *NODE +# 1, -1.25, -0.5, 0 +# 2, 1.25, -0.5, 0 +# 3, 1.25, 0.5, 0 +# 4, -1.25, 0.5, 0 +# ... +# ******* E L E M E N T S ************* +# *ELEMENT, type=T3D2, ELSET=Line1 +# 1, 1, 7 +# ... +# *ELEMENT, type=CPS4, ELSET=Surface1 +# 191, 272, 46, 263, 807 +# ... +# *NSET,NSET=PhysicalLine1 +# 1, 4, 52, 53, 54, 55, 56, 57, 58, +# *NSET,NSET=PhysicalLine2 +# 2, 3, 26, 27, 28, 29, 30, 31, 32, +# *NSET,NSET=PhysicalLine3 +# 1, 2, 3, 4, 7, 8, 9, 10, 11, 12, +# 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, +# 23, 24, 25, 33, 34, 35, 36, 37, 38, 39, +# 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, +# 50, 51, +# *NSET,NSET=PhysicalLine4 +# 5, 6, 59, 60, 61, 62, 63, 64, 65, 66, +# 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, +# 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, +# 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, +# 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, +# 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, +# 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, +# 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, +# 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, +# 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, +# 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, +# 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, +# 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, +# 187, 188, 189, 190, +# ``` +# +# First, the coordinates of the nodes are listed, followed by the elements. +# Note that `gmsh` exports also line elements of type `T3D2` which are ignored by `p4est`. +# The relevant elements in 2D which form the gridcells are of type `CPS4` which are defined by their four corner nodes. +# This is followed by the nodesets encoded via `*NSET` which are used to assign boundary conditions in Trixi.jl. 
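Before moving on to how Trixi.jl consumes this information, a small standalone sketch may help make the `*NSET` blocks concrete. This is not the parser added by this patch (that is `parse_node_sets` in `src/meshes/p4est_mesh.jl` further below); it only shows, assuming an `.inp` file structured like the excerpt above and a hypothetical file name, how the node ids of a single nodeset could be collected:

```julia
# Illustrative only: collect the node ids listed under one `*NSET` block of an Abaqus file.
function node_ids_of_nodeset(mesh_file::String, nodeset::Symbol)
    node_ids = Int[]
    in_set = false
    for line in eachline(mesh_file)
        if startswith(line, "*NSET,NSET=")
            # A new nodeset begins; check whether it is the one we are looking for.
            in_set = Symbol(split(line, "=")[2]) == nodeset
        elseif in_set && startswith(line, "*")
            break # the next keyword block ends the nodeset
        elseif in_set
            # Node ids are comma separated, usually with a trailing comma.
            entries = filter(!isempty, strip.(split(line, ",")))
            append!(node_ids, parse.(Int, entries))
        end
    end
    return node_ids
end

# Example usage (hypothetical file name):
# node_ids_of_nodeset("mesh_NACA6412.inp", :PhysicalLine1)
```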
+# Trixi.jl parses the `.inp` file and assigns the edges (in 2D, surfaces in 3D) of elements to the corresponding boundary condition based on
+# the `boundary_symbols` that are supplied to the `P4estMesh` constructor:
+# ```julia
+# # boundary symbols
+# boundary_symbols = [:PhysicalLine1, :PhysicalLine2, :PhysicalLine3, :PhysicalLine4]
+# mesh = P4estMesh{2}(mesh_file, polydeg = polydeg, boundary_symbols = boundary_symbols)
+# ```
+# The same boundary symbols must then also be supplied to the semidiscretization alongside the
+# corresponding physical boundary conditions:
+# ```julia
+# # Supersonic inflow boundary condition.
+# # Calculate the boundary flux entirely from the external solution state, i.e., set
+# # external solution state values for everything entering the domain.
+# @inline function boundary_condition_supersonic_inflow(u_inner,
+#                                                       normal_direction::AbstractVector,
+#                                                       x, t, surface_flux_function,
+#                                                       equations::CompressibleEulerEquations2D)
+#     u_boundary = initial_condition_mach2_flow(x, t, equations)
+#     flux = Trixi.flux(u_boundary, normal_direction, equations)
+#
+#     return flux
+# end
+#
+# # Supersonic outflow boundary condition.
+# # Calculate the boundary flux entirely from the internal solution state. Analogous to supersonic inflow
+# # except all the solution state values are set from the internal solution as everything leaves the domain
+# @inline function boundary_condition_supersonic_outflow(u_inner,
+#                                                        normal_direction::AbstractVector, x,
+#                                                        t,
+#                                                        surface_flux_function,
+#                                                        equations::CompressibleEulerEquations2D)
+#     flux = Trixi.flux(u_inner, normal_direction, equations)
+#
+#     return flux
+# end
+#
+# boundary_conditions = Dict(:PhysicalLine1 => boundary_condition_supersonic_inflow, # Left boundary
+#                            :PhysicalLine2 => boundary_condition_supersonic_outflow, # Right boundary
+#                            :PhysicalLine3 => boundary_condition_slip_wall, # Airfoil
+#                            :PhysicalLine4 => boundary_condition_supersonic_outflow) # Top and bottom boundary
+#
+# semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver,
+#                                     boundary_conditions = boundary_conditions)
+# ```
+# Note that you **have to** supply the `boundary_symbols` keyword to the `P4estMesh` constructor
+# to select the boundaries from the available nodesets in the `.inp` file.
+# If the `boundary_symbols` keyword is not supplied, all boundaries will be assigned to the default set `:all`.
+
+# ## Package versions
+
+# These results were obtained using the following versions.
+
+using InteractiveUtils
+versioninfo()
+
+using Pkg
+Pkg.status(["Trixi", "OrdinaryDiffEq", "Plots", "Download"],
+           mode=PKGMODE_MANIFEST)
diff --git a/docs/make.jl b/docs/make.jl
index df8ac04be12..dee87371bd1 100644
--- a/docs/make.jl
+++ b/docs/make.jl
@@ -65,6 +65,7 @@ files = [
     "Adaptive mesh refinement" => "adaptive_mesh_refinement.jl",
     "Structured mesh with curvilinear mapping" => "structured_mesh_mapping.jl",
     "Unstructured meshes with HOHQMesh.jl" => "hohqmesh_tutorial.jl",
+    "P4est mesh from gmsh" => "p4est_from_gmsh.jl",
     # Topic: other stuff
     "Explicit time stepping" => "time_stepping.jl",
     "Differentiable programming" => "differentiable_programming.jl",
diff --git a/docs/src/meshes/p4est_mesh.md b/docs/src/meshes/p4est_mesh.md
index 1e5d782ebb6..db75346cab3 100644
--- a/docs/src/meshes/p4est_mesh.md
+++ b/docs/src/meshes/p4est_mesh.md
@@ -55,7 +55,7 @@ This heading is used to indicate to the mesh constructor which of the above mapp
create a curvilinear mesh.
If the Abaqus file header is **not** present then the `P4estMesh` is created with the first strategy above. -#### List of corner nodes +#### List of corner nodes Next, prefaced with `*NODE`, comes a list of the physical `(x,y,z)` coordinates of all the corners. The first integer in the list of the corners provides its id number. @@ -71,7 +71,7 @@ Thus, for the two-dimensional example mesh this block of corner information is 7, 3.0, -1.0, 0.0 ``` -#### List of elements +#### List of elements The element connectivity is given after the list of corners. The header for this information block is ``` @@ -98,7 +98,9 @@ The construction of the element neighbor ids and identifying physical boundary s directly from the [`p4est`](https://github.com/cburstedde/p4est) library. For example, the neighbor connectivity is created in the mesh constructor using the wrapper `read_inp_p4est` function. -#### HOHQMesh boundary information +#### Encoding of boundaries + +##### HOHQMesh boundary information If present, any additional information in the mesh file that was created by `HOHQMesh` is prefaced with `** ` to make it an Abaqus comment. @@ -230,8 +232,38 @@ For completeness, we provide the entire Abaqus mesh file for the example mesh in ** Bottom --- Right --- ``` +##### Standard Abaqus format boundary information + +As an alternative to an Abaqus mesh generated by `HOHQMesh`, `.inp` files with boundary information encoded as nodesets `*NSET,NSET=` can be used to construct a `p4est` mesh. +This is especially useful for usage of existing meshes (consisting of bilinear elements) which could stem from the popular [`gmsh`](https://gmsh.info/) meshing software. + +In addition to the list of [nodes](#nodes) and [elements](#elements) given above, there are nodesets of the form +``` +*NSET,NSET=PhysicalLine1 +1, 4, 52, 53, 54, 55, 56, 57, 58, +``` +present which are used to associate the edges defined through their corner nodes with a label. In this case it is called `PhysicalLine1`. +By looping over every element and its associated edges, consisting of two nodes, we query the read in `NSET`s if the current node pair is present. + +To prevent that every nodeset following `*NSET,NSET=` is treated as a boundary, the user must supply a `boundary_symbols` keyword to the [`P4estMesh`](@ref) constructor: + +```julia +boundary_symbols = [:PhysicalLine1] + +mesh = P4estMesh{2}(mesh_file, polydeg = polydeg, boundary_symbols = boundary_symbols) +``` +By doing so, only nodesets with a label present in `boundary_symbols` are treated as physical boundaries. +Other nodesets that could be used for diagnostics are not treated as external boundaries. +Note that there is a leading colon `:` compared to the label in the `.inp` mesh file. +This is required to turn the label into a [`Symbol`](https://docs.julialang.org/en/v1/manual/metaprogramming/#Symbols). + +A 2D example for this mesh, which is read-in for an unstructured mesh file created with `gmsh`, is presented in +`examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl`. + ### Mesh in three spatial dimensions +#### `HOHQMesh`-Extended Abaqus format + The 3D Abaqus file format with high-order boundary information from `HOHQMesh` is very similar to the 2D version discussed above. There are only three changes: @@ -346,4 +378,222 @@ transfinite map of the straight sided hexahedral element to find \mathbf{X}(\boldsymbol{\xi}) = \boldsymbol\Sigma(\boldsymbol{\xi}) - \mathcal{C}_{\texttt{edge}}(\boldsymbol{\xi}) + \mathbf{X}_{linear}(\boldsymbol{\xi}). 
+``` + +#### Construction from standard Abaqus + +Also for a mesh in standard Abaqus format there are no qualitative changes when going from 2D to 3D. +The most notable difference is that boundaries are formed in 3D by faces defined by four nodes while in 2D boundaries are edges consisting of two elements. +A simple mesh file, which is used also in `examples/p4est_3d_dgsem/elixir_euler_free_stream_boundaries.jl`, is given below: +``` +*Heading + +*NODE +1, -2, 0, 0 +2, -1, 0, 0 +3, -1, 1, 0 +4, -2, 1, 0 +5, -2, 0, 1 +6, -1, 0, 1 +7, -1, 1, 1 +8, -2, 1, 1 +9, -1.75, 1, 0 +10, -1.5, 1, 0 +11, -1.25, 1, 0 +12, -1, 0.75000000000035, 0 +13, -1, 0.50000000000206, 0 +14, -1, 0.25000000000104, 0 +15, -1.25, 0, 0 +16, -1.5, 0, 0 +17, -1.75, 0, 0 +18, -2, 0.24999999999941, 0 +19, -2, 0.49999999999869, 0 +20, -2, 0.74999999999934, 0 +21, -1.75, 0, 1 +22, -1.5, 0, 1 +23, -1.25, 0, 1 +24, -1, 0.24999999999941, 1 +25, -1, 0.49999999999869, 1 +26, -1, 0.74999999999934, 1 +27, -1.25, 1, 1 +28, -1.5, 1, 1 +29, -1.75, 1, 1 +30, -2, 0.75000000000035, 1 +31, -2, 0.50000000000206, 1 +32, -2, 0.25000000000104, 1 +33, -2, 0, 0.24999999999941 +34, -2, 0, 0.49999999999869 +35, -2, 0, 0.74999999999934 +36, -2, 1, 0.24999999999941 +37, -2, 1, 0.49999999999869 +38, -2, 1, 0.74999999999934 +39, -1, 0, 0.24999999999941 +40, -1, 0, 0.49999999999869 +41, -1, 0, 0.74999999999934 +42, -1, 1, 0.24999999999941 +43, -1, 1, 0.49999999999869 +44, -1, 1, 0.74999999999934 +45, -1.25, 0.25000000000063, 0 +46, -1.25, 0.50000000000122, 0 +47, -1.25, 0.7500000000001, 0 +48, -1.5, 0.25000000000023, 0 +49, -1.5, 0.50000000000038, 0 +50, -1.5, 0.74999999999984, 0 +51, -1.75, 0.24999999999982, 0 +52, -1.75, 0.49999999999953, 0 +53, -1.75, 0.74999999999959, 0 +54, -1.75, 0.25000000000063, 1 +55, -1.75, 0.50000000000122, 1 +56, -1.75, 0.7500000000001, 1 +57, -1.5, 0.25000000000023, 1 +58, -1.5, 0.50000000000038, 1 +59, -1.5, 0.74999999999984, 1 +60, -1.25, 0.24999999999982, 1 +61, -1.25, 0.49999999999953, 1 +62, -1.25, 0.74999999999959, 1 +63, -2, 0.24999999999982, 0.24999999999941 +64, -2, 0.49999999999953, 0.24999999999941 +65, -2, 0.74999999999959, 0.24999999999941 +66, -2, 0.25000000000023, 0.49999999999869 +67, -2, 0.50000000000038, 0.49999999999869 +68, -2, 0.74999999999984, 0.49999999999869 +69, -2, 0.25000000000063, 0.74999999999934 +70, -2, 0.50000000000122, 0.74999999999934 +71, -2, 0.7500000000001, 0.74999999999934 +72, -1.25, 1, 0.74999999999934 +73, -1.25, 1, 0.49999999999869 +74, -1.25, 1, 0.24999999999941 +75, -1.5, 1, 0.74999999999934 +76, -1.5, 1, 0.49999999999869 +77, -1.5, 1, 0.24999999999941 +78, -1.75, 1, 0.74999999999934 +79, -1.75, 1, 0.49999999999869 +80, -1.75, 1, 0.24999999999941 +81, -1, 0.25000000000063, 0.24999999999941 +82, -1, 0.50000000000122, 0.24999999999941 +83, -1, 0.7500000000001, 0.24999999999941 +84, -1, 0.25000000000023, 0.49999999999869 +85, -1, 0.50000000000038, 0.49999999999869 +86, -1, 0.74999999999984, 0.49999999999869 +87, -1, 0.24999999999982, 0.74999999999934 +88, -1, 0.49999999999953, 0.74999999999934 +89, -1, 0.74999999999959, 0.74999999999934 +90, -1.75, 0, 0.74999999999934 +91, -1.75, 0, 0.49999999999869 +92, -1.75, 0, 0.24999999999941 +93, -1.5, 0, 0.74999999999934 +94, -1.5, 0, 0.49999999999869 +95, -1.5, 0, 0.24999999999941 +96, -1.25, 0, 0.74999999999934 +97, -1.25, 0, 0.49999999999869 +98, -1.25, 0, 0.24999999999941 +99, -1.75, 0.25000000000043, 0.74999999999934 +100, -1.75, 0.25000000000023, 0.49999999999869 +101, -1.75, 0.25000000000002, 0.24999999999941 +102, -1.75, 
0.5000000000008, 0.74999999999934 +103, -1.75, 0.50000000000038, 0.49999999999869 +104, -1.75, 0.49999999999995, 0.24999999999941 +105, -1.75, 0.74999999999997, 0.74999999999934 +106, -1.75, 0.74999999999984, 0.49999999999869 +107, -1.75, 0.74999999999972, 0.24999999999941 +108, -1.5, 0.25000000000023, 0.74999999999934 +109, -1.5, 0.25000000000023, 0.49999999999869 +110, -1.5, 0.25000000000023, 0.24999999999941 +111, -1.5, 0.50000000000038, 0.74999999999934 +112, -1.5, 0.50000000000038, 0.49999999999869 +113, -1.5, 0.50000000000038, 0.24999999999941 +114, -1.5, 0.74999999999984, 0.74999999999934 +115, -1.5, 0.74999999999984, 0.49999999999869 +116, -1.5, 0.74999999999984, 0.24999999999941 +117, -1.25, 0.25000000000002, 0.74999999999934 +118, -1.25, 0.25000000000023, 0.49999999999869 +119, -1.25, 0.25000000000043, 0.24999999999941 +120, -1.25, 0.49999999999995, 0.74999999999934 +121, -1.25, 0.50000000000038, 0.49999999999869 +122, -1.25, 0.5000000000008, 0.24999999999941 +123, -1.25, 0.74999999999972, 0.74999999999934 +124, -1.25, 0.74999999999984, 0.49999999999869 +125, -1.25, 0.74999999999997, 0.24999999999941 +******* E L E M E N T S ************* +*ELEMENT, type=C3D8, ELSET=Volume1 +153, 54, 21, 5, 32, 99, 90, 35, 69 +154, 99, 90, 35, 69, 100, 91, 34, 66 +155, 100, 91, 34, 66, 101, 92, 33, 63 +156, 101, 92, 33, 63, 51, 17, 1, 18 +157, 55, 54, 32, 31, 102, 99, 69, 70 +158, 102, 99, 69, 70, 103, 100, 66, 67 +159, 103, 100, 66, 67, 104, 101, 63, 64 +160, 104, 101, 63, 64, 52, 51, 18, 19 +161, 56, 55, 31, 30, 105, 102, 70, 71 +162, 105, 102, 70, 71, 106, 103, 67, 68 +163, 106, 103, 67, 68, 107, 104, 64, 65 +164, 107, 104, 64, 65, 53, 52, 19, 20 +165, 29, 56, 30, 8, 78, 105, 71, 38 +166, 78, 105, 71, 38, 79, 106, 68, 37 +167, 79, 106, 68, 37, 80, 107, 65, 36 +168, 80, 107, 65, 36, 9, 53, 20, 4 +169, 57, 22, 21, 54, 108, 93, 90, 99 +170, 108, 93, 90, 99, 109, 94, 91, 100 +171, 109, 94, 91, 100, 110, 95, 92, 101 +172, 110, 95, 92, 101, 48, 16, 17, 51 +173, 58, 57, 54, 55, 111, 108, 99, 102 +174, 111, 108, 99, 102, 112, 109, 100, 103 +175, 112, 109, 100, 103, 113, 110, 101, 104 +176, 113, 110, 101, 104, 49, 48, 51, 52 +177, 59, 58, 55, 56, 114, 111, 102, 105 +178, 114, 111, 102, 105, 115, 112, 103, 106 +179, 115, 112, 103, 106, 116, 113, 104, 107 +180, 116, 113, 104, 107, 50, 49, 52, 53 +181, 28, 59, 56, 29, 75, 114, 105, 78 +182, 75, 114, 105, 78, 76, 115, 106, 79 +183, 76, 115, 106, 79, 77, 116, 107, 80 +184, 77, 116, 107, 80, 10, 50, 53, 9 +185, 60, 23, 22, 57, 117, 96, 93, 108 +186, 117, 96, 93, 108, 118, 97, 94, 109 +187, 118, 97, 94, 109, 119, 98, 95, 110 +188, 119, 98, 95, 110, 45, 15, 16, 48 +189, 61, 60, 57, 58, 120, 117, 108, 111 +190, 120, 117, 108, 111, 121, 118, 109, 112 +191, 121, 118, 109, 112, 122, 119, 110, 113 +192, 122, 119, 110, 113, 46, 45, 48, 49 +193, 62, 61, 58, 59, 123, 120, 111, 114 +194, 123, 120, 111, 114, 124, 121, 112, 115 +195, 124, 121, 112, 115, 125, 122, 113, 116 +196, 125, 122, 113, 116, 47, 46, 49, 50 +197, 27, 62, 59, 28, 72, 123, 114, 75 +198, 72, 123, 114, 75, 73, 124, 115, 76 +199, 73, 124, 115, 76, 74, 125, 116, 77 +200, 74, 125, 116, 77, 11, 47, 50, 10 +201, 24, 6, 23, 60, 87, 41, 96, 117 +202, 87, 41, 96, 117, 84, 40, 97, 118 +203, 84, 40, 97, 118, 81, 39, 98, 119 +204, 81, 39, 98, 119, 14, 2, 15, 45 +205, 25, 24, 60, 61, 88, 87, 117, 120 +206, 88, 87, 117, 120, 85, 84, 118, 121 +207, 85, 84, 118, 121, 82, 81, 119, 122 +208, 82, 81, 119, 122, 13, 14, 45, 46 +209, 26, 25, 61, 62, 89, 88, 120, 123 +210, 89, 88, 120, 123, 86, 85, 121, 124 +211, 86, 85, 
121, 124, 83, 82, 122, 125 +212, 83, 82, 122, 125, 12, 13, 46, 47 +213, 7, 26, 62, 27, 44, 89, 123, 72 +214, 44, 89, 123, 72, 43, 86, 124, 73 +215, 43, 86, 124, 73, 42, 83, 125, 74 +216, 42, 83, 125, 74, 3, 12, 47, 11 +*NSET,NSET=PhysicalSurface1 +1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +11, 12, 13, 14, 15, 16, 17, 18, 19, 20, +21, 22, 23, 24, 25, 26, 27, 28, 29, 30, +31, 32, 33, 34, 35, 36, 37, 38, 45, 46, +47, 48, 49, 50, 51, 52, 53, 54, 55, 56, +57, 58, 59, 60, 61, 62, 63, 64, 65, 66, +67, 68, 69, 70, 71, +*NSET,NSET=PhysicalSurface2 +1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +11, 12, 13, 14, 15, 16, 17, 21, 22, 23, +24, 25, 26, 27, 28, 29, 33, 34, 35, 36, +37, 38, 39, 40, 41, 42, 43, 44, 72, 73, +74, 75, 76, 77, 78, 79, 80, 81, 82, 83, +84, 85, 86, 87, 88, 89, 90, 91, 92, 93, +94, 95, 96, 97, 98, ``` \ No newline at end of file diff --git a/examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl b/examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl new file mode 100644 index 00000000000..6673053d88f --- /dev/null +++ b/examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl @@ -0,0 +1,108 @@ + +using Trixi +using OrdinaryDiffEq +using Downloads: download + +############################################################################### +# semidiscretization of the compressible Euler equations + +equations = CompressibleEulerEquations2D(1.4) + +@inline function initial_condition_mach2_flow(x, t, equations::CompressibleEulerEquations2D) + # set the freestream flow parameters + rho_freestream = 1.4 + v1 = 2.0 + v2 = 0.0 + p_freestream = 1.0 + + prim = SVector(rho_freestream, v1, v2, p_freestream) + return prim2cons(prim, equations) +end + +initial_condition = initial_condition_mach2_flow + +# Supersonic inflow boundary condition. +# Calculate the boundary flux entirely from the external solution state, i.e., set +# external solution state values for everything entering the domain. +@inline function boundary_condition_supersonic_inflow(u_inner, + normal_direction::AbstractVector, + x, t, surface_flux_function, + equations::CompressibleEulerEquations2D) + u_boundary = initial_condition_mach2_flow(x, t, equations) + flux = Trixi.flux(u_boundary, normal_direction, equations) + + return flux +end + +# Supersonic outflow boundary condition. +# Calculate the boundary flux entirely from the internal solution state. 
Analogous to supersonic inflow +# except all the solution state values are set from the internal solution as everything leaves the domain +@inline function boundary_condition_supersonic_outflow(u_inner, + normal_direction::AbstractVector, x, + t, + surface_flux_function, + equations::CompressibleEulerEquations2D) + flux = Trixi.flux(u_inner, normal_direction, equations) + + return flux +end + +polydeg = 3 + +surface_flux = flux_lax_friedrichs +volume_flux = flux_ranocha + +basis = LobattoLegendreBasis(polydeg) +shock_indicator = IndicatorHennemannGassner(equations, basis, + alpha_max = 0.5, + alpha_min = 0.001, + alpha_smooth = true, + variable = density_pressure) +volume_integral = VolumeIntegralShockCapturingHG(shock_indicator; + volume_flux_dg = volume_flux, + volume_flux_fv = surface_flux) + +# DG Solver +solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux, + volume_integral = volume_integral) + +# Mesh generated from the following gmsh geometry input file: +# https://gist.githubusercontent.com/DanielDoehring/5ade6d93629f0d8c23a598812dbee2a9/raw/d2bc904fe92146eae1a36156e7f5c535dc1a80f1/NACA6412.geo +mesh_file = joinpath(@__DIR__, "mesh_NACA6412.inp") +isfile(mesh_file) || + download("https://gist.githubusercontent.com/DanielDoehring/e2a389f04f1e37b33819b9637e8ee4c3/raw/4bf7607a2ce4432fdb5cb87d5e264949b11bd5d7/mesh_NACA6412.inp", + mesh_file) + +boundary_symbols = [:PhysicalLine1, :PhysicalLine2, :PhysicalLine3, :PhysicalLine4] + +mesh = P4estMesh{2}(mesh_file, polydeg = polydeg, boundary_symbols = boundary_symbols) + +boundary_conditions = Dict(:PhysicalLine1 => boundary_condition_supersonic_inflow, # Left boundary + :PhysicalLine2 => boundary_condition_supersonic_outflow, # Right boundary + :PhysicalLine3 => boundary_condition_slip_wall, # Airfoil + :PhysicalLine4 => boundary_condition_supersonic_outflow) # Top and bottom boundary + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions) + +tspan = (0.0, 5.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 1000 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +stepsize_callback = StepsizeCallback(cfl = 4.0) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + stepsize_callback) + +# Run the simulation +############################################################################### +sol = solve(ode, SSPRK104(; thread = OrdinaryDiffEq.True()); + dt = 1.0, # overwritten by the `stepsize_callback` + callback = callbacks); + +summary_callback() # print the timer summary diff --git a/examples/p4est_3d_dgsem/elixir_euler_free_stream_boundaries.jl b/examples/p4est_3d_dgsem/elixir_euler_free_stream_boundaries.jl new file mode 100644 index 00000000000..bdc4da26c1f --- /dev/null +++ b/examples/p4est_3d_dgsem/elixir_euler_free_stream_boundaries.jl @@ -0,0 +1,60 @@ + +using Downloads: download +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the compressible Euler equations + +equations = CompressibleEulerEquations3D(1.4) + +initial_condition = initial_condition_constant + +polydeg = 3 +solver = DGSEM(polydeg = polydeg, surface_flux = flux_lax_friedrichs) + +############################################################################### +# Get the uncurved mesh from a file (downloads the file if not available locally) + +default_mesh_file = joinpath(@__DIR__, 
"mesh_cube_with_boundaries.inp") +isfile(default_mesh_file) || + download("https://gist.githubusercontent.com/DanielDoehring/710eab379fe3042dc08af6f2d1076e49/raw/38e9803bc0dab9b32a61d9542feac5343c3e6f4b/mesh_cube_with_boundaries.inp", + default_mesh_file) +mesh_file = default_mesh_file + +boundary_symbols = [:PhysicalSurface1, :PhysicalSurface2] + +mesh = P4estMesh{3}(mesh_file, polydeg = polydeg, boundary_symbols = boundary_symbols) + +boundary_conditions = Dict(:PhysicalSurface1 => BoundaryConditionDirichlet(initial_condition), + :PhysicalSurface2 => BoundaryConditionDirichlet(initial_condition)) + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions) + +############################################################################### +# ODE solvers, callbacks etc. + +tspan = (0.0, 1.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +stepsize_callback = StepsizeCallback(cfl = 1.5) + +callbacks = CallbackSet(summary_callback, + analysis_callback, alive_callback, + stepsize_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); +summary_callback() # print the timer summary diff --git a/src/meshes/p4est_mesh.jl b/src/meshes/p4est_mesh.jl index 60db285e04f..c5d39ef00c0 100644 --- a/src/meshes/p4est_mesh.jl +++ b/src/meshes/p4est_mesh.jl @@ -289,7 +289,8 @@ end P4estMesh{NDIMS}(meshfile::String; mapping=nothing, polydeg=1, RealT=Float64, initial_refinement_level=0, unsaved_changes=true, - p4est_partition_allow_for_coarsening=true) + p4est_partition_allow_for_coarsening=true, + boundary_symbols = nothing) Main mesh constructor for the `P4estMesh` that imports an unstructured, conforming mesh from an Abaqus mesh file (`.inp`). Each element of the conforming mesh parsed @@ -310,8 +311,9 @@ To create a curved unstructured mesh `P4estMesh` two strategies are available: straight-sided from the information parsed from the `meshfile`. If a mapping function is specified then it computes the mapped tree coordinates via polynomial interpolants with degree `polydeg`. The mesh created by this function will only - have one boundary `:all`, as distinguishing different physical boundaries is - non-trivial. + have one boundary `:all` if `boundary_symbols` is not specified. + If `boundary_symbols` is specified the mesh file will be parsed for nodesets defining + the boundary nodes from which boundary edges (2D) and faces (3D) will be assigned. Note that the `mapping` and `polydeg` keyword arguments are only used by the `p4est_mesh_from_standard_abaqus` function. The `p4est_mesh_from_hohqmesh_abaqus` function obtains the mesh `polydeg` directly from the `meshfile` @@ -345,11 +347,14 @@ For example, if a two-dimensional base mesh contains 25 elements then setting - `p4est_partition_allow_for_coarsening::Bool`: Must be `true` when using AMR to make mesh adaptivity independent of domain partitioning. Should be `false` for static meshes to permit more fine-grained partitioning. 
+- `boundary_symbols::Vector{Symbol}`: A vector of symbols that correspond to the boundary names in the `meshfile`. + If `nothing` is passed then all boundaries are named `:all`. """ function P4estMesh{NDIMS}(meshfile::String; mapping = nothing, polydeg = 1, RealT = Float64, initial_refinement_level = 0, unsaved_changes = true, - p4est_partition_allow_for_coarsening = true) where {NDIMS} + p4est_partition_allow_for_coarsening = true, + boundary_symbols = nothing) where {NDIMS} # Prevent `p4est` from crashing Julia if the file doesn't exist @assert isfile(meshfile) @@ -373,7 +378,8 @@ function P4estMesh{NDIMS}(meshfile::String; polydeg, initial_refinement_level, NDIMS, - RealT) + RealT, + boundary_symbols) end return P4estMesh{NDIMS}(p4est, tree_node_coordinates, nodes, @@ -444,7 +450,8 @@ end # the `mapping` passed to this function using polynomial interpolants of degree `polydeg`. All boundary # names are given the name `:all`. function p4est_mesh_from_standard_abaqus(meshfile, mapping, polydeg, - initial_refinement_level, n_dimensions, RealT) + initial_refinement_level, n_dimensions, RealT, + boundary_symbols) # Create the mesh connectivity using `p4est` connectivity = read_inp_p4est(meshfile, Val(n_dimensions)) connectivity_pw = PointerWrapper(connectivity) @@ -469,12 +476,215 @@ function p4est_mesh_from_standard_abaqus(meshfile, mapping, polydeg, p4est = new_p4est(connectivity, initial_refinement_level) - # There's no simple and generic way to distinguish boundaries. Name all of them :all. - boundary_names = fill(:all, 2 * n_dimensions, n_trees) + if boundary_symbols === nothing + # There's no simple and generic way to distinguish boundaries without any information given. + # Name all of them :all. + boundary_names = fill(:all, 2 * n_dimensions, n_trees) + else # Boundary information given + # Read in nodes belonging to boundaries + node_set_dict = parse_node_sets(meshfile, boundary_symbols) + # Read in all elements with associated nodes to specify the boundaries + element_node_matrix = parse_elements(meshfile, n_trees, n_dimensions) + + # Initialize boundary information matrix with symbol for no boundary / internal connection + boundary_names = fill(Symbol("---"), 2 * n_dimensions, n_trees) + + # Fill `boundary_names` such that it can be processed by p4est + assign_boundaries_standard_abaqus!(boundary_names, n_trees, + element_node_matrix, node_set_dict, + Val(n_dimensions)) + end return p4est, tree_node_coordinates, nodes, boundary_names end +function parse_elements(meshfile, n_trees, n_dims) + @assert n_dims in (2, 3) "Only 2D and 3D meshes are supported" + # Valid element types (that can be processed by p4est) based on dimension + element_types = n_dims == 2 ? + ["*ELEMENT, type=CPS4", "*ELEMENT, type=C2D4", + "*ELEMENT, type=S4"] : ["*ELEMENT, type=C3D8"] + # 2D quads: 4 nodes + element index, 3D hexes: 8 nodes + element index + expected_content_length = n_dims == 2 ? 5 : 9 + + element_node_matrix = Matrix{Int64}(undef, n_trees, expected_content_length - 1) + el_list_follows = false + tree_id = 1 + + open(meshfile, "r") do file + for line in eachline(file) + if any(startswith(line, el_type) for el_type in element_types) + el_list_follows = true + elseif el_list_follows + content = split(line, ",") + if length(content) == expected_content_length # Check that we still read in connectivity data + content_int = parse.(Int64, content) + # Add constituent nodes to the element_node_matrix. + # Important: Do not use index from the Abaqus file, but the one from p4est. 
+ element_node_matrix[tree_id, :] = content_int[2:end] # First entry is element id + tree_id += 1 + else # Processed all elements for this ELSET + el_list_follows = false + end + end + end + end + + return element_node_matrix +end + +function parse_node_sets(meshfile, boundary_symbols) + nodes_dict = Dict{Symbol, Vector{Int64}}() + current_symbol = nothing + current_nodes = Int64[] + + open(meshfile, "r") do file + for line in eachline(file) + # Check if the line contains nodes assembled in a special set, i.e., a physical boundary + if startswith(line, "*NSET,NSET=") + # Safe the previous nodeset + if current_symbol !== nothing + nodes_dict[current_symbol] = current_nodes + end + + current_symbol = Symbol(split(line, "=")[2]) + if current_symbol in boundary_symbols + # New nodeset + current_nodes = Int64[] + else # Read only boundary node sets + current_symbol = nothing + end + elseif current_symbol !== nothing # Read only if there was already a nodeset specified + try # Check if line contains nodes + # There is always a trailing comma, remove the corresponding empty string + append!(current_nodes, parse.(Int64, split(line, ",")[1:(end - 1)])) + catch # Something different, stop reading in nodes + # If parsing fails, set current_symbol to nothing + nodes_dict[current_symbol] = current_nodes + current_symbol = nothing + end + end + end + # Safe the previous nodeset + if current_symbol !== nothing + nodes_dict[current_symbol] = current_nodes + end + end + + for symbol in boundary_symbols + if !haskey(nodes_dict, symbol) + @warn "No nodes found for nodeset :" * "$symbol" * " !" + end + end + + return nodes_dict +end + +# This function assigns the edges of elements to boundaries by +# checking if the nodes that define the edges are part of nodesets which correspond to boundaries. +function assign_boundaries_standard_abaqus!(boundary_names, n_trees, + element_node_matrix, node_set_dict, + ::Val{2}) # 2D version + for tree in 1:n_trees + tree_nodes = element_node_matrix[tree, :] + # For node labeling, see + # https://docs.software.vt.edu/abaqusv2022/English/SIMACAEELMRefMap/simaelm-r-2delem.htm#simaelm-r-2delem-t-nodedef1 + # and search for "Node ordering and face numbering on elements" + for boundary in keys(node_set_dict) # Loop over specified boundaries + # Check bottom edge + if tree_nodes[1] in node_set_dict[boundary] && + tree_nodes[2] in node_set_dict[boundary] + # Bottom boundary is position 3 in p4est indexing + boundary_names[3, tree] = boundary + end + # Check right edge + if tree_nodes[2] in node_set_dict[boundary] && + tree_nodes[3] in node_set_dict[boundary] + # Right boundary is position 2 in p4est indexing + boundary_names[2, tree] = boundary + end + # Check top edge + if tree_nodes[3] in node_set_dict[boundary] && + tree_nodes[4] in node_set_dict[boundary] + # Top boundary is position 4 in p4est indexing + boundary_names[4, tree] = boundary + end + # Check left edge + if tree_nodes[4] in node_set_dict[boundary] && + tree_nodes[1] in node_set_dict[boundary] + # Left boundary is position 1 in p4est indexing + boundary_names[1, tree] = boundary + end + end + end + + return boundary_names +end + +# This function assigns the edges of elements to boundaries by +# checking if the nodes that define the faces are part of nodesets which correspond to boundaries. 
+function assign_boundaries_standard_abaqus!(boundary_names, n_trees, + element_node_matrix, node_set_dict, + ::Val{3}) # 3D version + for tree in 1:n_trees + tree_nodes = element_node_matrix[tree, :] + # For node labeling, see + # https://web.mit.edu/calculix_v2.7/CalculiX/ccx_2.7/doc/ccx/node26.html + for boundary in keys(node_set_dict) # Loop over specified boundaries + # Check "front face" (y_min) + if tree_nodes[1] in node_set_dict[boundary] && + tree_nodes[2] in node_set_dict[boundary] && + tree_nodes[5] in node_set_dict[boundary] && + tree_nodes[6] in node_set_dict[boundary] + # Front face is position 3 in p4est indexing + boundary_names[3, tree] = boundary + end + # Check "back face" (y_max) + if tree_nodes[3] in node_set_dict[boundary] && + tree_nodes[4] in node_set_dict[boundary] && + tree_nodes[7] in node_set_dict[boundary] && + tree_nodes[8] in node_set_dict[boundary] + # Front face is position 4 in p4est indexing + boundary_names[4, tree] = boundary + end + # Check "left face" (x_min) + if tree_nodes[1] in node_set_dict[boundary] && + tree_nodes[4] in node_set_dict[boundary] && + tree_nodes[5] in node_set_dict[boundary] && + tree_nodes[8] in node_set_dict[boundary] + # Left face is position 1 in p4est indexing + boundary_names[1, tree] = boundary + end + # Check "right face" (x_max) + if tree_nodes[2] in node_set_dict[boundary] && + tree_nodes[3] in node_set_dict[boundary] && + tree_nodes[6] in node_set_dict[boundary] && + tree_nodes[7] in node_set_dict[boundary] + # Right face is position 2 in p4est indexing + boundary_names[2, tree] = boundary + end + # Check "bottom face" (z_min) + if tree_nodes[1] in node_set_dict[boundary] && + tree_nodes[2] in node_set_dict[boundary] && + tree_nodes[3] in node_set_dict[boundary] && + tree_nodes[4] in node_set_dict[boundary] + # Bottom face is position 5 in p4est indexing + boundary_names[5, tree] = boundary + end + # Check "top face" (z_max) + if tree_nodes[5] in node_set_dict[boundary] && + tree_nodes[6] in node_set_dict[boundary] && + tree_nodes[7] in node_set_dict[boundary] && + tree_nodes[8] in node_set_dict[boundary] + # Top face is position 6 in p4est indexing + boundary_names[6, tree] = boundary + end + end + end + + return boundary_names +end + """ P4estMeshCubedSphere(trees_per_face_dimension, layers, inner_radius, thickness; polydeg, RealT=Float64, diff --git a/test/test_p4est_2d.jl b/test/test_p4est_2d.jl index cebc2917d52..b034091a175 100644 --- a/test/test_p4est_2d.jl +++ b/test/test_p4est_2d.jl @@ -391,6 +391,27 @@ end end end +@trixi_testset "elixir_euler_NACA6412airfoil_mach2.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_NACA6412airfoil_mach2.jl"), + l2=[ + 1.9752162683735258e-9, 3.150450205812513e-9, + 1.8885499402935914e-9, 7.273629602920966e-9, + ], + linf=[ + 6.007577890709825e-7, 1.005273289944597e-6, + 5.948514542597182e-7, 2.3111764217986774e-6, + ], + tspan=(0.0, 0.1)) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + @trixi_testset "elixir_eulergravity_convergence.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulergravity_convergence.jl"), l2=[ diff --git a/test/test_p4est_3d.jl b/test/test_p4est_3d.jl index dc5d32b5a04..ea7d9193add 100644 --- a/test/test_p4est_3d.jl +++ b/test/test_p4est_3d.jl @@ -234,6 +234,29 @@ end end end +@trixi_testset "elixir_euler_free_stream_boundaries.jl" 
begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_euler_free_stream_boundaries.jl"), + l2=[ + 6.530157034651212e-16, 1.6057829680004379e-15, + 3.31107455378537e-15, 3.908829498281281e-15, + 5.048390610424672e-15, + ], + linf=[ + 4.884981308350689e-15, 1.1921019726912618e-14, + 1.5432100042289676e-14, 2.298161660974074e-14, + 6.039613253960852e-14, + ]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + @trixi_testset "elixir_euler_free_stream_extruded.jl with HLLC FLux" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_free_stream_extruded.jl"), l2=[ From c021c4816b8f0d8aa81c990439771f26684f93b0 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Thu, 25 Jan 2024 14:34:19 +0100 Subject: [PATCH 079/166] set version to v0.6.7 --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index f246bdfdab4..8532e47f761 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "] -version = "0.6.7-pre" +version = "0.6.7" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From 246dc5be204c8937f077a1d12b5603bb70138357 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Thu, 25 Jan 2024 14:34:36 +0100 Subject: [PATCH 080/166] set development version to v0.6.8-pre --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 8532e47f761..1a0aa0103dc 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "] -version = "0.6.7" +version = "0.6.8-pre" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From 70d365f405c2494e314277f22e63dcc32d04f095 Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Thu, 25 Jan 2024 16:09:57 +0100 Subject: [PATCH 081/166] Fix Docs rendering: Avoid Markdown hyperlink (#1814) * Avoid hyperlink in doc * Update docs/src/meshes/p4est_mesh.md Co-authored-by: Michael Schlottke-Lakemper * Apply suggestions from code review Co-authored-by: Michael Schlottke-Lakemper --------- Co-authored-by: Michael Schlottke-Lakemper --- docs/src/meshes/p4est_mesh.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/src/meshes/p4est_mesh.md b/docs/src/meshes/p4est_mesh.md index db75346cab3..3b35ffcad6f 100644 --- a/docs/src/meshes/p4est_mesh.md +++ b/docs/src/meshes/p4est_mesh.md @@ -55,7 +55,7 @@ This heading is used to indicate to the mesh constructor which of the above mapp create a curvilinear mesh. If the Abaqus file header is **not** present then the `P4estMesh` is created with the first strategy above. -#### List of corner nodes +#### [List of corner nodes](@id corner-node-list) Next, prefaced with `*NODE`, comes a list of the physical `(x,y,z)` coordinates of all the corners. The first integer in the list of the corners provides its id number. 
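The corner-node and nodeset blocks documented in this file are exactly what the new `parse_node_sets` and `assign_boundaries_standard_abaqus!` helpers from the boundary-symbols patch above consume. A minimal sketch of the 2D matching rule they implement — the node ids and the nodeset name below are made-up illustration values, not taken from any real mesh file:

```julia
# Corner node ids of one quad element in Abaqus ordering (hypothetical values)
tree_nodes = [11, 12, 22, 21]
# Node ids read from a hypothetical `*NSET,NSET=PhysicalLine1` block
node_set_dict = Dict(:PhysicalLine1 => [1, 2, 11, 12])

# p4est face indexing: 1 = left (-x), 2 = right (+x), 3 = bottom (-y), 4 = top (+y)
boundary_names = fill(Symbol("---"), 4)
for boundary in keys(node_set_dict)
    # An element edge is assigned to a boundary iff both of its corner nodes
    # appear in the corresponding nodeset.
    if tree_nodes[1] in node_set_dict[boundary] && tree_nodes[2] in node_set_dict[boundary]
        boundary_names[3] = boundary # bottom edge
    end
    if tree_nodes[2] in node_set_dict[boundary] && tree_nodes[3] in node_set_dict[boundary]
        boundary_names[2] = boundary # right edge
    end
end
# Here only the bottom edge matches, so boundary_names[3] becomes :PhysicalLine1
# while the other entries stay Symbol("---").
```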
@@ -71,7 +71,7 @@ Thus, for the two-dimensional example mesh this block of corner information is 7, 3.0, -1.0, 0.0 ``` -#### List of elements +#### [List of elements](@id element-list) The element connectivity is given after the list of corners. The header for this information block is ``` @@ -237,7 +237,7 @@ For completeness, we provide the entire Abaqus mesh file for the example mesh in As an alternative to an Abaqus mesh generated by `HOHQMesh`, `.inp` files with boundary information encoded as nodesets `*NSET,NSET=` can be used to construct a `p4est` mesh. This is especially useful for usage of existing meshes (consisting of bilinear elements) which could stem from the popular [`gmsh`](https://gmsh.info/) meshing software. -In addition to the list of [nodes](#nodes) and [elements](#elements) given above, there are nodesets of the form +In addition to the list of [nodes](@ref corner-node-list) and [elements](@ref element-list) given above, there are nodesets of the form ``` *NSET,NSET=PhysicalLine1 1, 4, 52, 53, 54, 55, 56, 57, 58, From 367881bb713f8671acc1a42ae28a3d68f3000935 Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Mon, 29 Jan 2024 09:29:02 +0100 Subject: [PATCH 082/166] Correct NACA6412 BC assignment (#1815) --- docs/literate/src/files/p4est_from_gmsh.jl | 8 ++++---- .../p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl | 4 ++-- test/test_p4est_2d.jl | 8 ++++---- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/docs/literate/src/files/p4est_from_gmsh.jl b/docs/literate/src/files/p4est_from_gmsh.jl index 356339cdd47..abfe70eebc4 100644 --- a/docs/literate/src/files/p4est_from_gmsh.jl +++ b/docs/literate/src/files/p4est_from_gmsh.jl @@ -347,8 +347,8 @@ end #hide #md # // labeling of the boundary parts # Physical Line(1) = {4}; // Inflow. Label in Abaqus .inp file: PhysicalLine1 # Physical Line(2) = {2}; // Outflow. Label in Abaqus .inp file: PhysicalLine2 -# Physical Line(3) = {1, 3}; // Airfoil. Label in Abaqus .inp file: PhysicalLine3 -# Physical Line(4) = {5, 6}; //Upper and lower wall/farfield/... Label in Abaqus .inp file: PhysicalLine4 +# Physical Line(3) = {1, 3}; // Upper and lower wall/farfield/... Label in Abaqus .inp file: PhysicalLine3 +# Physical Line(4) = {5, 6}; // Airfoil. Label in Abaqus .inp file: PhysicalLine4 # ``` # which are crucial for the correct assignment of boundary conditions in `Trixi.jl`. # In particular, it is the responsibility of a user to keep track on the physical boundary names between the mesh generation and assignment of boundary condition functions in an elixir. 
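# As a small optional safeguard (a sketch, not part of the tutorial code), one can assert that
# every boundary name used in the elixir was actually requested from the mesh file, once
# `boundary_symbols` and `boundary_conditions` have been defined as shown below:
# ```julia
# @assert issubset(keys(boundary_conditions), boundary_symbols)
# ```
# This way a typo in one of the `:PhysicalLine*` keys is caught early.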
@@ -437,8 +437,8 @@ end #hide #md # # boundary_conditions = Dict(:PhysicalLine1 => boundary_condition_supersonic_inflow, # Left boundary # :PhysicalLine2 => boundary_condition_supersonic_outflow, # Right boundary -# :PhysicalLine3 => boundary_condition_slip_wall, # Airfoil -# :PhysicalLine4 => boundary_condition_supersonic_outflow) # Top and bottom boundary +# :PhysicalLine3 => boundary_condition_supersonic_outflow, # Top and bottom boundary +# :PhysicalLine4 => boundary_condition_slip_wall) # Airfoil # # semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, # boundary_conditions = boundary_conditions) diff --git a/examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl b/examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl index 6673053d88f..fcd2ca00e10 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl @@ -79,8 +79,8 @@ mesh = P4estMesh{2}(mesh_file, polydeg = polydeg, boundary_symbols = boundary_sy boundary_conditions = Dict(:PhysicalLine1 => boundary_condition_supersonic_inflow, # Left boundary :PhysicalLine2 => boundary_condition_supersonic_outflow, # Right boundary - :PhysicalLine3 => boundary_condition_slip_wall, # Airfoil - :PhysicalLine4 => boundary_condition_supersonic_outflow) # Top and bottom boundary + :PhysicalLine3 => boundary_condition_supersonic_outflow, # Top and bottom boundary + :PhysicalLine4 => boundary_condition_slip_wall) # Airfoil semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, boundary_conditions = boundary_conditions) diff --git a/test/test_p4est_2d.jl b/test/test_p4est_2d.jl index b034091a175..121001b35ff 100644 --- a/test/test_p4est_2d.jl +++ b/test/test_p4est_2d.jl @@ -394,12 +394,12 @@ end @trixi_testset "elixir_euler_NACA6412airfoil_mach2.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_NACA6412airfoil_mach2.jl"), l2=[ - 1.9752162683735258e-9, 3.150450205812513e-9, - 1.8885499402935914e-9, 7.273629602920966e-9, + 0.19107654776276498, 0.3545913719444839, + 0.18492730895077583, 0.817927213517244, ], linf=[ - 6.007577890709825e-7, 1.005273289944597e-6, - 5.948514542597182e-7, 2.3111764217986774e-6, + 2.5397624311491946, 2.7075156425517917, 2.200980534211764, + 9.031153939238115, ], tspan=(0.0, 0.1)) # Ensure that we do not have excessive memory allocations From 38100d0a855b0a370c0fd65fbe9bfbe301f5d096 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Mon, 29 Jan 2024 09:44:45 +0100 Subject: [PATCH 083/166] set version to v0.6.8 --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 1a0aa0103dc..29e36a46dc5 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. 
Winters ", "Jesse Chan "] -version = "0.6.8-pre" +version = "0.6.8" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From fcf2652f8ce3f16ba640a66990134632c9f8d3d5 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Mon, 29 Jan 2024 09:44:57 +0100 Subject: [PATCH 084/166] set development version to v0.6.9-pre --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 29e36a46dc5..0bbdec206d8 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "] -version = "0.6.8" +version = "0.6.9-pre" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From f4e6e494846784e513987a91d2248c04fd616029 Mon Sep 17 00:00:00 2001 From: Benjamin Bolm <74359358+bennibolm@users.noreply.github.com> Date: Tue, 30 Jan 2024 11:07:31 +0100 Subject: [PATCH 085/166] (Thread-)Parallelize bounds check routine for subcell IDP limiting (#1736) * Implement parallel bounds check for IDP limiting * Add missing warning as "experimental" from last PR * Updating `idp_bounds_delta_threaded` for all bounds at once * Revise parallel memory structure * Using maximum instead of reduce * Expand vector length to fix False Sharing problem * Generalize stride size in vector * Add suggested comment --- src/callbacks_stage/subcell_bounds_check.jl | 10 ++- .../subcell_bounds_check_2d.jl | 81 ++++++++++++------- .../dgsem_tree/dg_2d_subcell_limiters.jl | 3 + src/solvers/dgsem_tree/subcell_limiters_2d.jl | 23 ++++-- 4 files changed, 80 insertions(+), 37 deletions(-) diff --git a/src/callbacks_stage/subcell_bounds_check.jl b/src/callbacks_stage/subcell_bounds_check.jl index d7e30ab1621..9f34a6b3b4b 100644 --- a/src/callbacks_stage/subcell_bounds_check.jl +++ b/src/callbacks_stage/subcell_bounds_check.jl @@ -118,7 +118,7 @@ end @inline function finalize_callback(callback::BoundsCheckCallback, semi, limiter::SubcellLimiterIDP) (; local_minmax, positivity) = limiter - (; idp_bounds_delta) = limiter.cache + (; idp_bounds_delta_global) = limiter.cache variables = varnames(cons2cons, semi.equations) println("─"^100) @@ -128,8 +128,10 @@ end for v in limiter.local_minmax_variables_cons v_string = string(v) println("$(variables[v]):") - println("-lower bound: ", idp_bounds_delta[Symbol(v_string, "_min")][2]) - println("-upper bound: ", idp_bounds_delta[Symbol(v_string, "_max")][2]) + println("- lower bound: ", + idp_bounds_delta_global[Symbol(v_string, "_min")]) + println("- upper bound: ", + idp_bounds_delta_global[Symbol(v_string, "_max")]) end end if positivity @@ -138,7 +140,7 @@ end continue end println(string(variables[v]) * ":\n- positivity: ", - idp_bounds_delta[Symbol(string(v), "_min")][2]) + idp_bounds_delta_global[Symbol(string(v), "_min")]) end end println("─"^100 * "\n") diff --git a/src/callbacks_stage/subcell_bounds_check_2d.jl b/src/callbacks_stage/subcell_bounds_check_2d.jl index d52eb6edb9e..545d19b5136 100644 --- a/src/callbacks_stage/subcell_bounds_check_2d.jl +++ b/src/callbacks_stage/subcell_bounds_check_2d.jl @@ -10,26 +10,37 @@ time, iter, output_directory, save_errors) (; local_minmax, positivity) = solver.volume_integral.limiter (; variable_bounds) = limiter.cache.subcell_limiter_coefficients - (; idp_bounds_delta) = limiter.cache + (; idp_bounds_delta_local, idp_bounds_delta_global) = limiter.cache + + # Note: Accessing the threaded memory vector 
`idp_bounds_delta_local` with + # `deviation = idp_bounds_delta_local[key][Threads.threadid()]` causes critical performance + # issues due to False Sharing. + # Initializing a vector with n times the length and using every n-th entry fixes this + # problem and allows proper scaling: + # `deviation = idp_bounds_delta_local[key][n * Threads.threadid()]` + # Since there are no processors with caches over 128B, we use `n = 128B / size(uEltype)` + stride_size = div(128, sizeof(eltype(u))) # = n if local_minmax for v in limiter.local_minmax_variables_cons v_string = string(v) key_min = Symbol(v_string, "_min") key_max = Symbol(v_string, "_max") - deviation_min = idp_bounds_delta[key_min] - deviation_max = idp_bounds_delta[key_max] - for element in eachelement(solver, cache), j in eachnode(solver), - i in eachnode(solver) - - var = u[v, i, j, element] - deviation_min[1] = max(deviation_min[1], - variable_bounds[key_min][i, j, element] - var) - deviation_max[1] = max(deviation_max[1], - var - variable_bounds[key_max][i, j, element]) + deviation_min_threaded = idp_bounds_delta_local[key_min] + deviation_max_threaded = idp_bounds_delta_local[key_max] + @threaded for element in eachelement(solver, cache) + deviation_min = deviation_min_threaded[stride_size * Threads.threadid()] + deviation_max = deviation_max_threaded[stride_size * Threads.threadid()] + for j in eachnode(solver), i in eachnode(solver) + var = u[v, i, j, element] + deviation_min = max(deviation_min, + variable_bounds[key_min][i, j, element] - var) + deviation_max = max(deviation_max, + var - variable_bounds[key_max][i, j, element]) + end + deviation_min_threaded[stride_size * Threads.threadid()] = deviation_min + deviation_max_threaded[stride_size * Threads.threadid()] = deviation_max end - deviation_min[2] = max(deviation_min[2], deviation_min[1]) - deviation_max[2] = max(deviation_max[2], deviation_max[1]) end end if positivity @@ -38,17 +49,28 @@ continue end key = Symbol(string(v), "_min") - deviation = idp_bounds_delta[key] - for element in eachelement(solver, cache), j in eachnode(solver), - i in eachnode(solver) - - var = u[v, i, j, element] - deviation[1] = max(deviation[1], - variable_bounds[key][i, j, element] - var) + deviation_threaded = idp_bounds_delta_local[key] + @threaded for element in eachelement(solver, cache) + deviation = deviation_threaded[stride_size * Threads.threadid()] + for j in eachnode(solver), i in eachnode(solver) + var = u[v, i, j, element] + deviation = max(deviation, + variable_bounds[key][i, j, element] - var) + end + deviation_threaded[stride_size * Threads.threadid()] = deviation end - deviation[2] = max(deviation[2], deviation[1]) end end + + for (key, _) in idp_bounds_delta_local + # Calculate maximum deviations of all threads + idp_bounds_delta_local[key][stride_size] = maximum(idp_bounds_delta_local[key][stride_size * i] + for i in 1:Threads.nthreads()) + # Update global maximum deviations + idp_bounds_delta_global[key] = max(idp_bounds_delta_global[key], + idp_bounds_delta_local[key][stride_size]) + end + if save_errors # Print to output file open("$output_directory/deviations.txt", "a") do f @@ -56,8 +78,10 @@ if local_minmax for v in limiter.local_minmax_variables_cons v_string = string(v) - print(f, ", ", idp_bounds_delta[Symbol(v_string, "_min")][1], ", ", - idp_bounds_delta[Symbol(v_string, "_max")][1]) + print(f, ", ", + idp_bounds_delta_local[Symbol(v_string, "_min")][stride_size], + ", ", + idp_bounds_delta_local[Symbol(v_string, "_max")][stride_size]) end end if positivity @@ -65,14 
+89,17 @@ if v in limiter.local_minmax_variables_cons continue end - print(f, ", ", idp_bounds_delta[Symbol(string(v), "_min")][1]) + print(f, ", ", + idp_bounds_delta_local[Symbol(string(v), "_min")][stride_size]) end end println(f) end - # Reset first entries of idp_bounds_delta - for (key, _) in idp_bounds_delta - idp_bounds_delta[key][1] = zero(eltype(idp_bounds_delta[key][1])) + # Reset local maximum deviations + for (key, _) in idp_bounds_delta_local + for i in 1:Threads.nthreads() + idp_bounds_delta_local[key][stride_size * i] = zero(eltype(idp_bounds_delta_local[key][stride_size])) + end end end diff --git a/src/solvers/dgsem_tree/dg_2d_subcell_limiters.jl b/src/solvers/dgsem_tree/dg_2d_subcell_limiters.jl index 2fc62f548d2..9af8b65b4cd 100644 --- a/src/solvers/dgsem_tree/dg_2d_subcell_limiters.jl +++ b/src/solvers/dgsem_tree/dg_2d_subcell_limiters.jl @@ -470,6 +470,9 @@ end For subcell limiting, the calculation of local bounds for non-periodic domains require the boundary outer state. This function returns the boundary value at time `t` and for node with spatial indices `indices`. + +!!! warning "Experimental implementation" + This is an experimental feature and may change in future releases. """ @inline function get_boundary_outer_state(boundary_condition::BoundaryConditionDirichlet, cache, t, equations, dg, indices...) diff --git a/src/solvers/dgsem_tree/subcell_limiters_2d.jl b/src/solvers/dgsem_tree/subcell_limiters_2d.jl index 384f4178bc9..3d272359fe4 100644 --- a/src/solvers/dgsem_tree/subcell_limiters_2d.jl +++ b/src/solvers/dgsem_tree/subcell_limiters_2d.jl @@ -13,21 +13,32 @@ function create_cache(limiter::Type{SubcellLimiterIDP}, equations::AbstractEquat bound_keys) # Memory for bounds checking routine with `BoundsCheckCallback`. - # The first entry of each vector contains the maximum deviation since the last export. - # The second one contains the total maximum deviation. - idp_bounds_delta = Dict{Symbol, Vector{real(basis)}}() + # Local variable contains the maximum deviation since the last export. + # Using a threaded vector to parallelize bounds check. + idp_bounds_delta_local = Dict{Symbol, Vector{real(basis)}}() + # Global variable contains the total maximum deviation. + idp_bounds_delta_global = Dict{Symbol, real(basis)}() + # Note: False sharing causes critical performance issues on multiple threads when using a vector + # of length `Threads.nthreads()`. Initializing a vector of length `n * Threads.nthreads()` + # and then only using every n-th entry, fixes the problem and allows proper scaling. + # Since there are no processors with caches over 128B, we use `n = 128B / size(uEltype)` + stride_size = div(128, sizeof(eltype(basis.nodes))) # = n for key in bound_keys - idp_bounds_delta[key] = zeros(real(basis), 2) + idp_bounds_delta_local[key] = [zero(real(basis)) + for _ in 1:(stride_size * Threads.nthreads())] + idp_bounds_delta_global[key] = zero(real(basis)) end - return (; subcell_limiter_coefficients, idp_bounds_delta) + return (; subcell_limiter_coefficients, idp_bounds_delta_local, + idp_bounds_delta_global) end function (limiter::SubcellLimiterIDP)(u::AbstractArray{<:Any, 4}, semi, dg::DGSEM, t, dt; kwargs...) 
@unpack alpha = limiter.cache.subcell_limiter_coefficients - alpha .= zero(eltype(alpha)) + # TODO: Do not abuse `reset_du!` but maybe implement a generic `set_zero!` + @trixi_timeit timer() "reset alpha" reset_du!(alpha, dg, semi.cache) if limiter.local_minmax @trixi_timeit timer() "local min/max limiting" idp_local_minmax!(alpha, limiter, From 4b07706f01cc17c7ef6215c21da312e221cb0c00 Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Tue, 30 Jan 2024 20:32:24 +0100 Subject: [PATCH 086/166] Do not safe sol (#1820) --- examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl | 1 + 1 file changed, 1 insertion(+) diff --git a/examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl b/examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl index fcd2ca00e10..7e55a259596 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl @@ -103,6 +103,7 @@ callbacks = CallbackSet(summary_callback, ############################################################################### sol = solve(ode, SSPRK104(; thread = OrdinaryDiffEq.True()); dt = 1.0, # overwritten by the `stepsize_callback` + save_everystep = false, callback = callbacks); summary_callback() # print the timer summary From 07990295b7ba155669ef4c2953809abe8e3c4880 Mon Sep 17 00:00:00 2001 From: Simon Candelaresi <10759273+SimonCan@users.noreply.github.com> Date: Wed, 31 Jan 2024 13:21:11 +0000 Subject: [PATCH 087/166] Sc/converters coupling (#1558) * Added coupling converters. * Added generic converter_function for structured 2d meshes. * Added example elixir for coupling converters. * Cleaned up converter coupling elixir. * Added equations in coupling converters. * Added converter functions. * Added identity converter function. * Autoformat for converter coupling implementation. * Added coupled converter elixir. * Corrected file name of coupled converters test. * Removed redundant doc string. * Added function signature in doc string. * Removed coverage_override in coupled tests. * Removed old commented code. * Update make.jl Added interface coupling docs to the main menu. * Update make.jl Moved converter coupling section. * Create coupling.md * Update coupling.md Added some documentation on coupling converters. * Removed troublesome AnalysisCallbackCoupled from test. * Chenged coupling converter function. * Changed coupling converter function and updated tests. * Sepcialized coupling function call. * Removed volume coupling from documentation to avoit confusion. * Update src/coupling_converters/coupling_converters.jl Co-authored-by: Hendrik Ranocha * Removed redundant converter function for coupling. * Removed redundant coupling converter file mentioned in some files. * Autoreformatted. * Removed old coupled elixir and replaced it with one using converter functions. * Updated errors for coupled tests. * Corrected test results for coupled equations. * Corrected comment. * Removed coupled test from special tests. * Removed coupled test from specials. * Chaned the coupling function to the identity. * Updated coupling tests. * Updated errors for coupled test. * Added advice about binary compatability for coupled equations in the documentation. * Typo. * Added numerical fluxes. * Corrected rs copy routine. Now loop over this semi's components. * Reformatted equations source file. * Removed problemating include of time_integration.jl. * Removed export of deleted methods. 
* Reverted to old version of compressible Euler multicomponent with no support for structured grid.
* Renamed documentation file for multi-physics coupling.
* Renamed doc reference.
* Update src/semidiscretization/semidiscretization_coupled.jl Co-authored-by: Michael Schlottke-Lakemper
* Update docs/src/multi-physics_coupling.md Co-authored-by: Michael Schlottke-Lakemper
* Update docs/src/multi-physics_coupling.md Co-authored-by: Michael Schlottke-Lakemper
* Update docs/src/multi-physics_coupling.md Co-authored-by: Michael Schlottke-Lakemper
* Reinstated structured_2d_dgsem coupled in special tests.
* Update examples/structured_2d_dgsem/elixir_advection_coupled.jl Co-authored-by: Michael Schlottke-Lakemper
* Renamed CouplingFunction to CouplingConverter.
* Update src/semidiscretization/semidiscretization_coupled.jl Co-authored-by: Michael Schlottke-Lakemper
* Cleaned the copy of coupled boundary values.
* Reduced time span for example coupling elixir.
* Removed redundant loop.
* Applied formatter.
* Removed default coupling converter function.
* Moved coupling converter function into elixir.
* Apply suggestions from code review Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com>
* Update docs/make.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com>
* Removed coupling_converters.jl from the include.
* Corrected introduced issue with coupling boundary copy. The latest change to clean up the boundary copying introduced a bug where the wrong node indices were determined. This is now corrected.
* Corrected comment on final simulation time.
* Updated errors for coupled test to reflect changed final simulation time.
* Added muladd.
* Corrected coordinate finding in semidiscretization_coupled.
* Fixed issue related to memory allocation.
* Corrected loop over semidiscretization.
* Removed commented out code.
* Fixed type instability with loops over semidiscretizations using lispy tuple programming.
* Removed obsolete code.
* Fixed another type instability in coupled semidiscretization.
* Cleaned up the coupled semidiscretization.
* Autoformatted coupled semidiscretization.
* Fixed last type instability in coupling.
* Autoformatter on semidiscretization.
* Fixed bug in boundary values copy that arose when coupling multiple systems.
* Applied autoformatter on coupled semidiscretization.
* Extended the structured 2d example elixir for the coupled advection to 4 semidiscretizations. This has two purposes: 1. Users are given an example for 2d coupling avoiding common pitfalls. 2. This increases the code coverage for the test.
* Updated test results for coupled advection in 2d to reflect the 4 semidiscretizations that are now used.
* Added correct errors for tests for the coupled advection equations in structured 2d.
* Update examples/structured_2d_dgsem/elixir_advection_coupled.jl Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> * Update examples/structured_2d_dgsem/elixir_advection_coupled.jl Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> * Update src/semidiscretization/semidiscretization_coupled.jl Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> * Update src/semidiscretization/semidiscretization_coupled.jl Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> * Update src/semidiscretization/semidiscretization_coupled.jl Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> * Update src/semidiscretization/semidiscretization_coupled.jl Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> * Update src/semidiscretization/semidiscretization_coupled.jl Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> * Update src/semidiscretization/semidiscretization_coupled.jl Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> * Update src/semidiscretization/semidiscretization_coupled.jl Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> * Corrected foreach_enumerate implementation. * Fix closing parens * Remove unused recursive rhs! * Pass equations to converter function * Apply formatting * Reverted copy_to_coupled_boundary to previou version to avoid type instability. * Corrected computation of coupled semidiscretizations and fixed memory issue. * Removed redundant nelements function, as it is no longer used. * Applied autoformatter. * Improvements in style and added info about passing equations to coupling functions, as suggested by Andrew and Daniel. * Restored timings in semidiscretization coupled. --------- Co-authored-by: Michael Schlottke-Lakemper Co-authored-by: Hendrik Ranocha Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> --- docs/make.jl | 1 + docs/src/multi-physics_coupling.md | 46 +++++ .../elixir_advection_coupled.jl | 191 +++++++++++++----- .../semidiscretization_coupled.jl | 191 ++++++++++++------ test/test_structured_2d.jl | 28 ++- 5 files changed, 340 insertions(+), 117 deletions(-) create mode 100644 docs/src/multi-physics_coupling.md diff --git a/docs/make.jl b/docs/make.jl index dee87371bd1..7fce3b31e24 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -108,6 +108,7 @@ makedocs( ], "Time integration" => "time_integration.md", "Callbacks" => "callbacks.md", + "Coupling" => "multi-physics_coupling.md" ], "Advanced topics & developers" => [ "Conventions" =>"conventions.md", diff --git a/docs/src/multi-physics_coupling.md b/docs/src/multi-physics_coupling.md new file mode 100644 index 00000000000..eec92bc21de --- /dev/null +++ b/docs/src/multi-physics_coupling.md @@ -0,0 +1,46 @@ +# [Multi-physics coupling](@id multi-physics-coupling) +A complex simulation can consist of different spatial domains in which +different equations are being solved, different numerical methods being used +or the grid structure is different. +One example would be a fluid in a tank and an extended hot plate attached to it. +We would then like to solve the Navier-Stokes equations in the fluid domain +and the heat conduction equations in the plate. +The coupling would happen at the interface through the exchange of thermal energy. 
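+In Trixi.jl, this exchange is mediated by user-defined converter functions, which are
+described in the next section. As a minimal sketch (matching the identity coupling used in
+the advection example referenced below), such a converter simply passes the state through:
+```julia
+coupling_function = (x, u, equations_other, equations_own) -> u
+```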
+ + +## Converter coupling +It may happen that the two systems to be coupled do not share any variables, but +share some of the physics. +In such a situation, the same physics is just represented in a different form and with +a different set of variables. +This is the case, for instance assuming two domains, if there is a fluid system in one domain +and a Vlasov system in the other domain. +In that case we would have variables representing distribution functions of +the Vlasov system on one side and variables representing the mechanical quantities, like density, +of the fluid system. +To translate the fields from one description to the other one needs to use +converter functions. +These functions need to be hand tailored by the user in the elixir file where each +pair of coupled systems requires two coupling functions, one for each direction. + +In the general case, we have a system $A$ with $m$ variables +$u_{A,i}, \: i = 1, \dots, m$ and another +system $B$ with $n$ variables $u_{B,j}, \: j = 1, \dots, n$. +We then define two coupling functions, one that transforms $u_A$ into $u_B$ +and one that goes the other way. + +In their minimal form they take the position vector $x$, state vector $u$ +and the equations of the two coupled systems +and return the transformed variables. +By passing the equations we can make use of their parameters, if they are required. +Examples can be seen in `examples/structured_2d_dgsem/elixir_advection_coupled.jl`. + + +## Warning about binary compatibility +Currently the coordinate values on the nodes can differ by machine precision when +simulating the mesh and when splitting the mesh in multiple domains. +This is an issue coming from the coordinate interpolation on the nodes. +As a result, running a simulation in a single system and in two coupled domains +may result in a difference of the order of the machine precision. +While this is not an issue for most practical problems, it is best to keep this in mind when comparing test runs. + diff --git a/examples/structured_2d_dgsem/elixir_advection_coupled.jl b/examples/structured_2d_dgsem/elixir_advection_coupled.jl index 2a56d23f4c0..43b68f21b03 100644 --- a/examples/structured_2d_dgsem/elixir_advection_coupled.jl +++ b/examples/structured_2d_dgsem/elixir_advection_coupled.jl @@ -2,31 +2,38 @@ using OrdinaryDiffEq using Trixi ############################################################################### -# Coupled semidiscretization of two linear advection systems, which are connected periodically +# Coupled semidiscretization of four linear advection systems using converter functions such that +# they are also coupled across the domain boundaries to generate a periodic system. # -# In this elixir, we have a square domain that is divided into a left half and a right half. On each -# half of the domain, a completely independent SemidiscretizationHyperbolic is created for the -# linear advection equations. The two systems are coupled in the x-direction and have periodic -# boundaries in the y-direction. For a high-level overview, see also the figure below: +# In this elixir, we have a square domain that is divided into a upper-left, lower-left, +# upper-right and lower-right quarter. On each quarter +# of the domain, a completely independent SemidiscretizationHyperbolic is created for the +# linear advection equations. The four systems are coupled in the x and y-direction. 
+# For a high-level overview, see also the figure below: # # (-1, 1) ( 1, 1) # ┌────────────────────┬────────────────────┐ -# │ ↑ periodic ↑ │ ↑ periodic ↑ │ -# │ │ │ +# │ ↑ coupled ↑ │ ↑ coupled ↑ │ # │ │ │ # │ ========= │ ========= │ # │ system #1 │ system #2 │ # │ ========= │ ========= │ # │ │ │ +# │<-- coupled │<-- coupled │ +# │ coupled -->│ coupled -->│ # │ │ │ +# │ ↓ coupled ↓ │ ↓ coupled ↓ │ +# ├────────────────────┼────────────────────┤ +# │ ↑ coupled ↑ │ ↑ coupled ↑ │ # │ │ │ +# │ ========= │ ========= │ +# │ system #3 │ system #4 │ +# │ ========= │ ========= │ # │ │ │ -# │ coupled -->│<-- coupled │ -# │ │ │ -# │<-- coupled │ coupled -->│ -# │ │ │ +# │<-- coupled │<-- coupled │ +# │ coupled -->│ coupled -->│ # │ │ │ -# │ ↓ periodic ↓ │ ↓ periodic ↓ │ +# │ ↓ coupled ↓ │ ↓ coupled ↓ │ # └────────────────────┴────────────────────┘ # (-1, -1) ( 1, -1) @@ -36,60 +43,135 @@ equations = LinearScalarAdvectionEquation2D(advection_velocity) # Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) -# First mesh is the left half of a [-1,1]^2 square -coordinates_min1 = (-1.0, -1.0) # minimum coordinates (min(x), min(y)) +# This will be the number of elements for each quarter/semidiscretization. +cells_per_dimension = (8, 8) + +########### +# system #1 +########### + +coordinates_min1 = (-1.0, 0.0) # minimum coordinates (min(x), min(y)) coordinates_max1 = (0.0, 1.0) # maximum coordinates (max(x), max(y)) -# Define identical resolution as a variable such that it is easier to change from `trixi_include` -cells_per_dimension = (8, 16) +mesh1 = StructuredMesh(cells_per_dimension, coordinates_min1, coordinates_max1) -cells_per_dimension1 = cells_per_dimension +# Define the coupling functions +coupling_function12 = (x, u, equations_other, equations_own) -> u +coupling_function13 = (x, u, equations_other, equations_own) -> u -mesh1 = StructuredMesh(cells_per_dimension1, coordinates_min1, coordinates_max1) +# Define the coupling boundary conditions and the system it is coupled to. 
+boundary_conditions_x_neg1 = BoundaryConditionCoupled(2, (:end, :i_forward), Float64, + coupling_function12) +boundary_conditions_x_pos1 = BoundaryConditionCoupled(2, (:begin, :i_forward), Float64, + coupling_function12) +boundary_conditions_y_neg1 = BoundaryConditionCoupled(3, (:i_forward, :end), Float64, + coupling_function13) +boundary_conditions_y_pos1 = BoundaryConditionCoupled(3, (:i_forward, :begin), Float64, + coupling_function13) # A semidiscretization collects data structures and functions for the spatial discretization semi1 = SemidiscretizationHyperbolic(mesh1, equations, initial_condition_convergence_test, solver, - boundary_conditions = ( - # Connect left boundary with right boundary of right mesh - x_neg = BoundaryConditionCoupled(2, - (:end, - :i_forward), - Float64), - # Connect right boundary with left boundary of right mesh - x_pos = BoundaryConditionCoupled(2, - (:begin, - :i_forward), - Float64), - y_neg = boundary_condition_periodic, - y_pos = boundary_condition_periodic)) - -# Second mesh is the right half of a [-1,1]^2 square -coordinates_min2 = (0.0, -1.0) # minimum coordinates (min(x), min(y)) + boundary_conditions = (x_neg = boundary_conditions_x_neg1, + x_pos = boundary_conditions_x_pos1, + y_neg = boundary_conditions_y_neg1, + y_pos = boundary_conditions_y_pos1)) + +########### +# system #2 +########### + +coordinates_min2 = (0.0, 0.0) # minimum coordinates (min(x), min(y)) coordinates_max2 = (1.0, 1.0) # maximum coordinates (max(x), max(y)) -cells_per_dimension2 = cells_per_dimension +mesh2 = StructuredMesh(cells_per_dimension, coordinates_min2, coordinates_max2) -mesh2 = StructuredMesh(cells_per_dimension2, coordinates_min2, coordinates_max2) +# Define the coupling functions +coupling_function21 = (x, u, equations_other, equations_own) -> u +coupling_function24 = (x, u, equations_other, equations_own) -> u +# Define the coupling boundary conditions and the system it is coupled to. 
+boundary_conditions_x_neg2 = BoundaryConditionCoupled(1, (:end, :i_forward), Float64, + coupling_function21) +boundary_conditions_x_pos2 = BoundaryConditionCoupled(1, (:begin, :i_forward), Float64, + coupling_function21) +boundary_conditions_y_neg2 = BoundaryConditionCoupled(4, (:i_forward, :end), Float64, + coupling_function24) +boundary_conditions_y_pos2 = BoundaryConditionCoupled(4, (:i_forward, :begin), Float64, + coupling_function24) + +# A semidiscretization collects data structures and functions for the spatial discretization semi2 = SemidiscretizationHyperbolic(mesh2, equations, initial_condition_convergence_test, solver, - boundary_conditions = ( - # Connect left boundary with right boundary of left mesh - x_neg = BoundaryConditionCoupled(1, - (:end, - :i_forward), - Float64), - # Connect right boundary with left boundary of left mesh - x_pos = BoundaryConditionCoupled(1, - (:begin, - :i_forward), - Float64), - y_neg = boundary_condition_periodic, - y_pos = boundary_condition_periodic)) - -# Create a semidiscretization that bundles semi1 and semi2 -semi = SemidiscretizationCoupled(semi1, semi2) + boundary_conditions = (x_neg = boundary_conditions_x_neg2, + x_pos = boundary_conditions_x_pos2, + y_neg = boundary_conditions_y_neg2, + y_pos = boundary_conditions_y_pos2)) + +########### +# system #3 +########### + +coordinates_min3 = (-1.0, -1.0) # minimum coordinates (min(x), min(y)) +coordinates_max3 = (0.0, 0.0) # maximum coordinates (max(x), max(y)) + +mesh3 = StructuredMesh(cells_per_dimension, coordinates_min3, coordinates_max3) + +# Define the coupling functions +coupling_function34 = (x, u, equations_other, equations_own) -> u +coupling_function31 = (x, u, equations_other, equations_own) -> u + +# Define the coupling boundary conditions and the system it is coupled to. +boundary_conditions_x_neg3 = BoundaryConditionCoupled(4, (:end, :i_forward), Float64, + coupling_function34) +boundary_conditions_x_pos3 = BoundaryConditionCoupled(4, (:begin, :i_forward), Float64, + coupling_function34) +boundary_conditions_y_neg3 = BoundaryConditionCoupled(1, (:i_forward, :end), Float64, + coupling_function31) +boundary_conditions_y_pos3 = BoundaryConditionCoupled(1, (:i_forward, :begin), Float64, + coupling_function31) + +# A semidiscretization collects data structures and functions for the spatial discretization +semi3 = SemidiscretizationHyperbolic(mesh3, equations, initial_condition_convergence_test, + solver, + boundary_conditions = (x_neg = boundary_conditions_x_neg3, + x_pos = boundary_conditions_x_pos3, + y_neg = boundary_conditions_y_neg3, + y_pos = boundary_conditions_y_pos3)) + +########### +# system #4 +########### + +coordinates_min4 = (0.0, -1.0) # minimum coordinates (min(x), min(y)) +coordinates_max4 = (1.0, 0.0) # maximum coordinates (max(x), max(y)) + +mesh4 = StructuredMesh(cells_per_dimension, coordinates_min4, coordinates_max4) + +# Define the coupling functions +coupling_function43 = (x, u, equations_other, equations_own) -> u +coupling_function42 = (x, u, equations_other, equations_own) -> u + +# Define the coupling boundary conditions and the system it is coupled to. 
+boundary_conditions_x_neg4 = BoundaryConditionCoupled(3, (:end, :i_forward), Float64, + coupling_function43) +boundary_conditions_x_pos4 = BoundaryConditionCoupled(3, (:begin, :i_forward), Float64, + coupling_function43) +boundary_conditions_y_neg4 = BoundaryConditionCoupled(2, (:i_forward, :end), Float64, + coupling_function42) +boundary_conditions_y_pos4 = BoundaryConditionCoupled(2, (:i_forward, :begin), Float64, + coupling_function42) + +# A semidiscretization collects data structures and functions for the spatial discretization +semi4 = SemidiscretizationHyperbolic(mesh4, equations, initial_condition_convergence_test, + solver, + boundary_conditions = (x_neg = boundary_conditions_x_neg4, + x_pos = boundary_conditions_x_pos4, + y_neg = boundary_conditions_y_neg4, + y_pos = boundary_conditions_y_pos4)) + +# Create a semidiscretization that bundles all the semidiscretizations. +semi = SemidiscretizationCoupled(semi1, semi2, semi3, semi4) ############################################################################### # ODE solvers, callbacks etc. @@ -104,7 +186,10 @@ summary_callback = SummaryCallback() # The AnalysisCallback allows to analyse the solution in regular intervals and prints the results analysis_callback1 = AnalysisCallback(semi1, interval = 100) analysis_callback2 = AnalysisCallback(semi2, interval = 100) -analysis_callback = AnalysisCallbackCoupled(semi, analysis_callback1, analysis_callback2) +analysis_callback3 = AnalysisCallback(semi3, interval = 100) +analysis_callback4 = AnalysisCallback(semi4, interval = 100) +analysis_callback = AnalysisCallbackCoupled(semi, analysis_callback1, analysis_callback2, + analysis_callback3, analysis_callback4) # The SaveSolutionCallback allows to save the solution to a file in regular intervals save_solution = SaveSolutionCallback(interval = 100, diff --git a/src/semidiscretization/semidiscretization_coupled.jl b/src/semidiscretization/semidiscretization_coupled.jl index 0941ae6a8ca..dc21dbe9a1e 100644 --- a/src/semidiscretization/semidiscretization_coupled.jl +++ b/src/semidiscretization/semidiscretization_coupled.jl @@ -1,3 +1,10 @@ +# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). +# Since these FMAs can increase the performance of many numerical algorithms, +# we need to opt-in explicitly. +# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. +@muladd begin +#! format: noindent + """ SemidiscretizationCoupled @@ -65,11 +72,13 @@ function Base.show(io::IO, ::MIME"text/plain", semi::SemidiscretizationCoupled) summary_line(io, "system", i) mesh, equations, solver, _ = mesh_equations_solver_cache(semi.semis[i]) summary_line(increment_indent(io), "mesh", mesh |> typeof |> nameof) - summary_line(increment_indent(io), "equations", equations |> typeof |> nameof) + summary_line(increment_indent(io), "equations", + equations |> typeof |> nameof) summary_line(increment_indent(io), "initial condition", semi.semis[i].initial_condition) # no boundary conditions since that could be too much - summary_line(increment_indent(io), "source terms", semi.semis[i].source_terms) + summary_line(increment_indent(io), "source terms", + semi.semis[i].source_terms) summary_line(increment_indent(io), "solver", solver |> typeof |> nameof) end summary_line(io, "total #DOFs per field", ndofs(semi)) @@ -106,20 +115,14 @@ end @inline Base.real(semi::SemidiscretizationCoupled) = promote_type(real.(semi.semis)...) -@inline Base.eltype(semi::SemidiscretizationCoupled) = promote_type(eltype.(semi.semis)...) 
+@inline function Base.eltype(semi::SemidiscretizationCoupled) + promote_type(eltype.(semi.semis)...) +end @inline function ndofs(semi::SemidiscretizationCoupled) sum(ndofs, semi.semis) end -@inline function nelements(semi::SemidiscretizationCoupled) - return sum(semi.semis) do semi_ - mesh, equations, solver, cache = mesh_equations_solver_cache(semi_) - - nelements(mesh, solver, cache) - end -end - function compute_coefficients(t, semi::SemidiscretizationCoupled) @unpack u_indices = semi @@ -137,23 +140,40 @@ end @view u_ode[semi.u_indices[index]] end +# Same as `foreach(enumerate(something))`, but without allocations. +# +# Note that compile times may increase if this is used with big tuples. +@inline foreach_enumerate(func, collection) = foreach_enumerate(func, collection, 1) +@inline foreach_enumerate(func, collection::Tuple{}, index) = nothing + +@inline function foreach_enumerate(func, collection, index) + element = first(collection) + remaining_collection = Base.tail(collection) + + func((index, element)) + + # Process remaining collection + foreach_enumerate(func, remaining_collection, index + 1) +end + function rhs!(du_ode, u_ode, semi::SemidiscretizationCoupled, t) @unpack u_indices = semi time_start = time_ns() @trixi_timeit timer() "copy to coupled boundaries" begin - for semi_ in semi.semis - copy_to_coupled_boundary!(semi_.boundary_conditions, u_ode, semi) + foreach(semi.semis) do semi_ + copy_to_coupled_boundary!(semi_.boundary_conditions, u_ode, semi, semi_) end end # Call rhs! for each semidiscretization - for i in eachsystem(semi) - u_loc = get_system_u_ode(u_ode, i, semi) - du_loc = get_system_u_ode(du_ode, i, semi) - - @trixi_timeit timer() "system #$i" rhs!(du_loc, u_loc, semi.semis[i], t) + @trixi_timeit timer() "copy to coupled boundaries" begin + foreach_enumerate(semi.semis) do (i, semi_) + u_loc = get_system_u_ode(u_ode, i, semi) + du_loc = get_system_u_ode(du_ode, i, semi) + rhs!(du_loc, u_loc, semi_, t) + end end runtime = time_ns() - time_start @@ -309,7 +329,8 @@ end for i in eachsystem(semi) u_ode_slice = get_system_u_ode(u_ode, i, semi) - save_solution_file(semis[i], u_ode_slice, solution_callback, integrator, system = i) + save_solution_file(semis[i], u_ode_slice, solution_callback, integrator, + system = i) end end @@ -332,7 +353,7 @@ end ################################################################################ """ - BoundaryConditionCoupled(other_semi_index, indices, uEltype) + BoundaryConditionCoupled(other_semi_index, indices, uEltype, coupling_converter) Boundary condition to glue two meshes together. Solution values at the boundary of another mesh will be used as boundary values. This requires the use @@ -348,32 +369,37 @@ This is currently only implemented for [`StructuredMesh`](@ref). - `indices::Tuple`: node/cell indices at the boundary of the mesh in the other semidiscretization. See examples below. 
- `uEltype::Type`: element type of solution +- `coupling_converter::CouplingConverter`: function to call for converting the solution + state of one system to the other system # Examples ```julia # Connect the left boundary of mesh 2 to our boundary such that our positive # boundary direction will match the positive y direction of the other boundary -BoundaryConditionCoupled(2, (:begin, :i), Float64) +BoundaryConditionCoupled(2, (:begin, :i), Float64, fun) # Connect the same two boundaries oppositely oriented -BoundaryConditionCoupled(2, (:begin, :i_backwards), Float64) +BoundaryConditionCoupled(2, (:begin, :i_backwards), Float64, fun) # Using this as y_neg boundary will connect `our_cells[i, 1, j]` to `other_cells[j, end-i, end]` -BoundaryConditionCoupled(2, (:j, :i_backwards, :end), Float64) +BoundaryConditionCoupled(2, (:j, :i_backwards, :end), Float64, fun) ``` !!! warning "Experimental code" This is an experimental feature and can change any time. """ -mutable struct BoundaryConditionCoupled{NDIMS, NDIMST2M1, uEltype <: Real, Indices} +mutable struct BoundaryConditionCoupled{NDIMS, NDIMST2M1, uEltype <: Real, Indices, + CouplingConverter} # NDIMST2M1 == NDIMS * 2 - 1 # Buffer for boundary values: [variable, nodes_i, nodes_j, cell_i, cell_j] - u_boundary :: Array{uEltype, NDIMST2M1} # NDIMS * 2 - 1 - other_semi_index :: Int - other_orientation :: Int - indices :: Indices - - function BoundaryConditionCoupled(other_semi_index, indices, uEltype) + u_boundary :: Array{uEltype, NDIMST2M1} # NDIMS * 2 - 1 + other_semi_index :: Int + other_orientation :: Int + indices :: Indices + coupling_converter :: CouplingConverter + + function BoundaryConditionCoupled(other_semi_index, indices, uEltype, + coupling_converter) NDIMS = length(indices) u_boundary = Array{uEltype, NDIMS * 2 - 1}(undef, ntuple(_ -> 0, NDIMS * 2 - 1)) @@ -385,8 +411,10 @@ mutable struct BoundaryConditionCoupled{NDIMS, NDIMST2M1, uEltype <: Real, Indic other_orientation = 3 end - new{NDIMS, NDIMS * 2 - 1, uEltype, typeof(indices)}(u_boundary, other_semi_index, - other_orientation, indices) + new{NDIMS, NDIMS * 2 - 1, uEltype, typeof(indices), + typeof(coupling_converter)}(u_boundary, + other_semi_index, other_orientation, + indices, coupling_converter) end end @@ -395,8 +423,10 @@ function Base.eltype(boundary_condition::BoundaryConditionCoupled) end function (boundary_condition::BoundaryConditionCoupled)(u_inner, orientation, direction, - cell_indices, surface_node_indices, - surface_flux_function, equations) + cell_indices, + surface_node_indices, + surface_flux_function, + equations) # get_node_vars(boundary_condition.u_boundary, equations, solver, surface_node_indices..., cell_indices...), # but we don't have a solver here u_boundary = SVector(ntuple(v -> boundary_condition.u_boundary[v, @@ -421,13 +451,15 @@ function allocate_coupled_boundary_conditions(semi::AbstractSemidiscretization) for direction in 1:n_boundaries boundary_condition = semi.boundary_conditions[direction] - allocate_coupled_boundary_condition(boundary_condition, direction, mesh, equations, + allocate_coupled_boundary_condition(boundary_condition, direction, mesh, + equations, solver) end end # Don't do anything for other BCs than BoundaryConditionCoupled -function allocate_coupled_boundary_condition(boundary_condition, direction, mesh, equations, +function allocate_coupled_boundary_condition(boundary_condition, direction, mesh, + equations, solver) return nothing end @@ -448,43 +480,69 @@ function 
allocate_coupled_boundary_condition(boundary_condition::BoundaryConditi end # Don't do anything for other BCs than BoundaryConditionCoupled -function copy_to_coupled_boundary!(boundary_condition, u_ode, semi) +function copy_to_coupled_boundary!(boundary_condition, u_ode, semi_coupled, semi) return nothing end +function copy_to_coupled_boundary!(u_ode, semi_coupled, semi, i, n_boundaries, + boundary_condition, boundary_conditions...) + copy_to_coupled_boundary!(boundary_condition, u_ode, semi_coupled, semi) + if i < n_boundaries + copy_to_coupled_boundary!(u_ode, semi_coupled, semi, i + 1, n_boundaries, + boundary_conditions...) + end +end + function copy_to_coupled_boundary!(boundary_conditions::Union{Tuple, NamedTuple}, u_ode, - semi) - for boundary_condition in boundary_conditions - copy_to_coupled_boundary!(boundary_condition, u_ode, semi) + semi_coupled, semi) + copy_to_coupled_boundary!(u_ode, semi_coupled, semi, 1, length(boundary_conditions), + boundary_conditions...) +end + +function mesh_equations_solver_cache(other_semi_index, i, semi_, semi_tuple...) + if i == other_semi_index + return mesh_equations_solver_cache(semi_) + else + # Walk through semidiscretizations until we find `i` + mesh_equations_solver_cache(other_semi_index, i + 1, semi_tuple...) end end # In 2D -function copy_to_coupled_boundary!(boundary_condition::BoundaryConditionCoupled{2}, u_ode, - semi) - @unpack u_indices = semi +function copy_to_coupled_boundary!(boundary_condition::BoundaryConditionCoupled{2}, + u_ode, + semi_coupled, semi) + @unpack u_indices = semi_coupled @unpack other_semi_index, other_orientation, indices = boundary_condition + @unpack coupling_converter, u_boundary = boundary_condition + + mesh_own, equations_own, solver_own, cache_own = mesh_equations_solver_cache(semi) + + mesh_other, equations_other, solver_other, cache_other = mesh_equations_solver_cache(other_semi_index, + 1, + semi_coupled.semis...) - mesh, equations, solver, cache = mesh_equations_solver_cache(semi.semis[other_semi_index]) - u = wrap_array(get_system_u_ode(u_ode, other_semi_index, semi), mesh, equations, solver, - cache) + node_coordinates_other = cache_other.elements.node_coordinates + u_ode_other = get_system_u_ode(u_ode, other_semi_index, semi_coupled) + u_other = wrap_array(u_ode_other, mesh_other, equations_other, solver_other, + cache_other) - linear_indices = LinearIndices(size(mesh)) + linear_indices = LinearIndices(size(mesh_other)) if other_orientation == 1 - cells = axes(mesh, 2) + cells = axes(mesh_other, 2) else # other_orientation == 2 - cells = axes(mesh, 1) + cells = axes(mesh_other, 1) end # Copy solution data to the coupled boundary using "delayed indexing" with # a start value and a step size to get the correct face and orientation. 
- node_index_range = eachnode(solver) + node_index_range = eachnode(solver_other) i_node_start, i_node_step = index_to_start_step_2d(indices[1], node_index_range) j_node_start, j_node_step = index_to_start_step_2d(indices[2], node_index_range) - i_cell_start, i_cell_step = index_to_start_step_2d(indices[1], axes(mesh, 1)) - j_cell_start, j_cell_step = index_to_start_step_2d(indices[2], axes(mesh, 2)) + i_cell_start, i_cell_step = index_to_start_step_2d(indices[1], axes(mesh_other, 1)) + j_cell_start, j_cell_step = index_to_start_step_2d(indices[2], axes(mesh_other, 2)) i_cell = i_cell_start j_cell = j_cell_start @@ -492,16 +550,26 @@ function copy_to_coupled_boundary!(boundary_condition::BoundaryConditionCoupled{ for cell in cells i_node = i_node_start j_node = j_node_start - - for i in eachnode(solver) - for v in 1:size(u, 1) - boundary_condition.u_boundary[v, i, cell] = u[v, i_node, j_node, - linear_indices[i_cell, - j_cell]] + element_id = linear_indices[i_cell, j_cell] + + for element_id in eachnode(solver_other) + x_other = get_node_coords(node_coordinates_other, equations_other, + solver_other, + i_node, j_node, linear_indices[i_cell, j_cell]) + u_node_other = get_node_vars(u_other, equations_other, solver_other, i_node, + j_node, linear_indices[i_cell, j_cell]) + u_node_converted = coupling_converter(x_other, u_node_other, + equations_other, + equations_own) + + for i in eachindex(u_node_converted) + u_boundary[i, element_id, cell] = u_node_converted[i] end + i_node += i_node_step j_node += j_node_step end + i_cell += i_cell_step j_cell += j_cell_step end @@ -511,7 +579,8 @@ end ### DGSEM/structured ################################################################################ -@inline function calc_boundary_flux_by_direction!(surface_flux_values, u, t, orientation, +@inline function calc_boundary_flux_by_direction!(surface_flux_values, u, t, + orientation, boundary_condition::BoundaryConditionCoupled, mesh::StructuredMesh, equations, surface_integral, dg::DG, cache, @@ -531,7 +600,8 @@ end sign_jacobian = sign(inverse_jacobian[node_indices..., element]) # Contravariant vector Ja^i is the normal vector - normal = sign_jacobian * get_contravariant_vector(orientation, contravariant_vectors, + normal = sign_jacobian * + get_contravariant_vector(orientation, contravariant_vectors, node_indices..., element) # If the mapping is orientation-reversing, the normal vector will be reversed (see above). 
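For reference, a minimal sketch (not part of the patch) of how the extended constructor and the converter interact after this change: the fourth constructor argument is the converter, and `copy_to_coupled_boundary!` invokes it as `coupling_converter(x, u_node, equations_other, equations_own)`, expecting a state vector for the receiving system in return. The converter name below is hypothetical and used only for illustration.

```julia
using Trixi

# Hypothetical identity converter: receives the node coordinates `x` and the
# state `u` of the other system (plus both equation objects) and returns the
# state written into this system's boundary buffer unchanged.
coupling_converter_identity = (x, u, equations_other, equations_own) -> u

# Four-argument constructor introduced by this patch:
# (other_semi_index, indices, uEltype, coupling_converter)
boundary_condition = BoundaryConditionCoupled(2, (:begin, :i), Float64,
                                              coupling_converter_identity)
```

Any converter with this signature can be passed instead, e.g. one that rescales or remaps variables between the two coupled systems.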
@@ -608,3 +678,4 @@ function analyze_convergence(errors_coupled, iterations, return eoc_mean_values end +end # @muladd diff --git a/test/test_structured_2d.jl b/test/test_structured_2d.jl index e5d45ebcc07..522510a42e3 100644 --- a/test/test_structured_2d.jl +++ b/test/test_structured_2d.jl @@ -33,14 +33,34 @@ end @trixi_testset "elixir_advection_coupled.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_coupled.jl"), - l2=[7.816742843181738e-6, 7.816742843196112e-6], - linf=[6.314906965543265e-5, 6.314906965410039e-5], + l2=[ + 7.816742843336293e-6, + 7.816742843340186e-6, + 7.816742843025513e-6, + 7.816742843061526e-6, + ], + linf=[ + 6.314906965276812e-5, + 6.314906965187994e-5, + 6.31490696496595e-5, + 6.314906965032563e-5, + ], coverage_override=(maxiters = 10^5,)) @testset "analysis_callback(sol) for AnalysisCallbackCoupled" begin errors = analysis_callback(sol) - @test errors.l2≈[7.816742843181738e-6, 7.816742843196112e-6] rtol=1.0e-4 - @test errors.linf≈[6.314906965543265e-5, 6.314906965410039e-5] rtol=1.0e-4 + @test errors.l2≈[ + 7.816742843336293e-6, + 7.816742843340186e-6, + 7.816742843025513e-6, + 7.816742843061526e-6, + ] rtol=1.0e-4 + @test errors.linf≈[ + 6.314906965276812e-5, + 6.314906965187994e-5, + 6.31490696496595e-5, + 6.314906965032563e-5, + ] rtol=1.0e-4 # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let From d53fdb59fe71c1ba956fc758b5874c3914304ddf Mon Sep 17 00:00:00 2001 From: Johannes Markert <10619309+jmark@users.noreply.github.com> Date: Wed, 31 Jan 2024 16:49:35 +0100 Subject: [PATCH 088/166] Feature: T8codeMesh backend with MPI support (#1803) * Initial commit for the new feature using t8code as meshing backend. * Delete t8code_2d_dgsem * Added new examples and tests. Testing updates for T8code.jl. * Worked in the comments. * Fixed spelling. * Update src/auxiliary/auxiliary.jl Co-authored-by: Hendrik Ranocha * Added whitespace in Unions. * Adapted commented out code block reporting the no. of elements per level. * Added dummy save mesh support for . * Added test . * Added to method signature. * Deleted unnecessary comments. * Removed commented out tests. * Fixed Morton ordering bug in 2D at mortar interfaces. * Disabled `save_solution` callbacks and added more tests. * Added more tests. * Updated code according to the review. * Update src/auxiliary/t8code.jl Co-authored-by: Hendrik Ranocha * Update src/auxiliary/t8code.jl Co-authored-by: Hendrik Ranocha * Update src/auxiliary/t8code.jl Co-authored-by: Hendrik Ranocha * Update src/auxiliary/t8code.jl Co-authored-by: Hendrik Ranocha * Update src/meshes/t8code_mesh.jl Co-authored-by: Hendrik Ranocha * Update src/meshes/t8code_mesh.jl Co-authored-by: Hendrik Ranocha * Update src/meshes/t8code_mesh.jl Co-authored-by: Hendrik Ranocha * Update src/meshes/t8code_mesh.jl Co-authored-by: Hendrik Ranocha * Update src/meshes/t8code_mesh.jl Co-authored-by: Hendrik Ranocha * Update src/meshes/t8code_mesh.jl Co-authored-by: Hendrik Ranocha * Update src/solvers/dgsem_t8code/containers_2d.jl Co-authored-by: Hendrik Ranocha * Update src/meshes/t8code_mesh.jl Co-authored-by: Hendrik Ranocha * Code cleanup. * Updated to T8code@0.3.0 * Fixing minor issues. * Fixed typo. * Code cleanup. * Enabled `set_ghost` in examples. * Generalized type info in function signature. * Added namespace qualifier. * Updated comments. * Refactored code and deleted lots of it. * Removed a copy operation. * Initial commit. * Fxinig minor bugs. * Fixed minor typo. 
* Added first 3d example and fixed segfault. * Added many 3D examples and tests. * Backup. * Fixed merging issues. * Adding more tests. * Fixed some merging issues and formatting. * Fixed spelling. * Fixed spelling and changed assert macro. * Applied automatic formatting. * Applied automatic formatting. * Backup. * Removed superfluous outer constructor for T8codeMesh. * Added return statement for consistency. * Fixed wrong indentation by autoformatter. * Added comments. * Made sure an exception is thrown. * Changed flags for sc_init for t8code initialization. * Updated formatting. * Workaround for error about calling MPI routines after MPI has been finalized. * Upped to T8code v0.4.1. * Added mpi_finailize_hook for proper memory cleanup. * Added t8code to test_threaded.jl * Added a `save_mesh_file` call in order to satisfy code coverage. * Improved finalizer logic for T8coeMesh. * Refined code. * Restructured to do blocks. * Moved save_mesh_file call to test file. * Fixed spelling error. * Made sc_finalize optional. * Fixed spelling. * Cleaned up examples. * Updated and cleaned t8code solver codes. * Updated tests for t8code 3D code. * Fixed spelling. * Update elixir_euler_source_terms_nonconforming_unstructured_curved.jl * Update elixir_euler_source_terms_nonconforming_unstructured_curved.jl * Fixed indentation. * Update src/solvers/dgsem_structured/dg_3d.jl Co-authored-by: Hendrik Ranocha * Update src/solvers/dgsem_t8code/containers_3d.jl Co-authored-by: Andrew Winters * Update src/callbacks_step/amr_dg3d.jl Co-authored-by: Andrew Winters * Update examples/t8code_3d_dgsem/elixir_euler_ec.jl Co-authored-by: Andrew Winters * Update examples/t8code_3d_dgsem/elixir_advection_unstructured_curved.jl Co-authored-by: Andrew Winters * Update examples/t8code_3d_dgsem/elixir_advection_amr_unstructured_curved.jl Co-authored-by: Andrew Winters * Update src/solvers/dgsem_structured/dg_3d.jl Co-authored-by: Hendrik Ranocha * Update src/meshes/t8code_mesh.jl Co-authored-by: Hendrik Ranocha * Update src/callbacks_step/analysis_dg3d.jl Co-authored-by: Hendrik Ranocha * Update examples/t8code_3d_dgsem/elixir_euler_free_stream.jl Co-authored-by: Andrew Winters * Removed NDIMS from T8codeMesh construction in case of p4est/p8est connectivity input. * Aligned T8codeMesh constructur with other mesh constructors. * Update examples/t8code_3d_dgsem/elixir_euler_sedov.jl Co-authored-by: Andrew Winters * Update examples/t8code_3d_dgsem/elixir_euler_sedov.jl Co-authored-by: Andrew Winters * Cleanup up. * Added @allocated test. * Fixed formatting. * Applied formatter. * Added ParallelT8codeMesh to constructor. * Code cleanup. * Extended T8codeMesh backend to support MPI interfaces datastructures. 
* Update NEWS.md (#1780) * set version to v0.6.5 * set development version to v0.6.6-pre * hotfix: restrict DiffEqBase.jl to let CI pass (#1788) * hotfix: restrict DiffEqBase.jl to let CI pass * restrict DiffEqBase.jl in main Project.toml * Update Project.toml * Parabolic Mortar for AMR `P4estMesh{3}` (#1765) * Clean branch * Un-Comment * un-comment * test coarsen * remove redundancy * Remove support for passive terms * expand resize * comments * format * Avoid code duplication * Update src/callbacks_step/amr_dg1d.jl Co-authored-by: Michael Schlottke-Lakemper * comment * comment & format * Try to increase coverage * Slightly more expressive names * Apply suggestions from code review * add specifier for 1d * Structs for resizing parabolic helpers * check if mortars are present * reuse `reinitialize_containers!` * resize calls for parabolic helpers * update analysis callbacks * Velocities for compr euler * Init container * correct copy-paste error * resize each dim * add dispatch * Add AMR for shear layer * USe only amr shear layer * first steps towards p4est parabolic amr * Add tests * remove plots * Format * remove redundant line * platform independent tests * No need for different flux_viscous comps after adding container_viscous to p4est * Laplace 3d * Longer times to allow converage to hit coarsen! * Increase testing of Laplace 3D * Add tests for velocities * remove comment * add elixir for amr testing * adding commented out mortar routines in 2D * Adding Mortar to 2d dg parabolic term * remove testing snippet * fix comments * add more arguments for dispatch * add some temporary todo notes * some updates for AP and KS * specialize mortar_fluxes_to_elements * BUGFIX: apply_jacobian_parabolic! was incorrect for P4estMesh * fixed rhs_parabolic! for mortars * more changes to elixir * indexing bug * comments * Adding the example for nonperiodic BCs with amr * hopefully this fixes AMR boundaries for parabolic terms * add elixir * Example with non periodic bopundary conditions * remove cruft * 3D parabolic amr * TGV elixir * Creating test for AMR 3D parabolic * Formatting * test formatting * Update src/Trixi.jl * Update src/equations/compressible_euler_1d.jl * Update src/equations/compressible_euler_2d.jl * Update src/equations/compressible_euler_3d.jl * Update src/solvers/dgsem_tree/container_viscous_2d.jl * Update src/solvers/dgsem_tree/container_viscous_2d.jl * Update src/solvers/dgsem_tree/container_viscous_2d.jl * Update src/solvers/dgsem_tree/container_viscous_3d.jl * Update src/solvers/dgsem_tree/container_viscous_3d.jl * Update src/solvers/dgsem_tree/container_viscous_3d.jl * Update src/solvers/dgsem_p4est/dg_2d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_2d_parabolic.jl * Update src/solvers/dgsem_tree/dg_2d_parabolic.jl * Update src/solvers/dgsem_tree/dg_2d_parabolic.jl * Update src/solvers/dgsem_tree/dg_2d_parabolic.jl * Update src/solvers/dgsem_tree/dg_2d_parabolic.jl * Update src/solvers/dgsem_tree/dg_2d_parabolic.jl * Update src/solvers/dgsem_tree/dg_2d_parabolic.jl * Update src/solvers/dgsem_tree/dg_2d_parabolic.jl * Update src/solvers/dgsem_tree/dg_2d_parabolic.jl * Update src/solvers/dgsem_tree/dg_2d_parabolic.jl * Update test/test_parabolic_3d.jl * Update src/solvers/dgsem_tree/dg_3d_parabolic.jl * Update src/solvers/dgsem_tree/dg_3d_parabolic.jl * Update src/solvers/dgsem_tree/dg_3d_parabolic.jl * Update src/solvers/dgsem_tree/dg_3d_parabolic.jl * Update src/solvers/dgsem_tree/dg_3d_parabolic.jl * Update src/solvers/dgsem_tree/dg_3d_parabolic.jl * Update 
src/solvers/dgsem_tree/dg_3d_parabolic.jl * Update src/solvers/dgsem_tree/dg_3d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_3d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_3d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_3d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_3d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_2d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_2d.jl * Update src/solvers/dgsem_p4est/dg_2d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_2d_parabolic.jl * Update test/test_parabolic_3d.jl * Update test/test_parabolic_3d.jl * Update test/test_parabolic_3d.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_3d_blast_wave_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_3d_blast_wave_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_3d_blast_wave_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_3d_blast_wave_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_3d_blast_wave_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_3d_blast_wave_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex_amr.jl * Update dg_3d_parabolic.jl * Update test_parabolic_3d.jl * Update elixir_navierstokes_3d_blast_wave_amr.jl * Update elixir_navierstokes_taylor_green_vortex_amr.jl * Update dg_3d_parabolic.jl * Update test_parabolic_3d.jl * Update test_parabolic_3d.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_3d_blast_wave_amr.jl * Update elixir_navierstokes_3d_blast_wave_amr.jl * Update elixir_navierstokes_taylor_green_vortex_amr.jl * Update dg_3d_parabolic.jl * Update test_parabolic_3d.jl * Delete examples/p4est_3d_dgsem/elixir_navierstokes_3d_blast_wave_amr.jl * Create elixir_navierstokes_blast_wave_amr.jl * Update test_parabolic_3d.jl * Update NEWS.md * Update NEWS.md * Update src/solvers/dgsem_p4est/dg_3d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_3d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_3d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_3d_parabolic.jl --------- Co-authored-by: Daniel_Doehring Co-authored-by: Daniel Doehring Co-authored-by: Michael Schlottke-Lakemper Co-authored-by: Jesse Chan <1156048+jlchan@users.noreply.github.com> Co-authored-by: Jesse Chan * reset the timer also on non-root MPI processes (#1787) Co-authored-by: Michael Schlottke-Lakemper * Add HLLC flux for non-cartesian meshes to CompressibleEulerEquations{2,3}D (#1790) * add HLLC flux for non-cartesian meshes * add tests for HLLC flux * Add 2D test with HLLC * Update test_p4est_3d.jl * Update test_p4est_2d.jl * Update test_p4est_3d.jl * Update src/equations/compressible_euler_3d.jl Co-authored-by: Hendrik Ranocha * Update src/equations/compressible_euler_2d.jl Co-authored-by: Hendrik Ranocha * Update compressible_euler_2d.jl * Update compressible_euler_3d.jl * Update test_p4est_2d.jl * Update test_p4est_3d.jl * Update compressible_euler_2d.jl * Update compressible_euler_2d.jl --------- Co-authored-by: Daniel Doehring Co-authored-by: Hendrik Ranocha * Bump crate-ci/typos from 1.16.23 to 1.16.26 (#1793) Bumps [crate-ci/typos](https://github.com/crate-ci/typos) from 1.16.23 to 1.16.26. 
- [Release notes](https://github.com/crate-ci/typos/releases) - [Changelog](https://github.com/crate-ci/typos/blob/master/CHANGELOG.md) - [Commits](https://github.com/crate-ci/typos/compare/v1.16.23...v1.16.26) --- updated-dependencies: - dependency-name: crate-ci/typos dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Extend `CompressibleEulerQuasi1D` and `ShallowWaterQuasi1D` to `DGMulti` (#1797) * adding DGMulti versions of fluxes * remove incorrect factor of 2 * add example and test * formatting * add comment * revert removing factor of 2 * formatting * add SWE quasi-1D test d * enable quasi1D SWE for DGMulti * add docstrings * formatting * Update src/equations/compressible_euler_quasi_1d.jl Co-authored-by: Hendrik Ranocha * adding comments explaining why `normal_direction` is included in 1D * Apply suggestions from code review Co-authored-by: Daniel Doehring --------- Co-authored-by: Hendrik Ranocha Co-authored-by: Daniel Doehring * Fix boundary_condition_slip_wall for SWE (#1798) * fix_wall_bc * add test * apply formatter * adjust some comments * update elixir and test; add topography * comments explaining usage of `ForwardDiff.jacobian` (#1800) * Bump actions/upload-artifact from 3 to 4 (#1795) * Bump actions/upload-artifact from 3 to 4 Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 3 to 4. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * bump download-artifact to v4 --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Hendrik Ranocha * Code cleanup. * Applied formatter. * Registered mpi t8code tests. * Added load-balancing feature to T8codeMesh. * Applied formatter. * Updated some test values. * Fixed typo. * Removed unused member variable. * Apply suggestions from code review Co-authored-by: Daniel Doehring * suggestions from review * fix format (strange?) * Added comments to help interpreting the source code. * Update src/callbacks_step/amr_dg3d.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Adhered to unified mesh constructor calling scheme. * Applied formatter. * Switched to Float64 instead of Cdouble. * Update src/meshes/t8code_mesh.jl Co-authored-by: Daniel Doehring * Refactored negative volume check. * Applied formatter. * Fixed typo resp. bug. * Apply suggestions from code review Co-authored-by: Hendrik Ranocha * add missing allocation checks * Some refactoring. * Deleted msh file. * Fixed a bug. * Code cleanup. * Ignore gmsh files. * Removed adapt! from global namespace. * Added documentation. * Added @test_warn to test. * Applied formatter. * Apply suggestions from code review Co-authored-by: Hendrik Ranocha Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Turned @warn to @info. * Code cleanup and added @deprecated routines in order to avoid breaking release. * Applied formatter. * Added formatter pragmas to avoid ugly formatting. * Applied formatter. * Code cleanup. Documenting. * Applied formatter. * Removed remnants of development. * Removed dubious commented out line. 
* minors in comments skip ci * Added `retrieve` function which downloads missing mesh file without race conditions in case of MPI environment. * Added Downloads as dependency. * Added missing namespace qualifier. * add compat bound * Apply suggestions from code review Co-authored-by: Daniel Doehring Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * use retrieve everywhere for downloading * try to improve formatting * and what julia formatter makes out of it * try to consolidate max_dt computation * format * add default allocation tests * remove left over vtk_write * Revert "try to consolidate max_dt computation" This reverts commit d0e9a2cb2882f426a3966aac03e6ce497e1b720f. * fix variable name * less consolidation * remove unused variables * Update src/auxiliary/auxiliary.jl Co-authored-by: Hendrik Ranocha * Update src/Trixi.jl Co-authored-by: Hendrik Ranocha * revert b068d1183 (manually) * use Trixi.download * Revert "use retrieve everywhere for downloading" This reverts commit a438547e23bc489f12beea625dcc0905a50d18ce. * change to new Trixi.download signature * format * remove merge leftover * format * Code refactor. Applied review comments. * Fixed bug. * Fixed typo. * Update src/auxiliary/auxiliary.jl Co-authored-by: Hendrik Ranocha * Update src/meshes/t8code_mesh.jl Co-authored-by: Hendrik Ranocha * Update src/meshes/t8code_mesh.jl Co-authored-by: Hendrik Ranocha * Applying review comments. * Added return early if there is nothing to do in adapt routines. --------- Signed-off-by: dependabot[bot] Co-authored-by: Johannes Markert Co-authored-by: Hendrik Ranocha Co-authored-by: Andrew Winters Co-authored-by: Jesse Chan <1156048+jlchan@users.noreply.github.com> Co-authored-by: Hendrik Ranocha Co-authored-by: Ahmad Peyvan <115842305+apey236@users.noreply.github.com> Co-authored-by: Daniel_Doehring Co-authored-by: Daniel Doehring Co-authored-by: Michael Schlottke-Lakemper Co-authored-by: Jesse Chan Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Patrick Ersing <114223904+patrickersing@users.noreply.github.com> Co-authored-by: Benedict Geihe Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> --- Project.toml | 2 + .../elixir_2d_euler_vortex_unstructured.jl | 9 +- examples/dgmulti_2d/elixir_euler_hohqmesh.jl | 9 +- .../elixir_advection_amr_unstructured_flag.jl | 7 +- .../elixir_advection_unstructured_flag.jl | 7 +- .../elixir_euler_blast_wave_amr.jl | 7 +- .../elixir_euler_double_mach_amr.jl | 8 +- .../elixir_euler_forward_step_amr.jl | 8 +- .../elixir_euler_free_stream.jl | 7 +- ...e_terms_nonconforming_unstructured_flag.jl | 7 +- .../elixir_euler_supersonic_cylinder.jl | 8 +- .../elixir_euler_wall_bc_amr.jl | 8 +- examples/p4est_2d_dgsem/elixir_mhd_rotor.jl | 7 +- ...lixir_advection_amr_unstructured_curved.jl | 7 +- .../elixir_advection_unstructured_curved.jl | 7 +- examples/p4est_3d_dgsem/elixir_euler_ec.jl | 7 +- .../elixir_euler_free_stream.jl | 7 +- .../elixir_euler_free_stream_extruded.jl | 7 +- ...terms_nonconforming_unstructured_curved.jl | 7 +- ...euler_source_terms_nonperiodic_hohqmesh.jl | 8 +- .../elixir_shallowwater_well_balanced.jl | 1 - .../elixir_advection_amr_unstructured_flag.jl | 7 +- .../elixir_advection_unstructured_flag.jl | 7 +- .../elixir_euler_free_stream.jl | 7 +- ...e_terms_nonconforming_unstructured_flag.jl | 7 +- examples/t8code_2d_dgsem/elixir_mhd_rotor.jl | 7 +- 
...lixir_advection_amr_unstructured_curved.jl | 9 +- .../elixir_advection_unstructured_curved.jl | 7 +- examples/t8code_3d_dgsem/elixir_euler_ec.jl | 7 +- .../elixir_euler_free_stream.jl | 7 +- .../elixir_euler_free_stream_extruded.jl | 7 +- ...terms_nonconforming_unstructured_curved.jl | 7 +- .../elixir_acoustics_gauss_wall.jl | 8 +- .../elixir_advection_basic.jl | 9 +- .../elixir_euler_basic.jl | 9 +- .../unstructured_2d_dgsem/elixir_euler_ec.jl | 9 +- .../elixir_euler_free_stream.jl | 9 +- .../elixir_euler_periodic.jl | 9 +- .../elixir_euler_sedov.jl | 7 +- .../elixir_euler_wall_bc.jl | 9 +- .../elixir_mhd_alfven_wave.jl | 8 +- .../unstructured_2d_dgsem/elixir_mhd_ec.jl | 8 +- .../elixir_shallowwater_dirichlet.jl | 8 +- .../elixir_shallowwater_ec.jl | 8 +- .../elixir_shallowwater_ec_shockcapturing.jl | 8 +- .../elixir_shallowwater_source_terms.jl | 8 +- ...ixir_shallowwater_three_mound_dam_break.jl | 13 +- ...lixir_shallowwater_twolayer_convergence.jl | 8 +- .../elixir_shallowwater_twolayer_dam_break.jl | 8 +- ...xir_shallowwater_twolayer_well_balanced.jl | 8 +- ...xir_shallowwater_wall_bc_shockcapturing.jl | 9 +- .../elixir_shallowwater_well_balanced.jl | 9 +- .../elixir_advection_basic.jl | 9 +- .../elixir_euler_free_stream.jl | 9 +- .../elixir_euler_source_terms.jl | 9 +- src/Trixi.jl | 1 + src/auxiliary/auxiliary.jl | 23 + src/auxiliary/t8code.jl | 235 +------ src/callbacks_step/amr.jl | 40 +- src/callbacks_step/amr_dg.jl | 11 +- src/callbacks_step/amr_dg2d.jl | 7 +- src/callbacks_step/amr_dg3d.jl | 7 +- src/callbacks_step/analysis_dg2d_parallel.jl | 6 +- src/callbacks_step/analysis_dg3d_parallel.jl | 6 +- src/callbacks_step/stepsize_dg2d.jl | 32 + src/callbacks_step/stepsize_dg3d.jl | 32 + src/meshes/p4est_mesh.jl | 4 + src/meshes/t8code_mesh.jl | 577 +++++++++++++++++- .../dgsem_p4est/containers_parallel.jl | 6 +- src/solvers/dgsem_p4est/dg_2d_parallel.jl | 21 +- src/solvers/dgsem_p4est/dg_3d_parallel.jl | 23 +- src/solvers/dgsem_p4est/dg_parallel.jl | 9 +- src/solvers/dgsem_t8code/containers.jl | 13 +- src/solvers/dgsem_t8code/containers_2d.jl | 3 +- src/solvers/dgsem_t8code/containers_3d.jl | 3 +- .../dgsem_t8code/containers_parallel.jl | 65 ++ src/solvers/dgsem_t8code/dg.jl | 7 +- src/solvers/dgsem_t8code/dg_parallel.jl | 135 ++++ src/solvers/dgsem_tree/dg_2d_parallel.jl | 3 +- test/test_mpi.jl | 4 +- test/test_mpi_p4est_2d.jl | 63 ++ test/test_mpi_p4est_3d.jl | 81 +++ test/test_mpi_t8code_2d.jl | 142 +++++ test/test_mpi_t8code_3d.jl | 180 ++++++ test/test_t8code_2d.jl | 7 +- 85 files changed, 1529 insertions(+), 640 deletions(-) create mode 100644 src/solvers/dgsem_t8code/containers_parallel.jl create mode 100644 src/solvers/dgsem_t8code/dg_parallel.jl create mode 100644 test/test_mpi_t8code_2d.jl create mode 100644 test/test_mpi_t8code_3d.jl diff --git a/Project.toml b/Project.toml index 0bbdec206d8..e99b08e0e81 100644 --- a/Project.toml +++ b/Project.toml @@ -9,6 +9,7 @@ ConstructionBase = "187b0558-2788-49d3-abe0-74a17ed4e7c9" DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" DiffEqBase = "2b5f629d-d688-5b77-993f-72d75c75574e" DiffEqCallbacks = "459566f4-90b8-5000-8ac3-15dfb0a30def" +Downloads = "f43a241f-c20a-4ad4-852c-f6b1247861c6" EllipsisNotation = "da5c29d0-fa7d-589e-88eb-ea29b0a81949" FillArrays = "1a297f60-69ca-5386-bcde-b61e274b549b" ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" @@ -57,6 +58,7 @@ ConstructionBase = "1.3" DataStructures = "0.18.15" DiffEqBase = "6 - 6.143" DiffEqCallbacks = "2.25" +Downloads = "1.6" EllipsisNotation = "1.0" FillArrays = 
"0.13.2, 1" ForwardDiff = "0.10.18" diff --git a/benchmark/elixir_2d_euler_vortex_unstructured.jl b/benchmark/elixir_2d_euler_vortex_unstructured.jl index 082b6648abf..43e4b6559de 100644 --- a/benchmark/elixir_2d_euler_vortex_unstructured.jl +++ b/benchmark/elixir_2d_euler_vortex_unstructured.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -49,11 +48,9 @@ end initial_condition = initial_condition_isentropic_vortex solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) -default_mesh_file = joinpath(@__DIR__, "mesh_uniform_cartesian.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/ranocha/f4ea19ba3b62348968c971db43d7798b/raw/a506abb9479c020920cf6068c142670fc1a9aadc/mesh_uniform_cartesian.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/ranocha/f4ea19ba3b62348968c971db43d7798b/raw/a506abb9479c020920cf6068c142670fc1a9aadc/mesh_uniform_cartesian.mesh", + joinpath(@__DIR__, "mesh_uniform_cartesian.mesh")) + mesh = UnstructuredMesh2D(mesh_file, periodicity = true) semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) diff --git a/examples/dgmulti_2d/elixir_euler_hohqmesh.jl b/examples/dgmulti_2d/elixir_euler_hohqmesh.jl index f534b5bc8ad..9b14a5c6827 100644 --- a/examples/dgmulti_2d/elixir_euler_hohqmesh.jl +++ b/examples/dgmulti_2d/elixir_euler_hohqmesh.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -30,12 +29,8 @@ dg = DGMulti(polydeg = 8, element_type = Quad(), approximation_type = SBP(), ############################################################################### # Get the curved quad mesh from a file (downloads the file if not available locally) - -default_mesh_file = joinpath(@__DIR__, "mesh_trixi_unstructured_mesh_docs.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/52056f1487853fab63b7f4ed7f171c80/raw/9d573387dfdbb8bce2a55db7246f4207663ac07f/mesh_trixi_unstructured_mesh_docs.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/52056f1487853fab63b7f4ed7f171c80/raw/9d573387dfdbb8bce2a55db7246f4207663ac07f/mesh_trixi_unstructured_mesh_docs.mesh", + joinpath(@__DIR__, "mesh_trixi_unstructured_mesh_docs.mesh")) mesh = DGMultiMesh(dg, mesh_file) diff --git a/examples/p4est_2d_dgsem/elixir_advection_amr_unstructured_flag.jl b/examples/p4est_2d_dgsem/elixir_advection_amr_unstructured_flag.jl index 0a50b3644f0..4bfb2d3e375 100644 --- a/examples/p4est_2d_dgsem/elixir_advection_amr_unstructured_flag.jl +++ b/examples/p4est_2d_dgsem/elixir_advection_amr_unstructured_flag.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -31,10 +30,8 @@ Trixi.validate_faces(faces) mapping_flag = Trixi.transfinite_mapping(faces) # Unstructured mesh with 24 cells of the square domain [-1, 1]^n -mesh_file = joinpath(@__DIR__, "square_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", + joinpath(@__DIR__, "square_unstructured_2.inp")) mesh = P4estMesh{2}(mesh_file, polydeg = 3, mapping = 
mapping_flag, diff --git a/examples/p4est_2d_dgsem/elixir_advection_unstructured_flag.jl b/examples/p4est_2d_dgsem/elixir_advection_unstructured_flag.jl index 37fcc547f60..1ab96925fe6 100644 --- a/examples/p4est_2d_dgsem/elixir_advection_unstructured_flag.jl +++ b/examples/p4est_2d_dgsem/elixir_advection_unstructured_flag.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -29,10 +28,8 @@ Trixi.validate_faces(faces) mapping_flag = Trixi.transfinite_mapping(faces) # Unstructured mesh with 24 cells of the square domain [-1, 1]^n -mesh_file = joinpath(@__DIR__, "square_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", + joinpath(@__DIR__, "square_unstructured_2.inp")) mesh = P4estMesh{2}(mesh_file, polydeg = 3, mapping = mapping_flag, diff --git a/examples/p4est_2d_dgsem/elixir_euler_blast_wave_amr.jl b/examples/p4est_2d_dgsem/elixir_euler_blast_wave_amr.jl index 0ca4fdc2eb7..5db5f74a686 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_blast_wave_amr.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_blast_wave_amr.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -50,10 +49,8 @@ volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; solver = DGSEM(basis, surface_flux, volume_integral) # Unstructured mesh with 48 cells of the square domain [-1, 1]^n -mesh_file = joinpath(@__DIR__, "square_unstructured_1.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/a075f8ec39a67fa9fad8f6f84342cbca/raw/a7206a02ed3a5d3cadacd8d9694ac154f9151db7/square_unstructured_1.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/a075f8ec39a67fa9fad8f6f84342cbca/raw/a7206a02ed3a5d3cadacd8d9694ac154f9151db7/square_unstructured_1.inp", + joinpath(@__DIR__, "square_unstructured_1.inp")) mesh = P4estMesh{2}(mesh_file, polydeg = 3, initial_refinement_level = 1) diff --git a/examples/p4est_2d_dgsem/elixir_euler_double_mach_amr.jl b/examples/p4est_2d_dgsem/elixir_euler_double_mach_amr.jl index 92928146d7b..fbc11e89185 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_double_mach_amr.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_double_mach_amr.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -99,11 +98,8 @@ solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux, volume_integral = volume_integral) # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "abaqus_double_mach.inp") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/a0806ef0d03cf5ea221af523167b6e32/raw/61ed0eb017eb432d996ed119a52fb041fe363e8c/abaqus_double_mach.inp", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/a0806ef0d03cf5ea221af523167b6e32/raw/61ed0eb017eb432d996ed119a52fb041fe363e8c/abaqus_double_mach.inp", + joinpath(@__DIR__, "abaqus_double_mach.inp")) mesh = P4estMesh{2}(mesh_file) diff --git a/examples/p4est_2d_dgsem/elixir_euler_forward_step_amr.jl b/examples/p4est_2d_dgsem/elixir_euler_forward_step_amr.jl index 
0ec9fc222f2..654efd5e209 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_forward_step_amr.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_forward_step_amr.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -104,11 +103,8 @@ solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux, volume_integral = volume_integral) # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "abaqus_forward_step.inp") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/b346ee6aa5446687f128eab8b37d52a7/raw/cd1e1d43bebd8d2631a07caec45585ec8456ca4c/abaqus_forward_step.inp", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/b346ee6aa5446687f128eab8b37d52a7/raw/cd1e1d43bebd8d2631a07caec45585ec8456ca4c/abaqus_forward_step.inp", + joinpath(@__DIR__, "abaqus_forward_step.inp")) mesh = P4estMesh{2}(mesh_file) diff --git a/examples/p4est_2d_dgsem/elixir_euler_free_stream.jl b/examples/p4est_2d_dgsem/elixir_euler_free_stream.jl index 38307a7d781..ab11dc11567 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_free_stream.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_free_stream.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -31,10 +30,8 @@ end # Get the uncurved mesh from a file (downloads the file if not available locally) # Unstructured mesh with 48 cells of the square domain [-1, 1]^n -mesh_file = joinpath(@__DIR__, "square_unstructured_1.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/a075f8ec39a67fa9fad8f6f84342cbca/raw/a7206a02ed3a5d3cadacd8d9694ac154f9151db7/square_unstructured_1.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/a075f8ec39a67fa9fad8f6f84342cbca/raw/a7206a02ed3a5d3cadacd8d9694ac154f9151db7/square_unstructured_1.inp", + joinpath(@__DIR__, "square_unstructured_1.inp")) # Map the unstructured mesh with the mapping above mesh = P4estMesh{2}(mesh_file, polydeg = 3, mapping = mapping, initial_refinement_level = 1) diff --git a/examples/p4est_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl b/examples/p4est_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl index 09d018309a6..084fd699b8e 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -31,10 +30,8 @@ mapping_flag = Trixi.transfinite_mapping(faces) # Get the uncurved mesh from a file (downloads the file if not available locally) # Unstructured mesh with 24 cells of the square domain [-1, 1]^n -mesh_file = joinpath(@__DIR__, "square_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", + joinpath(@__DIR__, "square_unstructured_2.inp")) mesh = P4estMesh{2}(mesh_file, polydeg = 3, mapping = mapping_flag, diff --git a/examples/p4est_2d_dgsem/elixir_euler_supersonic_cylinder.jl 
b/examples/p4est_2d_dgsem/elixir_euler_supersonic_cylinder.jl index 36c5624ba97..76ee96d4766 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_supersonic_cylinder.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_supersonic_cylinder.jl @@ -13,7 +13,6 @@ # # Keywords: supersonic flow, shock capturing, AMR, unstructured curved mesh, positivity preservation, compressible Euler, 2D -using Downloads: download using OrdinaryDiffEq using Trixi @@ -82,11 +81,8 @@ solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux, volume_integral = volume_integral) # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "abaqus_cylinder_in_channel.inp") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/a08f78f6b185b63c3baeff911a63f628/raw/addac716ea0541f588b9d2bd3f92f643eb27b88f/abaqus_cylinder_in_channel.inp", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/a08f78f6b185b63c3baeff911a63f628/raw/addac716ea0541f588b9d2bd3f92f643eb27b88f/abaqus_cylinder_in_channel.inp", + joinpath(@__DIR__, "abaqus_cylinder_in_channel.inp")) mesh = P4estMesh{2}(mesh_file) diff --git a/examples/p4est_2d_dgsem/elixir_euler_wall_bc_amr.jl b/examples/p4est_2d_dgsem/elixir_euler_wall_bc_amr.jl index 8b8d05bade8..75e60d0c78b 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_wall_bc_amr.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_wall_bc_amr.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -39,11 +38,8 @@ solver = DGSEM(polydeg = 5, surface_flux = flux_lax_friedrichs, volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "abaqus_gingerbread_man.inp") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/0e9e990a04b5105d1d2e3096a6e41272/raw/0d924b1d7e7d3cc1070a6cc22fe1d501687aa6dd/abaqus_gingerbread_man.inp", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/0e9e990a04b5105d1d2e3096a6e41272/raw/0d924b1d7e7d3cc1070a6cc22fe1d501687aa6dd/abaqus_gingerbread_man.inp", + joinpath(@__DIR__, "abaqus_gingerbread_man.inp")) mesh = P4estMesh{2}(mesh_file) diff --git a/examples/p4est_2d_dgsem/elixir_mhd_rotor.jl b/examples/p4est_2d_dgsem/elixir_mhd_rotor.jl index 380db487356..089e82580c9 100644 --- a/examples/p4est_2d_dgsem/elixir_mhd_rotor.jl +++ b/examples/p4est_2d_dgsem/elixir_mhd_rotor.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -69,10 +68,8 @@ function mapping_twist(xi, eta) return SVector(x, y) end -mesh_file = joinpath(@__DIR__, "square_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", + joinpath(@__DIR__, "square_unstructured_2.inp")) mesh = P4estMesh{2}(mesh_file, polydeg = 4, diff --git a/examples/p4est_3d_dgsem/elixir_advection_amr_unstructured_curved.jl b/examples/p4est_3d_dgsem/elixir_advection_amr_unstructured_curved.jl index cd280cf5bf6..33afd2e030e 
100644 --- a/examples/p4est_3d_dgsem/elixir_advection_amr_unstructured_curved.jl +++ b/examples/p4est_3d_dgsem/elixir_advection_amr_unstructured_curved.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -49,10 +48,8 @@ function mapping(xi, eta, zeta) end # Unstructured mesh with 48 cells of the cube domain [-1, 1]^3 -mesh_file = joinpath(@__DIR__, "cube_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp", + joinpath(@__DIR__, "cube_unstructured_2.inp")) # Mesh polydeg of 2 (half the solver polydeg) to ensure FSP (see above). mesh = P4estMesh{3}(mesh_file, polydeg = 2, diff --git a/examples/p4est_3d_dgsem/elixir_advection_unstructured_curved.jl b/examples/p4est_3d_dgsem/elixir_advection_unstructured_curved.jl index 6df9ac0b16a..83adcbf6a63 100644 --- a/examples/p4est_3d_dgsem/elixir_advection_unstructured_curved.jl +++ b/examples/p4est_3d_dgsem/elixir_advection_unstructured_curved.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -46,10 +45,8 @@ function mapping(xi, eta, zeta) end # Unstructured mesh with 68 cells of the cube domain [-1, 1]^3 -mesh_file = joinpath(@__DIR__, "cube_unstructured_1.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp", + joinpath(@__DIR__, "cube_unstructured_1.inp")) mesh = P4estMesh{3}(mesh_file, polydeg = 3, mapping = mapping, diff --git a/examples/p4est_3d_dgsem/elixir_euler_ec.jl b/examples/p4est_3d_dgsem/elixir_euler_ec.jl index d9d774a7ffc..91698545052 100644 --- a/examples/p4est_3d_dgsem/elixir_euler_ec.jl +++ b/examples/p4est_3d_dgsem/elixir_euler_ec.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -46,10 +45,8 @@ function mapping(xi_, eta_, zeta_) end # Unstructured mesh with 48 cells of the cube domain [-1, 1]^3 -mesh_file = joinpath(@__DIR__, "cube_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp", + joinpath(@__DIR__, "cube_unstructured_2.inp")) mesh = P4estMesh{3}(mesh_file, polydeg = 5, mapping = mapping, diff --git a/examples/p4est_3d_dgsem/elixir_euler_free_stream.jl b/examples/p4est_3d_dgsem/elixir_euler_free_stream.jl index 24a781ca59e..6406a38186b 100644 --- a/examples/p4est_3d_dgsem/elixir_euler_free_stream.jl +++ b/examples/p4est_3d_dgsem/elixir_euler_free_stream.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -47,10 +46,8 @@ function mapping(xi_, eta_, zeta_) end # Unstructured mesh with 68 cells of the cube domain [-1, 1]^3 -mesh_file = joinpath(@__DIR__, "cube_unstructured_1.inp") -isfile(mesh_file) || - 
download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp", + joinpath(@__DIR__, "cube_unstructured_1.inp")) # Mesh polydeg of 2 (half the solver polydeg) to ensure FSP (see above). mesh = P4estMesh{3}(mesh_file, polydeg = 2, diff --git a/examples/p4est_3d_dgsem/elixir_euler_free_stream_extruded.jl b/examples/p4est_3d_dgsem/elixir_euler_free_stream_extruded.jl index f56fe3a429d..08307a449a7 100644 --- a/examples/p4est_3d_dgsem/elixir_euler_free_stream_extruded.jl +++ b/examples/p4est_3d_dgsem/elixir_euler_free_stream_extruded.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -36,10 +35,8 @@ function mapping(xi, eta_, zeta_) end # Unstructured mesh with 48 cells of the cube domain [-1, 1]^3 -mesh_file = joinpath(@__DIR__, "cube_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp", + joinpath(@__DIR__, "cube_unstructured_2.inp")) mesh = P4estMesh{3}(mesh_file, polydeg = 3, mapping = mapping, diff --git a/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl b/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl index 0de22eaea40..e7ca0cad4ba 100644 --- a/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl +++ b/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -49,10 +48,8 @@ function mapping(xi, eta, zeta) end # Unstructured mesh with 68 cells of the cube domain [-1, 1]^3 -mesh_file = joinpath(@__DIR__, "cube_unstructured_1.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp", + joinpath(@__DIR__, "cube_unstructured_1.inp")) # Mesh polydeg of 2 (half the solver polydeg) to ensure FSP (see above). 
mesh = P4estMesh{3}(mesh_file, polydeg = 2, diff --git a/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonperiodic_hohqmesh.jl b/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonperiodic_hohqmesh.jl index 0fa3a28fe8b..7d81d6739bf 100644 --- a/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonperiodic_hohqmesh.jl +++ b/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonperiodic_hohqmesh.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -19,11 +18,8 @@ boundary_conditions = Dict(:Bottom => boundary_condition, solver = DGSEM(polydeg = 4, surface_flux = flux_lax_friedrichs) # Unstructured 3D half circle mesh from HOHQMesh -default_mesh_file = joinpath(@__DIR__, "abaqus_half_circle_3d.inp") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/11461efbfb02c42e06aca338b3d0b645/raw/81deeb1ebc4945952c30af5bb75fe222a18d975c/abaqus_half_circle_3d.inp", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/11461efbfb02c42e06aca338b3d0b645/raw/81deeb1ebc4945952c30af5bb75fe222a18d975c/abaqus_half_circle_3d.inp", + joinpath(@__DIR__, "abaqus_half_circle_3d.inp")) mesh = P4estMesh{3}(mesh_file, initial_refinement_level = 0) diff --git a/examples/structured_2d_dgsem/elixir_shallowwater_well_balanced.jl b/examples/structured_2d_dgsem/elixir_shallowwater_well_balanced.jl index 61dd252fd83..a6a56aa807c 100644 --- a/examples/structured_2d_dgsem/elixir_shallowwater_well_balanced.jl +++ b/examples/structured_2d_dgsem/elixir_shallowwater_well_balanced.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi diff --git a/examples/t8code_2d_dgsem/elixir_advection_amr_unstructured_flag.jl b/examples/t8code_2d_dgsem/elixir_advection_amr_unstructured_flag.jl index f285d24fc6c..0923e328487 100644 --- a/examples/t8code_2d_dgsem/elixir_advection_amr_unstructured_flag.jl +++ b/examples/t8code_2d_dgsem/elixir_advection_amr_unstructured_flag.jl @@ -1,4 +1,3 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -31,10 +30,8 @@ Trixi.validate_faces(faces) mapping_flag = Trixi.transfinite_mapping(faces) # Unstructured mesh with 24 cells of the square domain [-1, 1]^n -mesh_file = joinpath(@__DIR__, "square_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", + joinpath(@__DIR__, "square_unstructured_2.inp")) # INP mesh files are only support by p4est. Hence, we # create a p4est connecvity object first from which diff --git a/examples/t8code_2d_dgsem/elixir_advection_unstructured_flag.jl b/examples/t8code_2d_dgsem/elixir_advection_unstructured_flag.jl index 5ba1ab15489..ba8f1b59b80 100644 --- a/examples/t8code_2d_dgsem/elixir_advection_unstructured_flag.jl +++ b/examples/t8code_2d_dgsem/elixir_advection_unstructured_flag.jl @@ -1,4 +1,3 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -28,10 +27,8 @@ Trixi.validate_faces(faces) mapping_flag = Trixi.transfinite_mapping(faces) # Unstructured mesh with 24 cells of the square domain [-1, 1]^n. 
-mesh_file = joinpath(@__DIR__, "square_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", + joinpath(@__DIR__, "square_unstructured_2.inp")) # INP mesh files are only support by p4est. Hence, we # create a p4est connecvity object first from which diff --git a/examples/t8code_2d_dgsem/elixir_euler_free_stream.jl b/examples/t8code_2d_dgsem/elixir_euler_free_stream.jl index 37d15f38566..5e6c4193c50 100644 --- a/examples/t8code_2d_dgsem/elixir_euler_free_stream.jl +++ b/examples/t8code_2d_dgsem/elixir_euler_free_stream.jl @@ -1,4 +1,3 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -30,10 +29,8 @@ end # Get the uncurved mesh from a file (downloads the file if not available locally) # Unstructured mesh with 48 cells of the square domain [-1, 1]^n -mesh_file = joinpath(@__DIR__, "square_unstructured_1.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/a075f8ec39a67fa9fad8f6f84342cbca/raw/a7206a02ed3a5d3cadacd8d9694ac154f9151db7/square_unstructured_1.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/a075f8ec39a67fa9fad8f6f84342cbca/raw/a7206a02ed3a5d3cadacd8d9694ac154f9151db7/square_unstructured_1.inp", + joinpath(@__DIR__, "square_unstructured_1.inp")) # INP mesh files are only support by p4est. Hence, we # create a p4est connecvity object first from which diff --git a/examples/t8code_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl b/examples/t8code_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl index bcc1abc560e..e496eb76729 100644 --- a/examples/t8code_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl +++ b/examples/t8code_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl @@ -1,4 +1,3 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -30,10 +29,8 @@ mapping_flag = Trixi.transfinite_mapping(faces) # Get the uncurved mesh from a file (downloads the file if not available locally) # Unstructured mesh with 24 cells of the square domain [-1, 1]^n -mesh_file = joinpath(@__DIR__, "square_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", + joinpath(@__DIR__, "square_unstructured_2.inp")) # INP mesh files are only support by p4est. 
Hence, we # create a p4est connecvity object first from which diff --git a/examples/t8code_2d_dgsem/elixir_mhd_rotor.jl b/examples/t8code_2d_dgsem/elixir_mhd_rotor.jl index adb154948fb..ff2e40ae607 100644 --- a/examples/t8code_2d_dgsem/elixir_mhd_rotor.jl +++ b/examples/t8code_2d_dgsem/elixir_mhd_rotor.jl @@ -1,4 +1,3 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -68,10 +67,8 @@ function mapping_twist(xi, eta) return SVector(x, y) end -mesh_file = joinpath(@__DIR__, "square_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", + joinpath(@__DIR__, "square_unstructured_2.inp")) # INP mesh files are only support by p4est. Hence, we # create a p4est connecvity object first from which diff --git a/examples/t8code_3d_dgsem/elixir_advection_amr_unstructured_curved.jl b/examples/t8code_3d_dgsem/elixir_advection_amr_unstructured_curved.jl index 617736afbdd..e7c0f4b7318 100644 --- a/examples/t8code_3d_dgsem/elixir_advection_amr_unstructured_curved.jl +++ b/examples/t8code_3d_dgsem/elixir_advection_amr_unstructured_curved.jl @@ -1,4 +1,3 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -47,11 +46,9 @@ function mapping(xi, eta, zeta) return SVector(5 * x, 5 * y, 5 * z) end -# Unstructured mesh with 48 cells of the cube domain [-1, 1]^3 -mesh_file = joinpath(@__DIR__, "cube_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp", - mesh_file) +# Unstructured mesh with 48 cells of the cube domain [-1, 1]^3. +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp", + joinpath(@__DIR__, "cube_unstructured_2.inp")) # INP mesh files are only support by p4est. Hence, we # create a p4est connectivity object first from which diff --git a/examples/t8code_3d_dgsem/elixir_advection_unstructured_curved.jl b/examples/t8code_3d_dgsem/elixir_advection_unstructured_curved.jl index df358435c9a..ee27ee117fe 100644 --- a/examples/t8code_3d_dgsem/elixir_advection_unstructured_curved.jl +++ b/examples/t8code_3d_dgsem/elixir_advection_unstructured_curved.jl @@ -1,4 +1,3 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -45,10 +44,8 @@ function mapping(xi, eta, zeta) end # Unstructured mesh with 68 cells of the cube domain [-1, 1]^3 -mesh_file = joinpath(@__DIR__, "cube_unstructured_1.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp", + joinpath(@__DIR__, "cube_unstructured_1.inp")) # INP mesh files are only support by p4est. 
Hence, we # create a p4est connectivity object first from which diff --git a/examples/t8code_3d_dgsem/elixir_euler_ec.jl b/examples/t8code_3d_dgsem/elixir_euler_ec.jl index 07745c3ac56..b720bfcd375 100644 --- a/examples/t8code_3d_dgsem/elixir_euler_ec.jl +++ b/examples/t8code_3d_dgsem/elixir_euler_ec.jl @@ -1,4 +1,3 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -45,10 +44,8 @@ function mapping(xi_, eta_, zeta_) end # Unstructured mesh with 48 cells of the cube domain [-1, 1]^3 -mesh_file = joinpath(@__DIR__, "cube_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp", + joinpath(@__DIR__, "cube_unstructured_2.inp")) # INP mesh files are only support by p4est. Hence, we # create a p4est connectivity object first from which diff --git a/examples/t8code_3d_dgsem/elixir_euler_free_stream.jl b/examples/t8code_3d_dgsem/elixir_euler_free_stream.jl index e135d464810..b70a6091adf 100644 --- a/examples/t8code_3d_dgsem/elixir_euler_free_stream.jl +++ b/examples/t8code_3d_dgsem/elixir_euler_free_stream.jl @@ -1,4 +1,3 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -46,10 +45,8 @@ function mapping(xi_, eta_, zeta_) end # Unstructured mesh with 68 cells of the cube domain [-1, 1]^3 -mesh_file = joinpath(@__DIR__, "cube_unstructured_1.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp", + joinpath(@__DIR__, "cube_unstructured_1.inp")) # INP mesh files are only support by p4est. Hence, we # create a p4est connectivity object first from which diff --git a/examples/t8code_3d_dgsem/elixir_euler_free_stream_extruded.jl b/examples/t8code_3d_dgsem/elixir_euler_free_stream_extruded.jl index d129b59826e..6ae38d20b5a 100644 --- a/examples/t8code_3d_dgsem/elixir_euler_free_stream_extruded.jl +++ b/examples/t8code_3d_dgsem/elixir_euler_free_stream_extruded.jl @@ -1,4 +1,3 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -35,10 +34,8 @@ function mapping(xi, eta_, zeta_) end # Unstructured mesh with 48 cells of the cube domain [-1, 1]^3 -mesh_file = joinpath(@__DIR__, "cube_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp", + joinpath(@__DIR__, "cube_unstructured_2.inp")) # INP mesh files are only support by p4est. 
Hence, we # create a p4est connecvity object first from which diff --git a/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl b/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl index d4664522bea..6856be36ea1 100644 --- a/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl +++ b/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl @@ -1,4 +1,3 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -48,10 +47,8 @@ function mapping(xi, eta, zeta) end # Unstructured mesh with 68 cells of the cube domain [-1, 1]^3 -mesh_file = joinpath(@__DIR__, "cube_unstructured_1.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp", + joinpath(@__DIR__, "cube_unstructured_1.inp")) # INP mesh files are only support by p4est. Hence, we # create a p4est connecvity object first from which diff --git a/examples/unstructured_2d_dgsem/elixir_acoustics_gauss_wall.jl b/examples/unstructured_2d_dgsem/elixir_acoustics_gauss_wall.jl index 8f8e867dca8..9741430d11c 100644 --- a/examples/unstructured_2d_dgsem/elixir_acoustics_gauss_wall.jl +++ b/examples/unstructured_2d_dgsem/elixir_acoustics_gauss_wall.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -14,11 +13,8 @@ equations = AcousticPerturbationEquations2D(v_mean_global = (0.0, -0.5), solver = DGSEM(polydeg = 4, surface_flux = flux_lax_friedrichs) # Create unstructured quadrilateral mesh from a file -default_mesh_file = joinpath(@__DIR__, "mesh_five_circles_in_circle.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/3c79baad6b4d73bb26ec6420b5d16f45/raw/22aefc4ec2107cf0bffc40e81dfbc52240c625b1/mesh_five_circles_in_circle.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/3c79baad6b4d73bb26ec6420b5d16f45/raw/22aefc4ec2107cf0bffc40e81dfbc52240c625b1/mesh_five_circles_in_circle.mesh", + joinpath(@__DIR__, "mesh_five_circles_in_circle.mesh")) mesh = UnstructuredMesh2D(mesh_file) diff --git a/examples/unstructured_2d_dgsem/elixir_advection_basic.jl b/examples/unstructured_2d_dgsem/elixir_advection_basic.jl index afef6c2c38f..c0ee453344d 100644 --- a/examples/unstructured_2d_dgsem/elixir_advection_basic.jl +++ b/examples/unstructured_2d_dgsem/elixir_advection_basic.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -16,12 +15,8 @@ solver = DGSEM(polydeg = 6, surface_flux = flux_lax_friedrichs) ############################################################################### # Get the curved quad mesh from a file (downloads the file if not available locally) - -default_mesh_file = joinpath(@__DIR__, "mesh_periodic_square_with_twist.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/12ce661d7c354c3d94c74b964b0f1c96/raw/8275b9a60c6e7ebbdea5fc4b4f091c47af3d5273/mesh_periodic_square_with_twist.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = 
Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/12ce661d7c354c3d94c74b964b0f1c96/raw/8275b9a60c6e7ebbdea5fc4b4f091c47af3d5273/mesh_periodic_square_with_twist.mesh", + joinpath(@__DIR__, "mesh_periodic_square_with_twist.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/examples/unstructured_2d_dgsem/elixir_euler_basic.jl b/examples/unstructured_2d_dgsem/elixir_euler_basic.jl index cd6a1995757..f8976120d53 100644 --- a/examples/unstructured_2d_dgsem/elixir_euler_basic.jl +++ b/examples/unstructured_2d_dgsem/elixir_euler_basic.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -25,12 +24,8 @@ solver = DGSEM(polydeg = 8, surface_flux = flux_lax_friedrichs) ############################################################################### # Get the curved quad mesh from a file (downloads the file if not available locally) - -default_mesh_file = joinpath(@__DIR__, "mesh_trixi_unstructured_mesh_docs.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/52056f1487853fab63b7f4ed7f171c80/raw/9d573387dfdbb8bce2a55db7246f4207663ac07f/mesh_trixi_unstructured_mesh_docs.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/52056f1487853fab63b7f4ed7f171c80/raw/9d573387dfdbb8bce2a55db7246f4207663ac07f/mesh_trixi_unstructured_mesh_docs.mesh", + joinpath(@__DIR__, "mesh_trixi_unstructured_mesh_docs.mesh")) mesh = UnstructuredMesh2D(mesh_file) diff --git a/examples/unstructured_2d_dgsem/elixir_euler_ec.jl b/examples/unstructured_2d_dgsem/elixir_euler_ec.jl index 0f53aa62a18..58b4d9a1dd2 100644 --- a/examples/unstructured_2d_dgsem/elixir_euler_ec.jl +++ b/examples/unstructured_2d_dgsem/elixir_euler_ec.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -19,12 +18,8 @@ solver = DGSEM(polydeg = 6, surface_flux = flux_ranocha, ############################################################################### # Get the curved quad mesh from a file - -default_mesh_file = joinpath(@__DIR__, "mesh_periodic_square_with_twist.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/12ce661d7c354c3d94c74b964b0f1c96/raw/8275b9a60c6e7ebbdea5fc4b4f091c47af3d5273/mesh_periodic_square_with_twist.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/12ce661d7c354c3d94c74b964b0f1c96/raw/8275b9a60c6e7ebbdea5fc4b4f091c47af3d5273/mesh_periodic_square_with_twist.mesh", + joinpath(@__DIR__, "mesh_periodic_square_with_twist.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/examples/unstructured_2d_dgsem/elixir_euler_free_stream.jl b/examples/unstructured_2d_dgsem/elixir_euler_free_stream.jl index a2fec1a320a..f266a3de0b2 100644 --- a/examples/unstructured_2d_dgsem/elixir_euler_free_stream.jl +++ b/examples/unstructured_2d_dgsem/elixir_euler_free_stream.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -26,12 +25,8 @@ solver = DGSEM(polydeg = 6, surface_flux = flux_hll) ############################################################################### # Get the curved quad mesh from a file (downloads the file if not available locally) - -default_mesh_file = joinpath(@__DIR__, "mesh_gingerbread_man.mesh") -isfile(default_mesh_file) || - 
download("https://gist.githubusercontent.com/andrewwinters5000/2c6440b5f8a57db131061ad7aa78ee2b/raw/1f89fdf2c874ff678c78afb6fe8dc784bdfd421f/mesh_gingerbread_man.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/2c6440b5f8a57db131061ad7aa78ee2b/raw/1f89fdf2c874ff678c78afb6fe8dc784bdfd421f/mesh_gingerbread_man.mesh", + joinpath(@__DIR__, "mesh_gingerbread_man.mesh")) mesh = UnstructuredMesh2D(mesh_file) diff --git a/examples/unstructured_2d_dgsem/elixir_euler_periodic.jl b/examples/unstructured_2d_dgsem/elixir_euler_periodic.jl index afd177f0740..e640001ad7f 100644 --- a/examples/unstructured_2d_dgsem/elixir_euler_periodic.jl +++ b/examples/unstructured_2d_dgsem/elixir_euler_periodic.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -19,12 +18,8 @@ solver = DGSEM(polydeg = 6, surface_flux = FluxRotated(flux_hll)) ############################################################################### # Get the curved quad mesh from a file (downloads the file if not available locally) - -default_mesh_file = joinpath(@__DIR__, "mesh_periodic_square_with_twist.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/12ce661d7c354c3d94c74b964b0f1c96/raw/8275b9a60c6e7ebbdea5fc4b4f091c47af3d5273/mesh_periodic_square_with_twist.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/12ce661d7c354c3d94c74b964b0f1c96/raw/8275b9a60c6e7ebbdea5fc4b4f091c47af3d5273/mesh_periodic_square_with_twist.mesh", + joinpath(@__DIR__, "mesh_periodic_square_with_twist.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/examples/unstructured_2d_dgsem/elixir_euler_sedov.jl b/examples/unstructured_2d_dgsem/elixir_euler_sedov.jl index e1cc0932969..06053273b74 100644 --- a/examples/unstructured_2d_dgsem/elixir_euler_sedov.jl +++ b/examples/unstructured_2d_dgsem/elixir_euler_sedov.jl @@ -55,11 +55,8 @@ solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux, volume_integral = volume_integral) # Get the curved quad mesh from a file -default_mesh_file = joinpath(@__DIR__, "mesh_periodic_square_with_twist.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/12ce661d7c354c3d94c74b964b0f1c96/raw/8275b9a60c6e7ebbdea5fc4b4f091c47af3d5273/mesh_periodic_square_with_twist.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/12ce661d7c354c3d94c74b964b0f1c96/raw/8275b9a60c6e7ebbdea5fc4b4f091c47af3d5273/mesh_periodic_square_with_twist.mesh", + joinpath(@__DIR__, "mesh_periodic_square_with_twist.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/examples/unstructured_2d_dgsem/elixir_euler_wall_bc.jl b/examples/unstructured_2d_dgsem/elixir_euler_wall_bc.jl index b2abefe7eeb..65e5eb51ce6 100644 --- a/examples/unstructured_2d_dgsem/elixir_euler_wall_bc.jl +++ b/examples/unstructured_2d_dgsem/elixir_euler_wall_bc.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -40,12 +39,8 @@ solver = DGSEM(polydeg = 4, surface_flux = flux_hll) ############################################################################### # Get the curved quad mesh from a file - -default_mesh_file = joinpath(@__DIR__, "mesh_box_around_circle.mesh") -isfile(default_mesh_file) || - 
download("https://gist.githubusercontent.com/andrewwinters5000/8b9b11a1eedfa54b215c122c3d17b271/raw/0d2b5d98c87e67a6f384693a8b8e54b4c9fcbf3d/mesh_box_around_circle.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/8b9b11a1eedfa54b215c122c3d17b271/raw/0d2b5d98c87e67a6f384693a8b8e54b4c9fcbf3d/mesh_box_around_circle.mesh", + joinpath(@__DIR__, "mesh_box_around_circle.mesh")) mesh = UnstructuredMesh2D(mesh_file) diff --git a/examples/unstructured_2d_dgsem/elixir_mhd_alfven_wave.jl b/examples/unstructured_2d_dgsem/elixir_mhd_alfven_wave.jl index 3ed3e828ca8..0c7152a6ea0 100644 --- a/examples/unstructured_2d_dgsem/elixir_mhd_alfven_wave.jl +++ b/examples/unstructured_2d_dgsem/elixir_mhd_alfven_wave.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -17,12 +16,9 @@ solver = DGSEM(polydeg = 7, volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", - default_mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", + joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh")) -mesh_file = default_mesh_file mesh = UnstructuredMesh2D(mesh_file, periodicity = true) semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) diff --git a/examples/unstructured_2d_dgsem/elixir_mhd_ec.jl b/examples/unstructured_2d_dgsem/elixir_mhd_ec.jl index a40f92cac02..805934e305d 100644 --- a/examples/unstructured_2d_dgsem/elixir_mhd_ec.jl +++ b/examples/unstructured_2d_dgsem/elixir_mhd_ec.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -35,11 +34,8 @@ solver = DGSEM(polydeg = 6, volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", + joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_dirichlet.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_dirichlet.jl index 1148f25fae3..df1a69192ce 100644 --- a/examples/unstructured_2d_dgsem/elixir_shallowwater_dirichlet.jl +++ b/examples/unstructured_2d_dgsem/elixir_shallowwater_dirichlet.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -38,11 +37,8 @@ solver = DGSEM(polydeg = 4, surface_flux = (flux_hll, flux_nonconservative_fjord # This 
setup is for the curved, split form well-balancedness testing # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "mesh_outer_circle.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/9beddd9cd00e2a0a15865129eeb24928/raw/be71e67fa48bc4e1e97f5f6cd77c3ed34c6ba9be/mesh_outer_circle.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/9beddd9cd00e2a0a15865129eeb24928/raw/be71e67fa48bc4e1e97f5f6cd77c3ed34c6ba9be/mesh_outer_circle.mesh", + joinpath(@__DIR__, "mesh_outer_circle.mesh")) mesh = UnstructuredMesh2D(mesh_file) diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_ec.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_ec.jl index 8e9d396d826..9122fb8287d 100644 --- a/examples/unstructured_2d_dgsem/elixir_shallowwater_ec.jl +++ b/examples/unstructured_2d_dgsem/elixir_shallowwater_ec.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -25,11 +24,8 @@ solver = DGSEM(polydeg = 6, # This setup is for the curved, split form entropy conservation testing (needs periodic BCs) # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", + joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_ec_shockcapturing.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_ec_shockcapturing.jl index 94202b81df0..98408db5a78 100644 --- a/examples/unstructured_2d_dgsem/elixir_shallowwater_ec_shockcapturing.jl +++ b/examples/unstructured_2d_dgsem/elixir_shallowwater_ec_shockcapturing.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -34,11 +33,8 @@ solver = DGSEM(basis, surface_flux, volume_integral) # This setup is for the curved, split form entropy conservation testing (needs periodic BCs) # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", + joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_source_terms.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_source_terms.jl index 
07668688406..a7aa5808955 100644 --- a/examples/unstructured_2d_dgsem/elixir_shallowwater_source_terms.jl +++ b/examples/unstructured_2d_dgsem/elixir_shallowwater_source_terms.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -23,11 +22,8 @@ solver = DGSEM(polydeg = 6, surface_flux = surface_flux, # This setup is for the curved, split form convergence test on a periodic domain # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", + joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_three_mound_dam_break.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_three_mound_dam_break.jl index 6164f9d4a55..df321aad267 100644 --- a/examples/unstructured_2d_dgsem/elixir_shallowwater_three_mound_dam_break.jl +++ b/examples/unstructured_2d_dgsem/elixir_shallowwater_three_mound_dam_break.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -93,16 +92,10 @@ solver = DGSEM(basis, surface_flux, volume_integral) ############################################################################### # Get the unstructured quad mesh from a file (downloads the file if not available locally) +mesh_file = Trixi.download("https://gist.githubusercontent.com/svengoldberg/c3c87fecb3fc6e46be7f0d1c7cb35f83/raw/e817ecd9e6c4686581d63c46128f9b6468d396d3/mesh_three_mound.mesh", + joinpath(@__DIR__, "mesh_three_mound.mesh")) -default_meshfile = joinpath(@__DIR__, "mesh_three_mound.mesh") - -isfile(default_meshfile) || - download("https://gist.githubusercontent.com/svengoldberg/c3c87fecb3fc6e46be7f0d1c7cb35f83/raw/e817ecd9e6c4686581d63c46128f9b6468d396d3/mesh_three_mound.mesh", - default_meshfile) - -meshfile = default_meshfile - -mesh = UnstructuredMesh2D(meshfile) +mesh = UnstructuredMesh2D(mesh_file) # Create the semi discretization object semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver; diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_convergence.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_convergence.jl index 0b86095663a..fcc08b6f991 100644 --- a/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_convergence.jl +++ b/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_convergence.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -24,11 +23,8 @@ solver = DGSEM(polydeg = 6, surface_flux = surface_flux, # This setup is for the curved, split form convergence test on a periodic domain # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh") -isfile(default_mesh_file) || - 
download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", + joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_dam_break.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_dam_break.jl index 4ad5f7e3201..821f31c52ac 100644 --- a/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_dam_break.jl +++ b/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_dam_break.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -48,11 +47,8 @@ solver = DGSEM(polydeg = 6, surface_flux = surface_flux, ############################################################################### # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", + joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = false) diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_well_balanced.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_well_balanced.jl index 6a727df2502..ca1f54595bb 100644 --- a/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_well_balanced.jl +++ b/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_well_balanced.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -42,11 +41,8 @@ solver = DGSEM(polydeg = 6, surface_flux = surface_flux, # This setup is for the curved, split form well-balancedness testing # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", + joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_wall_bc_shockcapturing.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_wall_bc_shockcapturing.jl index 76b9642d595..f115113ed27 100644 --- 
a/examples/unstructured_2d_dgsem/elixir_shallowwater_wall_bc_shockcapturing.jl +++ b/examples/unstructured_2d_dgsem/elixir_shallowwater_wall_bc_shockcapturing.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -55,12 +54,8 @@ solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux, ############################################################################### # Get the unstructured quad mesh from a file (downloads the file if not available locally) - -default_mesh_file = joinpath(@__DIR__, "mesh_outer_circle.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/9beddd9cd00e2a0a15865129eeb24928/raw/be71e67fa48bc4e1e97f5f6cd77c3ed34c6ba9be/mesh_outer_circle.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/9beddd9cd00e2a0a15865129eeb24928/raw/be71e67fa48bc4e1e97f5f6cd77c3ed34c6ba9be/mesh_outer_circle.mesh", + joinpath(@__DIR__, "mesh_outer_circle.mesh")) mesh = UnstructuredMesh2D(mesh_file) diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_well_balanced.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_well_balanced.jl index bf4d0be682a..6bad3a77f03 100644 --- a/examples/unstructured_2d_dgsem/elixir_shallowwater_well_balanced.jl +++ b/examples/unstructured_2d_dgsem/elixir_shallowwater_well_balanced.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -36,13 +35,9 @@ solver = DGSEM(polydeg = 6, surface_flux = surface_flux, ############################################################################### # This setup is for the curved, split form well-balancedness testing - # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", + joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/examples/unstructured_2d_fdsbp/elixir_advection_basic.jl b/examples/unstructured_2d_fdsbp/elixir_advection_basic.jl index c181203e7a4..fe7e708f3b3 100644 --- a/examples/unstructured_2d_fdsbp/elixir_advection_basic.jl +++ b/examples/unstructured_2d_fdsbp/elixir_advection_basic.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -21,12 +20,8 @@ solver = FDSBP(D_SBP, ############################################################################### # Get the curved quad mesh from a file (downloads the file if not available locally) - -default_mesh_file = joinpath(@__DIR__, "mesh_periodic_square_with_twist.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/12ce661d7c354c3d94c74b964b0f1c96/raw/8275b9a60c6e7ebbdea5fc4b4f091c47af3d5273/mesh_periodic_square_with_twist.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = 
Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/12ce661d7c354c3d94c74b964b0f1c96/raw/8275b9a60c6e7ebbdea5fc4b4f091c47af3d5273/mesh_periodic_square_with_twist.mesh", + joinpath(@__DIR__, "mesh_periodic_square_with_twist.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/examples/unstructured_2d_fdsbp/elixir_euler_free_stream.jl b/examples/unstructured_2d_fdsbp/elixir_euler_free_stream.jl index 7ada50c0c65..25a81c16bf9 100644 --- a/examples/unstructured_2d_fdsbp/elixir_euler_free_stream.jl +++ b/examples/unstructured_2d_fdsbp/elixir_euler_free_stream.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -33,12 +32,8 @@ solver = FDSBP(D_SBP, ############################################################################### # Get the curved quad mesh from a file (downloads the file if not available locally) - -default_mesh_file = joinpath(@__DIR__, "mesh_gingerbread_man.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/2c6440b5f8a57db131061ad7aa78ee2b/raw/1f89fdf2c874ff678c78afb6fe8dc784bdfd421f/mesh_gingerbread_man.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/2c6440b5f8a57db131061ad7aa78ee2b/raw/1f89fdf2c874ff678c78afb6fe8dc784bdfd421f/mesh_gingerbread_man.mesh", + joinpath(@__DIR__, "mesh_gingerbread_man.mesh")) mesh = UnstructuredMesh2D(mesh_file) diff --git a/examples/unstructured_2d_fdsbp/elixir_euler_source_terms.jl b/examples/unstructured_2d_fdsbp/elixir_euler_source_terms.jl index edcd221bf59..5f11d41ad5c 100644 --- a/examples/unstructured_2d_fdsbp/elixir_euler_source_terms.jl +++ b/examples/unstructured_2d_fdsbp/elixir_euler_source_terms.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -22,12 +21,8 @@ solver = FDSBP(D_SBP, ############################################################################### # Get the curved quad mesh from a file (downloads the file if not available locally) - -default_mesh_file = joinpath(@__DIR__, "mesh_periodic_square_with_twist.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/12ce661d7c354c3d94c74b964b0f1c96/raw/8275b9a60c6e7ebbdea5fc4b4f091c47af3d5273/mesh_periodic_square_with_twist.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/12ce661d7c354c3d94c74b964b0f1c96/raw/8275b9a60c6e7ebbdea5fc4b4f091c47af3d5273/mesh_periodic_square_with_twist.mesh", + joinpath(@__DIR__, "mesh_periodic_square_with_twist.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/src/Trixi.jl b/src/Trixi.jl index e18b2f6415c..8d74fbc9736 100644 --- a/src/Trixi.jl +++ b/src/Trixi.jl @@ -39,6 +39,7 @@ import SciMLBase: get_du, get_tmp_cache, u_modified!, get_proposed_dt, set_proposed_dt!, terminate!, remake, add_tstop!, has_tstop, first_tstop +using Downloads: Downloads using CodeTracking: CodeTracking using ConstructionBase: ConstructionBase using DiffEqCallbacks: PeriodicCallback, PeriodicCallbackAffect diff --git a/src/auxiliary/auxiliary.jl b/src/auxiliary/auxiliary.jl index 1f7d30d6aa8..92da9a5ba8b 100644 --- a/src/auxiliary/auxiliary.jl +++ b/src/auxiliary/auxiliary.jl @@ -345,4 +345,27 @@ function register_error_hints() return nothing end + +""" + Trixi.download(src_url, file_path) + +Download a file from given `src_url` to given `file_path` if +`file_path` is 
not already a file. This function just returns +`file_path`. +This is a small wrapper of `Downloads.download(src_url, file_path)` +that avoids race conditions when multiple MPI ranks are used. +""" +function download(src_url, file_path) + # Note that `mpi_isroot()` is also `true` if running + # in serial (without MPI). + if mpi_isroot() + isfile(file_path) || Downloads.download(src_url, file_path) + end + + if mpi_isparallel() + MPI.Barrier(mpi_comm()) + end + + return file_path +end end # @muladd diff --git a/src/auxiliary/t8code.jl b/src/auxiliary/t8code.jl index db01476bb86..7c1399fc803 100644 --- a/src/auxiliary/t8code.jl +++ b/src/auxiliary/t8code.jl @@ -46,230 +46,6 @@ function init_t8code() return nothing end -function trixi_t8_unref_forest(forest) - t8_forest_unref(Ref(forest)) -end - -function t8_free(ptr) - T8code.Libt8.sc_free(t8_get_package_id(), ptr) -end - -function trixi_t8_count_interfaces(forest) - # Check that forest is a committed, that is valid and usable, forest. - @assert t8_forest_is_committed(forest) != 0 - - # Get the number of local elements of forest. - num_local_elements = t8_forest_get_local_num_elements(forest) - # Get the number of ghost elements of forest. - num_ghost_elements = t8_forest_get_num_ghosts(forest) - # Get the number of trees that have elements of this process. - num_local_trees = t8_forest_get_num_local_trees(forest) - - current_index = t8_locidx_t(0) - - local_num_conform = 0 - local_num_mortars = 0 - local_num_boundary = 0 - - for itree in 0:(num_local_trees - 1) - tree_class = t8_forest_get_tree_class(forest, itree) - eclass_scheme = t8_forest_get_eclass_scheme(forest, tree_class) - - # Get the number of elements of this tree. - num_elements_in_tree = t8_forest_get_tree_num_elements(forest, itree) - - for ielement in 0:(num_elements_in_tree - 1) - element = t8_forest_get_element_in_tree(forest, itree, ielement) - - level = t8_element_level(eclass_scheme, element) - - num_faces = t8_element_num_faces(eclass_scheme, element) - - for iface in 0:(num_faces - 1) - pelement_indices_ref = Ref{Ptr{t8_locidx_t}}() - pneighbor_leafs_ref = Ref{Ptr{Ptr{t8_element}}}() - pneigh_scheme_ref = Ref{Ptr{t8_eclass_scheme}}() - - dual_faces_ref = Ref{Ptr{Cint}}() - num_neighbors_ref = Ref{Cint}() - - forest_is_balanced = Cint(1) - - t8_forest_leaf_face_neighbors(forest, itree, element, - pneighbor_leafs_ref, iface, dual_faces_ref, - num_neighbors_ref, - pelement_indices_ref, pneigh_scheme_ref, - forest_is_balanced) - - num_neighbors = num_neighbors_ref[] - neighbor_ielements = unsafe_wrap(Array, pelement_indices_ref[], - num_neighbors) - neighbor_leafs = unsafe_wrap(Array, pneighbor_leafs_ref[], num_neighbors) - neighbor_scheme = pneigh_scheme_ref[] - - if num_neighbors > 0 - neighbor_level = t8_element_level(neighbor_scheme, neighbor_leafs[1]) - - # Conforming interface: The second condition ensures we only visit the interface once. 
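# Illustrative usage sketch for the `Trixi.download` helper above (not part of
# this patch; the URL and file name are placeholders). The elixirs replace the
# old `isfile(mesh_file) || download(url, mesh_file)` pattern with a single call:
#
#   mesh_file = Trixi.download("https://example.org/some_mesh.mesh",
#                              joinpath(@__DIR__, "some_mesh.mesh"))
#   mesh = UnstructuredMesh2D(mesh_file)
#
# Under MPI, only the root rank downloads the file; all other ranks block at
# the barrier and then reuse the returned `file_path`.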
- if level == neighbor_level && current_index <= neighbor_ielements[1] - local_num_conform += 1 - elseif level < neighbor_level - local_num_mortars += 1 - end - else - local_num_boundary += 1 - end - - t8_free(dual_faces_ref[]) - t8_free(pneighbor_leafs_ref[]) - t8_free(pelement_indices_ref[]) - end # for - - current_index += 1 - end # for - end # for - - return (interfaces = local_num_conform, - mortars = local_num_mortars, - boundaries = local_num_boundary) -end - -function trixi_t8_fill_mesh_info(forest, elements, interfaces, mortars, boundaries, - boundary_names) - # Check that forest is a committed, that is valid and usable, forest. - @assert t8_forest_is_committed(forest) != 0 - - # Get the number of local elements of forest. - num_local_elements = t8_forest_get_local_num_elements(forest) - # Get the number of ghost elements of forest. - num_ghost_elements = t8_forest_get_num_ghosts(forest) - # Get the number of trees that have elements of this process. - num_local_trees = t8_forest_get_num_local_trees(forest) - - current_index = t8_locidx_t(0) - - local_num_conform = 0 - local_num_mortars = 0 - local_num_boundary = 0 - - for itree in 0:(num_local_trees - 1) - tree_class = t8_forest_get_tree_class(forest, itree) - eclass_scheme = t8_forest_get_eclass_scheme(forest, tree_class) - - # Get the number of elements of this tree. - num_elements_in_tree = t8_forest_get_tree_num_elements(forest, itree) - - for ielement in 0:(num_elements_in_tree - 1) - element = t8_forest_get_element_in_tree(forest, itree, ielement) - - level = t8_element_level(eclass_scheme, element) - - num_faces = t8_element_num_faces(eclass_scheme, element) - - for iface in 0:(num_faces - 1) - - # Compute the `orientation` of the touching faces. - if t8_element_is_root_boundary(eclass_scheme, element, iface) == 1 - cmesh = t8_forest_get_cmesh(forest) - itree_in_cmesh = t8_forest_ltreeid_to_cmesh_ltreeid(forest, itree) - iface_in_tree = t8_element_tree_face(eclass_scheme, element, iface) - orientation_ref = Ref{Cint}() - - t8_cmesh_get_face_neighbor(cmesh, itree_in_cmesh, iface_in_tree, C_NULL, - orientation_ref) - orientation = orientation_ref[] - else - orientation = zero(Cint) - end - - pelement_indices_ref = Ref{Ptr{t8_locidx_t}}() - pneighbor_leafs_ref = Ref{Ptr{Ptr{t8_element}}}() - pneigh_scheme_ref = Ref{Ptr{t8_eclass_scheme}}() - - dual_faces_ref = Ref{Ptr{Cint}}() - num_neighbors_ref = Ref{Cint}() - - forest_is_balanced = Cint(1) - - t8_forest_leaf_face_neighbors(forest, itree, element, - pneighbor_leafs_ref, iface, dual_faces_ref, - num_neighbors_ref, - pelement_indices_ref, pneigh_scheme_ref, - forest_is_balanced) - - num_neighbors = num_neighbors_ref[] - dual_faces = unsafe_wrap(Array, dual_faces_ref[], num_neighbors) - neighbor_ielements = unsafe_wrap(Array, pelement_indices_ref[], - num_neighbors) - neighbor_leafs = unsafe_wrap(Array, pneighbor_leafs_ref[], num_neighbors) - neighbor_scheme = pneigh_scheme_ref[] - - if num_neighbors > 0 - neighbor_level = t8_element_level(neighbor_scheme, neighbor_leafs[1]) - - # Conforming interface: The second condition ensures we only visit the interface once. - if level == neighbor_level && current_index <= neighbor_ielements[1] - local_num_conform += 1 - - faces = (iface, dual_faces[1]) - interface_id = local_num_conform - - # Write data to interfaces container. - interfaces.neighbor_ids[1, interface_id] = current_index + 1 - interfaces.neighbor_ids[2, interface_id] = neighbor_ielements[1] + 1 - - # Save interfaces.node_indices dimension specific in containers_3d.jl. 
- init_interface_node_indices!(interfaces, faces, orientation, - interface_id) - # Non-conforming interface. - elseif level < neighbor_level - local_num_mortars += 1 - - faces = (dual_faces[1], iface) - - mortar_id = local_num_mortars - - # Last entry is the large element. - mortars.neighbor_ids[end, mortar_id] = current_index + 1 - - # Fill in the `mortars.neighbor_ids` array and reorder if necessary. - init_mortar_neighbor_ids!(mortars, faces[2], faces[1], - orientation, neighbor_ielements, - mortar_id) - - # Fill in the `mortars.node_indices` array. - init_mortar_node_indices!(mortars, faces, orientation, mortar_id) - - # else: "level > neighbor_level" is skipped since we visit the mortar interface only once. - end - - # Domain boundary. - else - local_num_boundary += 1 - boundary_id = local_num_boundary - - boundaries.neighbor_ids[boundary_id] = current_index + 1 - - init_boundary_node_indices!(boundaries, iface, boundary_id) - - # One-based indexing. - boundaries.name[boundary_id] = boundary_names[iface + 1, itree + 1] - end - - t8_free(dual_faces_ref[]) - t8_free(pneighbor_leafs_ref[]) - t8_free(pelement_indices_ref[]) - end # for iface = ... - - current_index += 1 - end # for - end # for - - return (interfaces = local_num_conform, - mortars = local_num_mortars, - boundaries = local_num_boundary) -end - function trixi_t8_get_local_element_levels(forest) # Check that forest is a committed, that is valid and usable, forest. @assert t8_forest_is_committed(forest) != 0 @@ -341,23 +117,16 @@ function adapt_callback(forest, end function trixi_t8_adapt_new(old_forest, indicators) - # Check that forest is a committed, that is valid and usable, forest. - @assert t8_forest_is_committed(old_forest) != 0 - - # Init new forest. new_forest_ref = Ref{t8_forest_t}() t8_forest_init(new_forest_ref) new_forest = new_forest_ref[] - let set_from = C_NULL, recursive = 0, set_for_coarsening = 0, no_repartition = 0, - do_ghost = 1 - + let set_from = C_NULL, recursive = 0, no_repartition = 1, do_ghost = 1 t8_forest_set_user_data(new_forest, pointer(indicators)) t8_forest_set_adapt(new_forest, old_forest, @t8_adapt_callback(adapt_callback), recursive) t8_forest_set_balance(new_forest, set_from, no_repartition) - t8_forest_set_partition(new_forest, set_from, set_for_coarsening) - t8_forest_set_ghost(new_forest, do_ghost, T8_GHOST_FACES) # Note: MPI support not available yet so it is a dummy call. + t8_forest_set_ghost(new_forest, do_ghost, T8_GHOST_FACES) t8_forest_commit(new_forest) end diff --git a/src/callbacks_step/amr.jl b/src/callbacks_step/amr.jl index 5854c8617c3..6f57d6647fc 100644 --- a/src/callbacks_step/amr.jl +++ b/src/callbacks_step/amr.jl @@ -726,7 +726,7 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::P4estMesh, return has_changed end -function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::SerialT8codeMesh, +function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::T8codeMesh, equations, dg::DG, cache, semi, t, iter; only_refine = false, only_coarsen = false, @@ -754,29 +754,29 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::SerialT8codeMe @trixi_timeit timer() "adapt" begin difference = @trixi_timeit timer() "mesh" trixi_t8_adapt!(mesh, indicators) - @trixi_timeit timer() "solver" adapt!(u_ode, adaptor, mesh, equations, dg, - cache, difference) - end + # Store whether there were any cells coarsened or refined and perform load balancing. 
+ has_changed = any(difference .!= 0) - # Store whether there were any cells coarsened or refined and perform load balancing. - has_changed = any(difference .!= 0) + # Check if mesh changed on other processes + if mpi_isparallel() + has_changed = MPI.Allreduce!(Ref(has_changed), |, mpi_comm())[] + end - # TODO: T8codeMesh for MPI not implemented yet. - # Check if mesh changed on other processes - # if mpi_isparallel() - # has_changed = MPI.Allreduce!(Ref(has_changed), |, mpi_comm())[] - # end + if has_changed + @trixi_timeit timer() "solver" adapt!(u_ode, adaptor, mesh, equations, dg, + cache, difference) + end + end if has_changed - # TODO: T8codeMesh for MPI not implemented yet. - # if mpi_isparallel() && amr_callback.dynamic_load_balancing - # @trixi_timeit timer() "dynamic load balancing" begin - # global_first_quadrant = unsafe_wrap(Array, mesh.p4est.global_first_quadrant, mpi_nranks() + 1) - # old_global_first_quadrant = copy(global_first_quadrant) - # partition!(mesh) - # rebalance_solver!(u_ode, mesh, equations, dg, cache, old_global_first_quadrant) - # end - # end + if mpi_isparallel() && amr_callback.dynamic_load_balancing + @trixi_timeit timer() "dynamic load balancing" begin + old_global_first_element_ids = get_global_first_element_ids(mesh) + partition!(mesh) + rebalance_solver!(u_ode, mesh, equations, dg, cache, + old_global_first_element_ids) + end + end reinitialize_boundaries!(semi.boundary_conditions, cache) end diff --git a/src/callbacks_step/amr_dg.jl b/src/callbacks_step/amr_dg.jl index 1dcfdccdea8..0a7055af409 100644 --- a/src/callbacks_step/amr_dg.jl +++ b/src/callbacks_step/amr_dg.jl @@ -6,11 +6,14 @@ #! format: noindent # Redistribute data for load balancing after partitioning the mesh -function rebalance_solver!(u_ode::AbstractVector, mesh::ParallelP4estMesh, equations, +function rebalance_solver!(u_ode::AbstractVector, + mesh::Union{ParallelP4estMesh, ParallelT8codeMesh}, + equations, dg::DGSEM, cache, old_global_first_quadrant) - # mpi ranks are 0-based, this array uses 1-based indices - global_first_quadrant = unsafe_wrap(Array, mesh.p4est.global_first_quadrant, - mpi_nranks() + 1) + + # MPI ranks are 0-based. This array uses 1-based indices. + global_first_quadrant = get_global_first_element_ids(mesh) + if global_first_quadrant[mpi_rank() + 1] == old_global_first_quadrant[mpi_rank() + 1] && global_first_quadrant[mpi_rank() + 2] == diff --git a/src/callbacks_step/amr_dg2d.jl b/src/callbacks_step/amr_dg2d.jl index b816bc06e65..94524b23a3a 100644 --- a/src/callbacks_step/amr_dg2d.jl +++ b/src/callbacks_step/amr_dg2d.jl @@ -385,7 +385,12 @@ function adapt!(u_ode::AbstractVector, adaptor, mesh::T8codeMesh{2}, equations, # Return early if there is nothing to do. if !any(difference .!= 0) - return nothing + if mpi_isparallel() + # MPICache init uses all-to-all communication -> reinitialize even if there is nothing to do + # locally (there still might be other MPI ranks that have refined elements) + reinitialize_containers!(mesh, equations, dg, cache) + end + return end # Number of (local) cells/elements. diff --git a/src/callbacks_step/amr_dg3d.jl b/src/callbacks_step/amr_dg3d.jl index 392cbba9e28..3f67951bafe 100644 --- a/src/callbacks_step/amr_dg3d.jl +++ b/src/callbacks_step/amr_dg3d.jl @@ -316,7 +316,12 @@ function adapt!(u_ode::AbstractVector, adaptor, mesh::T8codeMesh{3}, equations, # Return early if there is nothing to do. 
if !any(difference .!= 0) - return nothing + if mpi_isparallel() + # MPICache init uses all-to-all communication -> reinitialize even if there is nothing to do + # locally (there still might be other MPI ranks that have refined elements) + reinitialize_containers!(mesh, equations, dg, cache) + end + return end # Number of (local) cells/elements. diff --git a/src/callbacks_step/analysis_dg2d_parallel.jl b/src/callbacks_step/analysis_dg2d_parallel.jl index a04bf732604..000daa015dc 100644 --- a/src/callbacks_step/analysis_dg2d_parallel.jl +++ b/src/callbacks_step/analysis_dg2d_parallel.jl @@ -91,7 +91,8 @@ function calc_error_norms_per_element(func, u, t, analyzer, end function calc_error_norms(func, u, t, analyzer, - mesh::ParallelP4estMesh{2}, equations, + mesh::Union{ParallelP4estMesh{2}, ParallelT8codeMesh{2}}, + equations, initial_condition, dg::DGSEM, cache, cache_analysis) @unpack vandermonde, weights = analyzer @unpack node_coordinates, inverse_jacobian = cache.elements @@ -171,7 +172,8 @@ function integrate_via_indices(func::Func, u, end function integrate_via_indices(func::Func, u, - mesh::ParallelP4estMesh{2}, equations, + mesh::Union{ParallelP4estMesh{2}, ParallelT8codeMesh{2}}, + equations, dg::DGSEM, cache, args...; normalize = true) where {Func} @unpack weights = dg.basis diff --git a/src/callbacks_step/analysis_dg3d_parallel.jl b/src/callbacks_step/analysis_dg3d_parallel.jl index d8756d91c9d..de777be406d 100644 --- a/src/callbacks_step/analysis_dg3d_parallel.jl +++ b/src/callbacks_step/analysis_dg3d_parallel.jl @@ -6,7 +6,8 @@ #! format: noindent function calc_error_norms(func, u, t, analyzer, - mesh::ParallelP4estMesh{3}, equations, + mesh::Union{ParallelP4estMesh{3}, ParallelT8codeMesh{3}}, + equations, initial_condition, dg::DGSEM, cache, cache_analysis) @unpack vandermonde, weights = analyzer @unpack node_coordinates, inverse_jacobian = cache.elements @@ -64,7 +65,8 @@ function calc_error_norms(func, u, t, analyzer, end function integrate_via_indices(func::Func, u, - mesh::ParallelP4estMesh{3}, equations, + mesh::Union{ParallelP4estMesh{3}, ParallelT8codeMesh{3}}, + equations, dg::DGSEM, cache, args...; normalize = true) where {Func} @unpack weights = dg.basis diff --git a/src/callbacks_step/stepsize_dg2d.jl b/src/callbacks_step/stepsize_dg2d.jl index 673c3ba6aa6..c6d32c0f6dc 100644 --- a/src/callbacks_step/stepsize_dg2d.jl +++ b/src/callbacks_step/stepsize_dg2d.jl @@ -174,4 +174,36 @@ function max_dt(u, t, mesh::ParallelP4estMesh{2}, return dt end + +function max_dt(u, t, mesh::ParallelT8codeMesh{2}, + constant_speed::False, equations, dg::DG, cache) + # call the method accepting a general `mesh::T8codeMesh{2}` + # TODO: MPI, we should improve this; maybe we should dispatch on `u` + # and create some MPI array type, overloading broadcasting and mapreduce etc. + # Then, this specific array type should also work well with DiffEq etc. + dt = invoke(max_dt, + Tuple{typeof(u), typeof(t), T8codeMesh{2}, + typeof(constant_speed), typeof(equations), typeof(dg), + typeof(cache)}, + u, t, mesh, constant_speed, equations, dg, cache) + dt = MPI.Allreduce!(Ref(dt), min, mpi_comm())[] + + return dt +end + +function max_dt(u, t, mesh::ParallelT8codeMesh{2}, + constant_speed::True, equations, dg::DG, cache) + # call the method accepting a general `mesh::T8codeMesh{2}` + # TODO: MPI, we should improve this; maybe we should dispatch on `u` + # and create some MPI array type, overloading broadcasting and mapreduce etc. 
+ # Then, this specific array type should also work well with DiffEq etc. + dt = invoke(max_dt, + Tuple{typeof(u), typeof(t), T8codeMesh{2}, + typeof(constant_speed), typeof(equations), typeof(dg), + typeof(cache)}, + u, t, mesh, constant_speed, equations, dg, cache) + dt = MPI.Allreduce!(Ref(dt), min, mpi_comm())[] + + return dt +end end # @muladd diff --git a/src/callbacks_step/stepsize_dg3d.jl b/src/callbacks_step/stepsize_dg3d.jl index 822ab2f87ec..664596f989e 100644 --- a/src/callbacks_step/stepsize_dg3d.jl +++ b/src/callbacks_step/stepsize_dg3d.jl @@ -150,4 +150,36 @@ function max_dt(u, t, mesh::ParallelP4estMesh{3}, return dt end + +function max_dt(u, t, mesh::ParallelT8codeMesh{3}, + constant_speed::False, equations, dg::DG, cache) + # call the method accepting a general `mesh::T8codeMesh{3}` + # TODO: MPI, we should improve this; maybe we should dispatch on `u` + # and create some MPI array type, overloading broadcasting and mapreduce etc. + # Then, this specific array type should also work well with DiffEq etc. + dt = invoke(max_dt, + Tuple{typeof(u), typeof(t), T8codeMesh{3}, + typeof(constant_speed), typeof(equations), typeof(dg), + typeof(cache)}, + u, t, mesh, constant_speed, equations, dg, cache) + dt = MPI.Allreduce!(Ref(dt), min, mpi_comm())[] + + return dt +end + +function max_dt(u, t, mesh::ParallelT8codeMesh{3}, + constant_speed::True, equations, dg::DG, cache) + # call the method accepting a general `mesh::T8codeMesh{3}` + # TODO: MPI, we should improve this; maybe we should dispatch on `u` + # and create some MPI array type, overloading broadcasting and mapreduce etc. + # Then, this specific array type should also work well with DiffEq etc. + dt = invoke(max_dt, + Tuple{typeof(u), typeof(t), T8codeMesh{3}, + typeof(constant_speed), typeof(equations), typeof(dg), + typeof(cache)}, + u, t, mesh, constant_speed, equations, dg, cache) + dt = MPI.Allreduce!(Ref(dt), min, mpi_comm())[] + + return dt +end end # @muladd diff --git a/src/meshes/p4est_mesh.jl b/src/meshes/p4est_mesh.jl index c5d39ef00c0..abe9d9345b5 100644 --- a/src/meshes/p4est_mesh.jl +++ b/src/meshes/p4est_mesh.jl @@ -1700,6 +1700,10 @@ function bilinear_interpolation!(coordinate, face_vertices, u, v) end end +function get_global_first_element_ids(mesh::P4estMesh) + return unsafe_wrap(Array, mesh.p4est.global_first_quadrant, mpi_nranks() + 1) +end + function balance!(mesh::P4estMesh{2}, init_fn = C_NULL) p4est_balance(mesh.p4est, P4EST_CONNECT_FACE, init_fn) # Due to a bug in `p4est`, the forest needs to be rebalanced twice sometimes diff --git a/src/meshes/t8code_mesh.jl b/src/meshes/t8code_mesh.jl index c9665a22af9..6fb4d861d10 100644 --- a/src/meshes/t8code_mesh.jl +++ b/src/meshes/t8code_mesh.jl @@ -7,8 +7,6 @@ to manage trees and mesh refinement. """ mutable struct T8codeMesh{NDIMS, RealT <: Real, IsParallel, NDIMSP2, NNODES} <: AbstractMesh{NDIMS} - cmesh :: Ptr{t8_cmesh} # cpointer to coarse mesh - scheme :: Ptr{t8_eclass_scheme} # cpointer to element scheme forest :: Ptr{t8_forest} # cpointer to forest is_parallel :: IsParallel @@ -25,14 +23,15 @@ mutable struct T8codeMesh{NDIMS, RealT <: Real, IsParallel, NDIMSP2, NNODES} <: nmortars :: Int nboundaries :: Int - function T8codeMesh{NDIMS}(cmesh, scheme, forest, tree_node_coordinates, nodes, + nmpiinterfaces :: Int + nmpimortars :: Int + + function T8codeMesh{NDIMS}(forest, tree_node_coordinates, nodes, boundary_names, current_filename) where {NDIMS} - is_parallel = False() + is_parallel = mpi_isparallel() ? 
True() : False() - mesh = new{NDIMS, Float64, typeof(is_parallel), NDIMS + 2, length(nodes)}(cmesh, - scheme, - forest, + mesh = new{NDIMS, Float64, typeof(is_parallel), NDIMS + 2, length(nodes)}(forest, is_parallel) mesh.nodes = nodes @@ -52,7 +51,7 @@ mutable struct T8codeMesh{NDIMS, RealT <: Real, IsParallel, NDIMSP2, NNODES} <: # further down. However, this might cause a pile-up of `mesh` # objects during long-running sessions. if !MPI.Finalized() - trixi_t8_unref_forest(mesh.forest) + t8_forest_unref(Ref(mesh.forest)) end end @@ -63,7 +62,7 @@ mutable struct T8codeMesh{NDIMS, RealT <: Real, IsParallel, NDIMSP2, NNODES} <: # more information. if haskey(ENV, "TRIXI_T8CODE_SC_FINALIZE") MPI.add_finalize_hook!() do - trixi_t8_unref_forest(mesh.forest) + t8_forest_unref(Ref(mesh.forest)) end end @@ -72,16 +71,15 @@ mutable struct T8codeMesh{NDIMS, RealT <: Real, IsParallel, NDIMSP2, NNODES} <: end const SerialT8codeMesh{NDIMS} = T8codeMesh{NDIMS, <:Real, <:False} +const ParallelT8codeMesh{NDIMS} = T8codeMesh{NDIMS, <:Real, <:True} @inline mpi_parallel(mesh::SerialT8codeMesh) = False() +@inline mpi_parallel(mesh::ParallelT8codeMesh) = True() @inline Base.ndims(::T8codeMesh{NDIMS}) where {NDIMS} = NDIMS @inline Base.real(::T8codeMesh{NDIMS, RealT}) where {NDIMS, RealT} = RealT -@inline ntrees(mesh::T8codeMesh) = Int(t8_forest_get_num_local_trees(mesh.forest)) +@inline ntrees(mesh::T8codeMesh) = size(mesh.tree_node_coordinates)[end] @inline ncells(mesh::T8codeMesh) = Int(t8_forest_get_local_num_elements(mesh.forest)) -@inline ninterfaces(mesh::T8codeMesh) = mesh.ninterfaces -@inline nmortars(mesh::T8codeMesh) = mesh.nmortars -@inline nboundaries(mesh::T8codeMesh) = mesh.nboundaries function Base.show(io::IO, mesh::T8codeMesh) print(io, "T8codeMesh{", ndims(mesh), ", ", real(mesh), "}") @@ -184,21 +182,23 @@ function T8codeMesh(trees_per_dimension; polydeg = 1, T8code.Libt8.p8est_connectivity_destroy(conn) end + do_face_ghost = mpi_isparallel() scheme = t8_scheme_new_default_cxx() - forest = t8_forest_new_uniform(cmesh, scheme, initial_refinement_level, 0, mpi_comm()) + forest = t8_forest_new_uniform(cmesh, scheme, initial_refinement_level, do_face_ghost, + mpi_comm()) basis = LobattoLegendreBasis(RealT, polydeg) nodes = basis.nodes + num_trees = t8_cmesh_get_num_trees(cmesh) + tree_node_coordinates = Array{RealT, NDIMS + 2}(undef, NDIMS, ntuple(_ -> length(nodes), NDIMS)..., - prod(trees_per_dimension)) + num_trees) # Get cell length in reference mesh: Omega_ref = [-1,1]^NDIMS. dx = [2 / n for n in trees_per_dimension] - num_local_trees = t8_cmesh_get_num_local_trees(cmesh) - # Non-periodic boundaries. boundary_names = fill(Symbol("---"), 2 * NDIMS, prod(trees_per_dimension)) @@ -208,7 +208,7 @@ function T8codeMesh(trees_per_dimension; polydeg = 1, mapping_ = mapping end - for itree in 1:num_local_trees + for itree in 1:num_trees veptr = t8_cmesh_get_tree_vertices(cmesh, itree - 1) verts = unsafe_wrap(Array, veptr, (3, 1 << NDIMS)) @@ -256,7 +256,7 @@ function T8codeMesh(trees_per_dimension; polydeg = 1, end end - return T8codeMesh{NDIMS}(cmesh, scheme, forest, tree_node_coordinates, nodes, + return T8codeMesh{NDIMS}(forest, tree_node_coordinates, nodes, boundary_names, "") end @@ -290,21 +290,25 @@ function T8codeMesh(cmesh::Ptr{t8_cmesh}; @assert (NDIMS == 2||NDIMS == 3) "NDIMS should be 2 or 3." 
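# Illustrative note (not part of this patch): the `is_parallel` field stores the
# singleton types `True()`/`False()`, so a `T8codeMesh` created under MPI matches
# the new `ParallelT8codeMesh` alias and parallel-only methods can dispatch
# statically, e.g.
#
#   function calc_error_norms(func, u, t, analyzer,
#                             mesh::Union{ParallelP4estMesh{2}, ParallelT8codeMesh{2}},
#                             equations, initial_condition, dg::DGSEM, cache,
#                             cache_analysis)
#
# mirroring how `ParallelP4estMesh` is already handled.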
+ do_face_ghost = mpi_isparallel() scheme = t8_scheme_new_default_cxx() - forest = t8_forest_new_uniform(cmesh, scheme, initial_refinement_level, 0, mpi_comm()) + forest = t8_forest_new_uniform(cmesh, scheme, initial_refinement_level, do_face_ghost, + mpi_comm()) basis = LobattoLegendreBasis(RealT, polydeg) nodes = basis.nodes - num_local_trees = t8_cmesh_get_num_local_trees(cmesh) + num_trees = t8_cmesh_get_num_trees(cmesh) tree_node_coordinates = Array{RealT, NDIMS + 2}(undef, NDIMS, ntuple(_ -> length(nodes), NDIMS)..., - num_local_trees) + num_trees) nodes_in = [-1.0, 1.0] matrix = polynomial_interpolation_matrix(nodes_in, nodes) + num_local_trees = t8_cmesh_get_num_local_trees(cmesh) + if NDIMS == 2 data_in = Array{RealT, 3}(undef, 2, 2, 2) tmp1 = zeros(RealT, 2, length(nodes), length(nodes_in)) @@ -353,7 +357,7 @@ function T8codeMesh(cmesh::Ptr{t8_cmesh}; tmp1 = zeros(RealT, 3, length(nodes), length(nodes_in), length(nodes_in)) verts = zeros(3, 8) - for itree in 0:(num_local_trees - 1) + for itree in 0:(num_trees - 1) veptr = t8_cmesh_get_tree_vertices(cmesh, itree) # Note, `verts = unsafe_wrap(Array, veptr, (3, 1 << NDIMS))` @@ -387,9 +391,9 @@ function T8codeMesh(cmesh::Ptr{t8_cmesh}; map_node_coordinates!(tree_node_coordinates, mapping) # There's no simple and generic way to distinguish boundaries. Name all of them :all. - boundary_names = fill(:all, 2 * NDIMS, num_local_trees) + boundary_names = fill(:all, 2 * NDIMS, num_trees) - return T8codeMesh{NDIMS}(cmesh, scheme, forest, tree_node_coordinates, nodes, + return T8codeMesh{NDIMS}(forest, tree_node_coordinates, nodes, boundary_names, "") end @@ -442,7 +446,7 @@ function T8codeMesh(conn::Ptr{p8est_connectivity}; kwargs...) end """ - T8codeMesh{NDIMS}(meshfile::String; kwargs...) + T8codeMesh(meshfile::String, ndims; kwargs...) Main mesh constructor for the `T8codeMesh` that imports an unstructured, conforming mesh from a Gmsh mesh file (`.msh`). @@ -461,7 +465,6 @@ mesh from a Gmsh mesh file (`.msh`). - `initial_refinement_level::Integer`: refine the mesh uniformly to this level before the simulation starts. """ function T8codeMesh(meshfile::String, ndims; kwargs...) - # Prevent `t8code` from crashing Julia if the file doesn't exist. @assert isfile(meshfile) @@ -586,13 +589,525 @@ function adapt!(mesh::T8codeMesh, adapt_callback; recursive = true, balance = tr return nothing end -# TODO: Just a placeholder. Will be implemented later when MPI is supported. -function balance!(mesh::T8codeMesh, init_fn = C_NULL) +""" + Trixi.balance!(mesh::T8codeMesh) + +Balance a `T8codeMesh` to ensure 2^(NDIMS-1):1 face neighbors. +""" +function balance!(mesh::T8codeMesh) + new_forest_ref = Ref{t8_forest_t}() + t8_forest_init(new_forest_ref) + new_forest = new_forest_ref[] + + let set_from = mesh.forest, no_repartition = 1, do_ghost = 1 + t8_forest_set_balance(new_forest, set_from, no_repartition) + t8_forest_set_ghost(new_forest, do_ghost, T8_GHOST_FACES) + t8_forest_commit(new_forest) + end + + mesh.forest = new_forest + + return nothing +end + +""" + Trixi.partition!(mesh::T8codeMesh) + +Partition a `T8codeMesh` in order to redistribute elements evenly among MPI ranks. + +# Arguments +- `mesh::T8codeMesh`: Initialized mesh object. 
+""" +function partition!(mesh::T8codeMesh) + new_forest_ref = Ref{t8_forest_t}() + t8_forest_init(new_forest_ref) + new_forest = new_forest_ref[] + + let set_from = mesh.forest, do_ghost = 1, allow_for_coarsening = 1 + t8_forest_set_partition(new_forest, set_from, allow_for_coarsening) + t8_forest_set_ghost(new_forest, do_ghost, T8_GHOST_FACES) + t8_forest_commit(new_forest) + end + + mesh.forest = new_forest + return nothing end -# TODO: Just a placeholder. Will be implemented later when MPI is supported. -function partition!(mesh::T8codeMesh; allow_coarsening = true, weight_fn = C_NULL) +# Compute the global ids (zero-indexed) of first element in each MPI rank. +function get_global_first_element_ids(mesh::T8codeMesh) + n_elements_local = Int(t8_forest_get_local_num_elements(mesh.forest)) + n_elements_by_rank = Vector{Int}(undef, mpi_nranks()) + n_elements_by_rank[mpi_rank() + 1] = n_elements_local + MPI.Allgather!(MPI.UBuffer(n_elements_by_rank, 1), mpi_comm()) + return [sum(n_elements_by_rank[1:(rank - 1)]) for rank in 1:(mpi_nranks() + 1)] +end + +function count_interfaces(mesh::T8codeMesh) + @assert t8_forest_is_committed(mesh.forest) != 0 + + num_local_elements = t8_forest_get_local_num_elements(mesh.forest) + num_local_trees = t8_forest_get_num_local_trees(mesh.forest) + + current_index = t8_locidx_t(0) + + local_num_conform = 0 + local_num_mortars = 0 + local_num_boundary = 0 + + local_num_mpi_conform = 0 + local_num_mpi_mortars = 0 + + visited_global_mortar_ids = Set{UInt64}([]) + + max_level = t8_forest_get_maxlevel(mesh.forest) #UInt64 + max_tree_num_elements = UInt64(2^ndims(mesh))^max_level + + if mpi_isparallel() + ghost_num_trees = t8_forest_ghost_num_trees(mesh.forest) + + ghost_tree_element_offsets = [num_local_elements + + t8_forest_ghost_get_tree_element_offset(mesh.forest, + itree) + for itree in 0:(ghost_num_trees - 1)] + ghost_global_treeids = [t8_forest_ghost_get_global_treeid(mesh.forest, itree) + for itree in 0:(ghost_num_trees - 1)] + end + + for itree in 0:(num_local_trees - 1) + tree_class = t8_forest_get_tree_class(mesh.forest, itree) + eclass_scheme = t8_forest_get_eclass_scheme(mesh.forest, tree_class) + + num_elements_in_tree = t8_forest_get_tree_num_elements(mesh.forest, itree) + + global_itree = t8_forest_global_tree_id(mesh.forest, itree) + + for ielement in 0:(num_elements_in_tree - 1) + element = t8_forest_get_element_in_tree(mesh.forest, itree, ielement) + + level = t8_element_level(eclass_scheme, element) + + num_faces = t8_element_num_faces(eclass_scheme, element) + + # Note: This works only for forests of one element class. 
+ current_linear_id = global_itree * max_tree_num_elements + + t8_element_get_linear_id(eclass_scheme, element, max_level) + + for iface in 0:(num_faces - 1) + pelement_indices_ref = Ref{Ptr{t8_locidx_t}}() + pneighbor_leafs_ref = Ref{Ptr{Ptr{t8_element}}}() + pneigh_scheme_ref = Ref{Ptr{t8_eclass_scheme}}() + + dual_faces_ref = Ref{Ptr{Cint}}() + num_neighbors_ref = Ref{Cint}() + + forest_is_balanced = Cint(1) + + t8_forest_leaf_face_neighbors(mesh.forest, itree, element, + pneighbor_leafs_ref, iface, dual_faces_ref, + num_neighbors_ref, + pelement_indices_ref, pneigh_scheme_ref, + forest_is_balanced) + + num_neighbors = num_neighbors_ref[] + dual_faces = unsafe_wrap(Array, dual_faces_ref[], num_neighbors) + neighbor_ielements = unsafe_wrap(Array, pelement_indices_ref[], + num_neighbors) + neighbor_leafs = unsafe_wrap(Array, pneighbor_leafs_ref[], num_neighbors) + neighbor_scheme = pneigh_scheme_ref[] + + if num_neighbors == 0 + local_num_boundary += 1 + else + neighbor_level = t8_element_level(neighbor_scheme, neighbor_leafs[1]) + + if all(neighbor_ielements .< num_local_elements) + # Conforming interface: The second condition ensures we + # only visit the interface once. + if level == neighbor_level && current_index <= neighbor_ielements[1] + local_num_conform += 1 + elseif level < neighbor_level + local_num_mortars += 1 + # `else level > neighbor_level` is ignored since we + # only want to count the mortar interface once. + end + else + if level == neighbor_level + local_num_mpi_conform += 1 + elseif level < neighbor_level + local_num_mpi_mortars += 1 + + global_mortar_id = 2 * ndims(mesh) * current_linear_id + iface + + else # level > neighbor_level + neighbor_global_ghost_itree = ghost_global_treeids[findlast(ghost_tree_element_offsets .<= + neighbor_ielements[1])] + neighbor_linear_id = neighbor_global_ghost_itree * + max_tree_num_elements + + t8_element_get_linear_id(neighbor_scheme, + neighbor_leafs[1], + max_level) + global_mortar_id = 2 * ndims(mesh) * neighbor_linear_id + + dual_faces[1] + + if !(global_mortar_id in visited_global_mortar_ids) + push!(visited_global_mortar_ids, global_mortar_id) + local_num_mpi_mortars += 1 + end + end + end + end + + t8_free(dual_faces_ref[]) + t8_free(pneighbor_leafs_ref[]) + t8_free(pelement_indices_ref[]) + end # for + + current_index += 1 + end # for + end # for + + return (interfaces = local_num_conform, + mortars = local_num_mortars, + boundaries = local_num_boundary, + mpi_interfaces = local_num_mpi_conform, + mpi_mortars = local_num_mpi_mortars) +end + +# I know this routine is an unmaintainable behemoth. However, I see no real +# and elegant way to refactor this into, for example, smaller parts. The +# `t8_forest_leaf_face_neighbors` routine is as of now rather costly and it +# makes sense to query it only once per face per element and extract all the +# information needed at once in order to fill the connectivity information. +# Instead, I opted for good documentation. +function fill_mesh_info!(mesh::T8codeMesh, interfaces, mortars, boundaries, + boundary_names; mpi_mesh_info = nothing) + @assert t8_forest_is_committed(mesh.forest) != 0 + + num_local_elements = t8_forest_get_local_num_elements(mesh.forest) + num_local_trees = t8_forest_get_num_local_trees(mesh.forest) + + if !isnothing(mpi_mesh_info) + #! 
format: off + remotes = t8_forest_ghost_get_remotes(mesh.forest) + ghost_num_trees = t8_forest_ghost_num_trees(mesh.forest) + + ghost_remote_first_elem = [num_local_elements + + t8_forest_ghost_remote_first_elem(mesh.forest, remote) + for remote in remotes] + + ghost_tree_element_offsets = [num_local_elements + + t8_forest_ghost_get_tree_element_offset(mesh.forest, itree) + for itree in 0:(ghost_num_trees - 1)] + + ghost_global_treeids = [t8_forest_ghost_get_global_treeid(mesh.forest, itree) + for itree in 0:(ghost_num_trees - 1)] + #! format: on + end + + # Process-local index of the current element in the space-filling curve. + current_index = t8_locidx_t(0) + + # Increment counters for the different interface/mortar/boundary types. + local_num_conform = 0 + local_num_mortars = 0 + local_num_boundary = 0 + + local_num_mpi_conform = 0 + local_num_mpi_mortars = 0 + + # Works for quads and hexs only. This mapping is needed in the MPI mortar + # sections below. + map_iface_to_ichild_to_position = [ + # 0 1 2 3 4 5 6 7 ichild/iface + [1, 0, 2, 0, 3, 0, 4, 0], # 0 + [0, 1, 0, 2, 0, 3, 0, 4], # 1 + [1, 2, 0, 0, 3, 4, 0, 0], # 2 + [0, 0, 1, 2, 0, 0, 3, 4], # 3 + [1, 2, 3, 4, 0, 0, 0, 0], # 4 + [0, 0, 0, 0, 1, 2, 3, 4], # 5 + ] + + # Helper variables to compute unique global MPI interface/mortar ids. + max_level = t8_forest_get_maxlevel(mesh.forest) #UInt64 + max_tree_num_elements = UInt64(2^ndims(mesh))^max_level + + # These two variables help to ensure that we count MPI mortars from smaller + # elements point of view only once. + visited_global_mortar_ids = Set{UInt64}([]) + global_mortar_id_to_local = Dict{UInt64, Int}([]) + + # Loop over all local trees. + for itree in 0:(num_local_trees - 1) + tree_class = t8_forest_get_tree_class(mesh.forest, itree) + eclass_scheme = t8_forest_get_eclass_scheme(mesh.forest, tree_class) + + num_elements_in_tree = t8_forest_get_tree_num_elements(mesh.forest, itree) + + global_itree = t8_forest_global_tree_id(mesh.forest, itree) + + # Loop over all local elements of the current local tree. + for ielement in 0:(num_elements_in_tree - 1) + element = t8_forest_get_element_in_tree(mesh.forest, itree, ielement) + + level = t8_element_level(eclass_scheme, element) + + num_faces = t8_element_num_faces(eclass_scheme, element) + + # Note: This works only for forests of one element class. + current_linear_id = global_itree * max_tree_num_elements + + t8_element_get_linear_id(eclass_scheme, element, max_level) + + # Loop over all faces of the current local element. + for iface in 0:(num_faces - 1) + # Compute the `orientation` of the touching faces. + if t8_element_is_root_boundary(eclass_scheme, element, iface) == 1 + cmesh = t8_forest_get_cmesh(mesh.forest) + itree_in_cmesh = t8_forest_ltreeid_to_cmesh_ltreeid(mesh.forest, itree) + iface_in_tree = t8_element_tree_face(eclass_scheme, element, iface) + orientation_ref = Ref{Cint}() + + t8_cmesh_get_face_neighbor(cmesh, itree_in_cmesh, iface_in_tree, C_NULL, + orientation_ref) + orientation = orientation_ref[] + else + orientation = zero(Cint) + end + + pelement_indices_ref = Ref{Ptr{t8_locidx_t}}() + pneighbor_leafs_ref = Ref{Ptr{Ptr{t8_element}}}() + pneigh_scheme_ref = Ref{Ptr{t8_eclass_scheme}}() + + dual_faces_ref = Ref{Ptr{Cint}}() + num_neighbors_ref = Ref{Cint}() + + forest_is_balanced = Cint(1) + + # Query neighbor information from t8code. 
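
Both `count_interfaces` and `fill_mesh_info!` above derive a per-tree stride `max_tree_num_elements = UInt64(2^ndims(mesh))^max_level` and combine it with `t8_element_get_linear_id` to build element ids that are unique across the whole forest; those ids are used below to tag MPI interfaces and mortars. A small illustration of the arithmetic (plain `Int`s here, whereas the patch stores the stride as a `UInt64`):

```julia
# Illustration (not Trixi code) of the unique-id stride used above: with a
# maximum refinement level `max_level`, one tree holds at most
# (2^NDIMS)^max_level elements, so the global tree id times this stride plus
# the element's space-filling-curve id within the tree is unique forest-wide.
NDIMS = 2
max_level = 3                                # hypothetical maximum level
max_tree_num_elements = (2^NDIMS)^max_level  # 64 elements per tree at most

unique_id(global_itree, id_in_tree) = global_itree * max_tree_num_elements + id_in_tree

@show unique_id(0, 5)   # element 5 of tree 0 -> 5
@show unique_id(2, 5)   # element 5 of tree 2 -> 133
```

Since a single tree can never contain more than `(2^NDIMS)^max_level` elements, ids originating from different trees cannot collide.
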
+ t8_forest_leaf_face_neighbors(mesh.forest, itree, element, + pneighbor_leafs_ref, iface, dual_faces_ref, + num_neighbors_ref, + pelement_indices_ref, pneigh_scheme_ref, + forest_is_balanced) + + num_neighbors = num_neighbors_ref[] + dual_faces = unsafe_wrap(Array, dual_faces_ref[], num_neighbors) + neighbor_ielements = unsafe_wrap(Array, pelement_indices_ref[], + num_neighbors) + neighbor_leafs = unsafe_wrap(Array, pneighbor_leafs_ref[], num_neighbors) + neighbor_scheme = pneigh_scheme_ref[] + + # Now we check for the different cases. The nested if-structure is as follows: + # + # if `boundary`: + # + # + # else: // It must be an interface or mortar. + # + # if `all neighbors are local elements`: + # + # if `local interface`: + # + # elseif `local mortar from larger element point of view`: + # + # else: // `local mortar from smaller elements point of view` + # // We only count local mortars once. + # + # else: // It must be either a MPI interface or a MPI mortar. + # + # if `MPI interface`: + # + # elseif `MPI mortar from larger element point of view`: + # + # else: // `MPI mortar from smaller elements point of view` + # + # + # // end + + # Domain boundary. + if num_neighbors == 0 + local_num_boundary += 1 + boundary_id = local_num_boundary + + boundaries.neighbor_ids[boundary_id] = current_index + 1 + + init_boundary_node_indices!(boundaries, iface, boundary_id) + + # One-based indexing. + boundaries.name[boundary_id] = boundary_names[iface + 1, itree + 1] + + # Interface or mortar. + else + neighbor_level = t8_element_level(neighbor_scheme, neighbor_leafs[1]) + + # Local interface or mortar. + if all(neighbor_ielements .< num_local_elements) + + # Local interface: The second condition ensures we only visit the interface once. + if level == neighbor_level && current_index <= neighbor_ielements[1] + local_num_conform += 1 + + interfaces.neighbor_ids[1, local_num_conform] = current_index + + 1 + interfaces.neighbor_ids[2, local_num_conform] = neighbor_ielements[1] + + 1 + + init_interface_node_indices!(interfaces, (iface, dual_faces[1]), + orientation, + local_num_conform) + # Local mortar. + elseif level < neighbor_level + local_num_mortars += 1 + + # Last entry is the large element. + mortars.neighbor_ids[end, local_num_mortars] = current_index + 1 + + init_mortar_neighbor_ids!(mortars, iface, dual_faces[1], + orientation, neighbor_ielements, + local_num_mortars) + + init_mortar_node_indices!(mortars, (dual_faces[1], iface), + orientation, local_num_mortars) + + # else: `level > neighbor_level` is skipped since we visit the mortar interface only once. + end + + # MPI interface or MPI mortar. + else + + # MPI interface. 
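
The long comment block above documents the nested decision structure that sorts every element face into a boundary, interface, or mortar, each in a local and an MPI flavor. The self-contained sketch below condenses that logic into one function; it omits the bookkeeping that ensures each interface or mortar is counted only once, and all inputs are plain integers and vectors standing in for the data returned by `t8_forest_leaf_face_neighbors`.

```julia
# Compact, standalone sketch of the face classification documented above.
# Inputs are stand-ins for the t8code query results; illustrative only.
function classify_face(num_neighbors, level, neighbor_level,
                       neighbor_ielements, num_local_elements)
    if num_neighbors == 0
        return :boundary
    elseif all(neighbor_ielements .< num_local_elements)  # all neighbors rank-local
        if level == neighbor_level
            return :local_interface
        elseif level < neighbor_level
            return :local_mortar        # seen from the large element
        else
            return :skip                # small side of a local mortar, counted elsewhere
        end
    else                                # at least one neighbor is a ghost element
        if level == neighbor_level
            return :mpi_interface
        elseif level < neighbor_level
            return :mpi_mortar_large
        else
            return :mpi_mortar_small
        end
    end
end

@show classify_face(1, 2, 2, [3], 10)       # -> :local_interface
@show classify_face(2, 1, 2, [3, 12], 10)   # -> :mpi_mortar_large
@show classify_face(0, 1, 1, Int[], 10)     # -> :boundary
```
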
+ if level == neighbor_level + local_num_mpi_conform += 1 + + neighbor_global_ghost_itree = ghost_global_treeids[findlast(ghost_tree_element_offsets .<= + neighbor_ielements[1])] + + neighbor_linear_id = neighbor_global_ghost_itree * + max_tree_num_elements + + t8_element_get_linear_id(neighbor_scheme, + neighbor_leafs[1], + max_level) + + if current_linear_id < neighbor_linear_id + local_side = 1 + smaller_iface = iface + smaller_linear_id = current_linear_id + faces = (iface, dual_faces[1]) + else + local_side = 2 + smaller_iface = dual_faces[1] + smaller_linear_id = neighbor_linear_id + faces = (dual_faces[1], iface) + end + + global_interface_id = 2 * ndims(mesh) * smaller_linear_id + + smaller_iface + + mpi_mesh_info.mpi_interfaces.local_neighbor_ids[local_num_mpi_conform] = current_index + + 1 + mpi_mesh_info.mpi_interfaces.local_sides[local_num_mpi_conform] = local_side + + init_mpi_interface_node_indices!(mpi_mesh_info.mpi_interfaces, + faces, local_side, orientation, + local_num_mpi_conform) + + neighbor_rank = remotes[findlast(ghost_remote_first_elem .<= + neighbor_ielements[1])] + mpi_mesh_info.neighbor_ranks_interface[local_num_mpi_conform] = neighbor_rank + + mpi_mesh_info.global_interface_ids[local_num_mpi_conform] = global_interface_id + + # MPI Mortar: from larger element point of view + elseif level < neighbor_level + local_num_mpi_mortars += 1 + + global_mortar_id = 2 * ndims(mesh) * current_linear_id + iface + + neighbor_ids = neighbor_ielements .+ 1 + + local_neighbor_positions = findall(neighbor_ids .<= + num_local_elements) + local_neighbor_ids = [neighbor_ids[i] + for i in local_neighbor_positions] + local_neighbor_positions = [map_iface_to_ichild_to_position[dual_faces[1] + 1][t8_element_child_id(neighbor_scheme, neighbor_leafs[i]) + 1] + for i in local_neighbor_positions] + + # Last entry is the large element. 
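
MPI interfaces and mortars above are tagged with integer ids of the form `2 * ndims(mesh) * linear_id + face`, built from the smaller of the two element linear ids (interfaces) or from the small-element side (mortars), so that both ranks arrive at the same id independently. A minimal sketch of the encoding and its inverse, with names local to this example:

```julia
# Sketch (not Trixi code) of the global-id encoding used above for MPI
# interfaces and mortars: with at most 2 * NDIMS faces per element, the pair
# (element linear id, face index) maps to a unique integer and back.
NDIMS = 2
nfaces = 2 * NDIMS

encode_face_id(linear_id, iface) = nfaces * linear_id + iface   # iface in 0:nfaces-1
decode_face_id(global_id) = (global_id ÷ nfaces, global_id % nfaces)

gid = encode_face_id(133, 3)
@show gid                   # gid = 535
@show decode_face_id(gid)   # (133, 3)
```

These ids only need to be unique and agreed upon by both ranks; further down, `init_mpi_neighbor_connectivity` sorts by them (`sortperm(global_interface_ids)`), which is what keeps the send and receive buffer ordering consistent between neighboring ranks.
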
+ push!(local_neighbor_ids, current_index + 1) + push!(local_neighbor_positions, 2^(ndims(mesh) - 1) + 1) + + mpi_mesh_info.mpi_mortars.local_neighbor_ids[local_num_mpi_mortars] = local_neighbor_ids + mpi_mesh_info.mpi_mortars.local_neighbor_positions[local_num_mpi_mortars] = local_neighbor_positions + + init_mortar_node_indices!(mpi_mesh_info.mpi_mortars, + (dual_faces[1], iface), orientation, + local_num_mpi_mortars) + + neighbor_ranks = [remotes[findlast(ghost_remote_first_elem .<= + ineighbor_ghost)] + for ineighbor_ghost in filter(x -> x >= + num_local_elements, + neighbor_ielements)] + mpi_mesh_info.neighbor_ranks_mortar[local_num_mpi_mortars] = neighbor_ranks + + mpi_mesh_info.global_mortar_ids[local_num_mpi_mortars] = global_mortar_id + + # MPI Mortar: from smaller elements point of view + else + neighbor_global_ghost_itree = ghost_global_treeids[findlast(ghost_tree_element_offsets .<= + neighbor_ielements[1])] + neighbor_linear_id = neighbor_global_ghost_itree * + max_tree_num_elements + + t8_element_get_linear_id(neighbor_scheme, + neighbor_leafs[1], + max_level) + global_mortar_id = 2 * ndims(mesh) * neighbor_linear_id + + dual_faces[1] + + if global_mortar_id in visited_global_mortar_ids + local_mpi_mortar_id = global_mortar_id_to_local[global_mortar_id] + + push!(mpi_mesh_info.mpi_mortars.local_neighbor_ids[local_mpi_mortar_id], + current_index + 1) + push!(mpi_mesh_info.mpi_mortars.local_neighbor_positions[local_mpi_mortar_id], + map_iface_to_ichild_to_position[iface + 1][t8_element_child_id(eclass_scheme, element) + 1]) + else + local_num_mpi_mortars += 1 + local_mpi_mortar_id = local_num_mpi_mortars + push!(visited_global_mortar_ids, global_mortar_id) + global_mortar_id_to_local[global_mortar_id] = local_mpi_mortar_id + + mpi_mesh_info.mpi_mortars.local_neighbor_ids[local_mpi_mortar_id] = [ + current_index + 1, + ] + mpi_mesh_info.mpi_mortars.local_neighbor_positions[local_mpi_mortar_id] = [ + map_iface_to_ichild_to_position[iface + 1][t8_element_child_id(eclass_scheme, element) + 1], + ] + init_mortar_node_indices!(mpi_mesh_info.mpi_mortars, + (iface, dual_faces[1]), + orientation, local_mpi_mortar_id) + + neighbor_ranks = [ + remotes[findlast(ghost_remote_first_elem .<= + neighbor_ielements[1])], + ] + mpi_mesh_info.neighbor_ranks_mortar[local_mpi_mortar_id] = neighbor_ranks + + mpi_mesh_info.global_mortar_ids[local_mpi_mortar_id] = global_mortar_id + end + end + end + end + + t8_free(dual_faces_ref[]) + t8_free(pneighbor_leafs_ref[]) + t8_free(pelement_indices_ref[]) + end # for iface + + current_index += 1 + end # for ielement + end # for itree + return nothing end diff --git a/src/solvers/dgsem_p4est/containers_parallel.jl b/src/solvers/dgsem_p4est/containers_parallel.jl index 7c7bd868457..fd2749155bb 100644 --- a/src/solvers/dgsem_p4est/containers_parallel.jl +++ b/src/solvers/dgsem_p4est/containers_parallel.jl @@ -43,7 +43,8 @@ function Base.resize!(mpi_interfaces::P4estMPIInterfaceContainer, capacity) end # Create MPI interface container and initialize interface data -function init_mpi_interfaces(mesh::ParallelP4estMesh, equations, basis, elements) +function init_mpi_interfaces(mesh::Union{ParallelP4estMesh, ParallelT8codeMesh}, + equations, basis, elements) NDIMS = ndims(elements) uEltype = eltype(elements) @@ -133,7 +134,8 @@ function Base.resize!(mpi_mortars::P4estMPIMortarContainer, capacity) end # Create MPI mortar container and initialize MPI mortar data -function init_mpi_mortars(mesh::ParallelP4estMesh, equations, basis, elements) +function 
init_mpi_mortars(mesh::Union{ParallelP4estMesh, ParallelT8codeMesh}, equations, + basis, elements) NDIMS = ndims(mesh) RealT = real(mesh) uEltype = eltype(elements) diff --git a/src/solvers/dgsem_p4est/dg_2d_parallel.jl b/src/solvers/dgsem_p4est/dg_2d_parallel.jl index a8887351c46..3bf0cd0cab5 100644 --- a/src/solvers/dgsem_p4est/dg_2d_parallel.jl +++ b/src/solvers/dgsem_p4est/dg_2d_parallel.jl @@ -6,7 +6,8 @@ #! format: noindent function prolong2mpiinterfaces!(cache, u, - mesh::ParallelP4estMesh{2}, + mesh::Union{ParallelP4estMesh{2}, + ParallelT8codeMesh{2}}, equations, surface_integral, dg::DG) @unpack mpi_interfaces = cache index_range = eachnode(dg) @@ -43,7 +44,8 @@ function prolong2mpiinterfaces!(cache, u, end function calc_mpi_interface_flux!(surface_flux_values, - mesh::ParallelP4estMesh{2}, + mesh::Union{ParallelP4estMesh{2}, + ParallelT8codeMesh{2}}, nonconservative_terms, equations, surface_integral, dg::DG, cache) @unpack local_neighbor_ids, node_indices, local_sides = cache.mpi_interfaces @@ -106,7 +108,8 @@ end # Inlined version of the interface flux computation for conservation laws @inline function calc_mpi_interface_flux!(surface_flux_values, - mesh::P4estMesh{2}, + mesh::Union{ParallelP4estMesh{2}, + ParallelT8codeMesh{2}}, nonconservative_terms::False, equations, surface_integral, dg::DG, cache, interface_index, normal_direction, @@ -131,7 +134,8 @@ end end function prolong2mpimortars!(cache, u, - mesh::ParallelP4estMesh{2}, equations, + mesh::Union{ParallelP4estMesh{2}, ParallelT8codeMesh{2}}, + equations, mortar_l2::LobattoLegendreMortarL2, surface_integral, dg::DGSEM) @unpack node_indices = cache.mpi_mortars @@ -199,7 +203,7 @@ function prolong2mpimortars!(cache, u, end function calc_mpi_mortar_flux!(surface_flux_values, - mesh::ParallelP4estMesh{2}, + mesh::Union{ParallelP4estMesh{2}, ParallelT8codeMesh{2}}, nonconservative_terms, equations, mortar_l2::LobattoLegendreMortarL2, surface_integral, dg::DG, cache) @@ -253,7 +257,8 @@ end # Inlined version of the mortar flux computation on small elements for conservation laws @inline function calc_mpi_mortar_flux!(fstar, - mesh::ParallelP4estMesh{2}, + mesh::Union{ParallelP4estMesh{2}, + ParallelT8codeMesh{2}}, nonconservative_terms::False, equations, surface_integral, dg::DG, cache, mortar_index, position_index, normal_direction, @@ -271,7 +276,9 @@ end end @inline function mpi_mortar_fluxes_to_elements!(surface_flux_values, - mesh::ParallelP4estMesh{2}, equations, + mesh::Union{ParallelP4estMesh{2}, + ParallelT8codeMesh{2}}, + equations, mortar_l2::LobattoLegendreMortarL2, dg::DGSEM, cache, mortar, fstar, u_buffer) diff --git a/src/solvers/dgsem_p4est/dg_3d_parallel.jl b/src/solvers/dgsem_p4est/dg_3d_parallel.jl index 13bf2a1a2eb..e504e06d2c4 100644 --- a/src/solvers/dgsem_p4est/dg_3d_parallel.jl +++ b/src/solvers/dgsem_p4est/dg_3d_parallel.jl @@ -6,7 +6,7 @@ #! 
format: noindent function rhs!(du, u, t, - mesh::ParallelP4estMesh{3}, equations, + mesh::Union{ParallelP4estMesh{3}, ParallelT8codeMesh{3}}, equations, initial_condition, boundary_conditions, source_terms::Source, dg::DG, cache) where {Source} # Start to receive MPI data @@ -113,7 +113,8 @@ function rhs!(du, u, t, end function prolong2mpiinterfaces!(cache, u, - mesh::ParallelP4estMesh{3}, + mesh::Union{ParallelP4estMesh{3}, + ParallelT8codeMesh{3}}, equations, surface_integral, dg::DG) @unpack mpi_interfaces = cache index_range = eachnode(dg) @@ -160,7 +161,8 @@ function prolong2mpiinterfaces!(cache, u, end function calc_mpi_interface_flux!(surface_flux_values, - mesh::ParallelP4estMesh{3}, + mesh::Union{ParallelP4estMesh{3}, + ParallelT8codeMesh{3}}, nonconservative_terms, equations, surface_integral, dg::DG, cache) @unpack local_neighbor_ids, node_indices, local_sides = cache.mpi_interfaces @@ -237,7 +239,8 @@ end # Inlined version of the interface flux computation for conservation laws @inline function calc_mpi_interface_flux!(surface_flux_values, - mesh::P4estMesh{3}, + mesh::Union{ParallelP4estMesh{3}, + ParallelT8codeMesh{3}}, nonconservative_terms::False, equations, surface_integral, dg::DG, cache, interface_index, normal_direction, @@ -265,7 +268,8 @@ end end function prolong2mpimortars!(cache, u, - mesh::ParallelP4estMesh{3}, equations, + mesh::Union{ParallelP4estMesh{3}, ParallelT8codeMesh{3}}, + equations, mortar_l2::LobattoLegendreMortarL2, surface_integral, dg::DGSEM) @unpack node_indices = cache.mpi_mortars @@ -374,7 +378,7 @@ function prolong2mpimortars!(cache, u, end function calc_mpi_mortar_flux!(surface_flux_values, - mesh::ParallelP4estMesh{3}, + mesh::Union{ParallelP4estMesh{3}, ParallelT8codeMesh{3}}, nonconservative_terms, equations, mortar_l2::LobattoLegendreMortarL2, surface_integral, dg::DG, cache) @@ -437,7 +441,8 @@ end # Inlined version of the mortar flux computation on small elements for conservation laws @inline function calc_mpi_mortar_flux!(fstar, - mesh::ParallelP4estMesh{3}, + mesh::Union{ParallelP4estMesh{3}, + ParallelT8codeMesh{3}}, nonconservative_terms::False, equations, surface_integral, dg::DG, cache, mortar_index, position_index, normal_direction, @@ -456,7 +461,9 @@ end end @inline function mpi_mortar_fluxes_to_elements!(surface_flux_values, - mesh::ParallelP4estMesh{3}, equations, + mesh::Union{ParallelP4estMesh{3}, + ParallelT8codeMesh{3}}, + equations, mortar_l2::LobattoLegendreMortarL2, dg::DGSEM, cache, mortar, fstar, u_buffer, fstar_tmp) diff --git a/src/solvers/dgsem_p4est/dg_parallel.jl b/src/solvers/dgsem_p4est/dg_parallel.jl index 712ede2bfce..eaa6ab5cee2 100644 --- a/src/solvers/dgsem_p4est/dg_parallel.jl +++ b/src/solvers/dgsem_p4est/dg_parallel.jl @@ -166,7 +166,8 @@ end # at `index_base`+1 in the MPI buffer. `data_size` is the data size associated with each small # position (i.e. position 1 or 2). The data corresponding to the large side (i.e. position 3) has # size `2 * data_size`. -@inline function buffer_mortar_indices(mesh::ParallelP4estMesh{2}, index_base, +@inline function buffer_mortar_indices(mesh::Union{ParallelP4estMesh{2}, + ParallelT8codeMesh{2}}, index_base, data_size) return ( # first, last for local element in position 1 (small element) @@ -185,7 +186,8 @@ end # at `index_base`+1 in the MPI buffer. `data_size` is the data size associated with each small # position (i.e. position 1 to 4). The data corresponding to the large side (i.e. position 5) has # size `4 * data_size`. 
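
`buffer_mortar_indices` above maps each mortar position to a contiguous index range inside the flat MPI send/receive buffer: every small position occupies `data_size` entries, and the large side occupies twice that in 2D (four times in 3D). The sketch below reproduces the 2D layout with made-up numbers; `data_size` and `index_base` are illustrative values, not taken from the library.

```julia
# Standalone sketch of the 2D buffer layout described above: two small
# positions with `data_size` entries each, followed by the large side with
# 2 * data_size entries. All numbers are illustrative.
data_size = 4      # e.g. number of nodes times number of variables
index_base = 10    # offset of this mortar inside the MPI buffer

ranges = (
    (index_base + 1, index_base + data_size),                      # position 1 (small)
    (index_base + data_size + 1, index_base + 2 * data_size),      # position 2 (small)
    (index_base + 2 * data_size + 1, index_base + 4 * data_size),  # position 3 (large)
)

@show ranges   # ranges = ((11, 14), (15, 18), (19, 26))
```
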
-@inline function buffer_mortar_indices(mesh::ParallelP4estMesh{3}, index_base, +@inline function buffer_mortar_indices(mesh::Union{ParallelP4estMesh{3}, + ParallelT8codeMesh{3}}, index_base, data_size) return ( # first, last for local element in position 1 (small element) @@ -491,7 +493,8 @@ end # Exchange normal directions of small elements of the MPI mortars. They are needed on all involved # MPI ranks to calculate the mortar fluxes. -function exchange_normal_directions!(mpi_mortars, mpi_cache, mesh::ParallelP4estMesh, +function exchange_normal_directions!(mpi_mortars, mpi_cache, + mesh::Union{ParallelP4estMesh, ParallelT8codeMesh}, n_nodes) RealT = real(mesh) n_dims = ndims(mesh) diff --git a/src/solvers/dgsem_t8code/containers.jl b/src/solvers/dgsem_t8code/containers.jl index 093feb2985a..d7ff79fbf2f 100644 --- a/src/solvers/dgsem_t8code/containers.jl +++ b/src/solvers/dgsem_t8code/containers.jl @@ -18,19 +18,22 @@ function reinitialize_containers!(mesh::T8codeMesh, equations, dg::DGSEM, cache) @unpack boundaries = cache resize!(boundaries, mesh.nboundaries) - trixi_t8_fill_mesh_info(mesh.forest, elements, interfaces, mortars, boundaries, - mesh.boundary_names) + fill_mesh_info!(mesh, interfaces, mortars, boundaries, + mesh.boundary_names) return nothing end function count_required_surfaces!(mesh::T8codeMesh) - counts = trixi_t8_count_interfaces(mesh.forest) + counts = count_interfaces(mesh) mesh.nmortars = counts.mortars mesh.ninterfaces = counts.interfaces mesh.nboundaries = counts.boundaries + mesh.nmpimortars = counts.mpi_mortars + mesh.nmpiinterfaces = counts.mpi_interfaces + return counts end @@ -38,7 +41,9 @@ end function count_required_surfaces(mesh::T8codeMesh) return (interfaces = mesh.ninterfaces, mortars = mesh.nmortars, - boundaries = mesh.nboundaries) + boundaries = mesh.nboundaries, + mpi_interfaces = mesh.nmpiinterfaces, + mpi_mortars = mesh.nmpimortars) end # Compatibility to `dgsem_p4est/containers.jl`. 
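
`count_required_surfaces!` above now also records the MPI interface and mortar counts on the mesh, and `count_required_surfaces` returns all counts in one NamedTuple from which `reinitialize_containers!` resizes the containers. A toy version of that pattern, using hypothetical counts and plain vectors as container stand-ins:

```julia
# Toy sketch (not Trixi code) of the counting/resizing pattern above: a single
# counting pass yields a NamedTuple with one entry per surface type, now
# including the MPI-specific ones, and the containers are resized from it.
counts = (interfaces = 12, mortars = 3, boundaries = 8,
          mpi_interfaces = 5, mpi_mortars = 2)   # hypothetical numbers

interfaces = Int[]        # stand-ins for the actual container structs
mpi_interfaces = Int[]

resize!(interfaces, counts.interfaces)
resize!(mpi_interfaces, counts.mpi_interfaces)

@show length(interfaces)       # 12
@show length(mpi_interfaces)   # 5
```
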
diff --git a/src/solvers/dgsem_t8code/containers_2d.jl b/src/solvers/dgsem_t8code/containers_2d.jl index bf77826a34b..ce525bfdf65 100644 --- a/src/solvers/dgsem_t8code/containers_2d.jl +++ b/src/solvers/dgsem_t8code/containers_2d.jl @@ -26,6 +26,7 @@ function calc_node_coordinates!(node_coordinates, tree_class = t8_forest_get_tree_class(mesh.forest, itree) eclass_scheme = t8_forest_get_eclass_scheme(mesh.forest, tree_class) num_elements_in_tree = t8_forest_get_tree_num_elements(mesh.forest, itree) + global_itree = t8_forest_global_tree_id(mesh.forest, itree) for ielement in 0:(num_elements_in_tree - 1) element = t8_forest_get_element_in_tree(mesh.forest, itree, ielement) @@ -55,7 +56,7 @@ function calc_node_coordinates!(node_coordinates, multiply_dimensionwise!(view(node_coordinates, :, :, :, current_index += 1), matrix1, matrix2, view(mesh.tree_node_coordinates, :, :, :, - itree + 1), + global_itree + 1), tmp1) end end diff --git a/src/solvers/dgsem_t8code/containers_3d.jl b/src/solvers/dgsem_t8code/containers_3d.jl index f2d54ff07da..4d56bc734aa 100644 --- a/src/solvers/dgsem_t8code/containers_3d.jl +++ b/src/solvers/dgsem_t8code/containers_3d.jl @@ -28,6 +28,7 @@ function calc_node_coordinates!(node_coordinates, tree_class = t8_forest_get_tree_class(mesh.forest, itree) eclass_scheme = t8_forest_get_eclass_scheme(mesh.forest, tree_class) num_elements_in_tree = t8_forest_get_tree_num_elements(mesh.forest, itree) + global_itree = t8_forest_global_tree_id(mesh.forest, itree) for ielement in 0:(num_elements_in_tree - 1) element = t8_forest_get_element_in_tree(mesh.forest, itree, ielement) @@ -63,7 +64,7 @@ function calc_node_coordinates!(node_coordinates, current_index += 1), matrix1, matrix2, matrix3, view(mesh.tree_node_coordinates, :, :, :, :, - itree + 1), + global_itree + 1), tmp1) end end diff --git a/src/solvers/dgsem_t8code/containers_parallel.jl b/src/solvers/dgsem_t8code/containers_parallel.jl new file mode 100644 index 00000000000..0cb3f5887a0 --- /dev/null +++ b/src/solvers/dgsem_t8code/containers_parallel.jl @@ -0,0 +1,65 @@ +function reinitialize_containers!(mesh::ParallelT8codeMesh, equations, dg::DGSEM, cache) + @unpack elements, interfaces, boundaries, mortars, mpi_interfaces, mpi_mortars, + mpi_cache = cache + resize!(elements, ncells(mesh)) + init_elements!(elements, mesh, dg.basis) + + count_required_surfaces!(mesh) + required = count_required_surfaces(mesh) + + resize!(interfaces, required.interfaces) + + resize!(boundaries, required.boundaries) + + resize!(mortars, required.mortars) + + resize!(mpi_interfaces, required.mpi_interfaces) + + resize!(mpi_mortars, required.mpi_mortars) + + mpi_mesh_info = (mpi_mortars = mpi_mortars, + mpi_interfaces = mpi_interfaces, + + # Temporary arrays for updating `mpi_cache`. 
+ global_mortar_ids = fill(UInt64(0), nmpimortars(mpi_mortars)), + global_interface_ids = fill(UInt64(0), nmpiinterfaces(mpi_interfaces)), + neighbor_ranks_mortar = Vector{Vector{Int}}(undef, + nmpimortars(mpi_mortars)), + neighbor_ranks_interface = fill(-1, nmpiinterfaces(mpi_interfaces))) + + fill_mesh_info!(mesh, interfaces, mortars, boundaries, + mesh.boundary_names; mpi_mesh_info = mpi_mesh_info) + + init_mpi_cache!(mpi_cache, mesh, mpi_mesh_info, nvariables(equations), nnodes(dg), + eltype(elements)) + + empty!(mpi_mesh_info.global_mortar_ids) + empty!(mpi_mesh_info.global_interface_ids) + empty!(mpi_mesh_info.neighbor_ranks_mortar) + empty!(mpi_mesh_info.neighbor_ranks_interface) + + # Re-initialize and distribute normal directions of MPI mortars; requires + # MPI communication, so the MPI cache must be re-initialized beforehand. + init_normal_directions!(mpi_mortars, dg.basis, elements) + exchange_normal_directions!(mpi_mortars, mpi_cache, mesh, nnodes(dg)) + + return nothing +end + +# Compatibility to `dgsem_p4est/containers.jl`. +function init_mpi_interfaces!(interfaces, mesh::ParallelT8codeMesh) + # Do nothing. + return nothing +end + +# Compatibility to `dgsem_p4est/containers.jl`. +function init_mpi_mortars!(mortars, mesh::ParallelT8codeMesh) + # Do nothing. + return nothing +end + +# Compatibility to `dgsem_p4est/containers_parallel.jl`. +function init_mpi_mortars!(mpi_mortars, mesh::ParallelT8codeMesh, basis, elements) + # Do nothing. + return nothing +end diff --git a/src/solvers/dgsem_t8code/dg.jl b/src/solvers/dgsem_t8code/dg.jl index 6e9660c917d..e01b12e0f80 100644 --- a/src/solvers/dgsem_t8code/dg.jl +++ b/src/solvers/dgsem_t8code/dg.jl @@ -13,8 +13,8 @@ function create_cache(mesh::T8codeMesh, equations::AbstractEquations, dg::DG, :: boundaries = init_boundaries(mesh, equations, dg.basis, elements) mortars = init_mortars(mesh, equations, dg.basis, elements) - trixi_t8_fill_mesh_info(mesh.forest, elements, interfaces, mortars, boundaries, - mesh.boundary_names) + fill_mesh_info!(mesh, interfaces, mortars, boundaries, + mesh.boundary_names) cache = (; elements, interfaces, boundaries, mortars) @@ -29,4 +29,7 @@ end include("containers.jl") include("containers_2d.jl") include("containers_3d.jl") + +include("containers_parallel.jl") +include("dg_parallel.jl") end # @muladd diff --git a/src/solvers/dgsem_t8code/dg_parallel.jl b/src/solvers/dgsem_t8code/dg_parallel.jl new file mode 100644 index 00000000000..ece614b7d75 --- /dev/null +++ b/src/solvers/dgsem_t8code/dg_parallel.jl @@ -0,0 +1,135 @@ +@muladd begin +#! format: noindent + +# This method is called when a `SemidiscretizationHyperbolic` is constructed. +# It constructs the basic `cache` used throughout the simulation to compute +# the RHS etc. +function create_cache(mesh::ParallelT8codeMesh, equations::AbstractEquations, dg::DG, + ::Any, + ::Type{uEltype}) where {uEltype <: Real} + # Make sure to balance and partition the forest before creating any + # containers in case someone has tampered with forest after creating the + # mesh. 
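
The `create_cache` method for `ParallelT8codeMesh` that starts above first balances and partitions the forest (as shown in the continuation below), then builds a temporary `mpi_mesh_info` NamedTuple whose arrays are filled during `fill_mesh_info!`, consumed by `init_mpi_cache`, and emptied afterwards. The standalone snippet below only illustrates the life cycle of those temporary arrays; the sizes and values are made up.

```julia
# Small standalone sketch of the temporary `mpi_mesh_info` arrays used above:
# allocated per MPI mortar/interface, filled while traversing the forest,
# consumed when building the MPI cache, then emptied to release the memory.
n_mpi_mortars = 2
n_mpi_interfaces = 5

global_mortar_ids = fill(UInt64(0), n_mpi_mortars)
global_interface_ids = fill(UInt64(0), n_mpi_interfaces)
neighbor_ranks_mortar = Vector{Vector{Int}}(undef, n_mpi_mortars)
neighbor_ranks_interface = fill(-1, n_mpi_interfaces)

# ... these would be filled entry by entry during the forest traversal ...
neighbor_ranks_mortar[1] = [1]       # e.g. mortar 1 touches rank 1
neighbor_ranks_interface[1] = 2      # e.g. interface 1 touches rank 2

# Once the MPI cache has been built from them, the temporaries are cleared.
empty!(global_mortar_ids)
empty!(neighbor_ranks_interface)
@show length(global_mortar_ids)   # 0
```
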
+ balance!(mesh) + partition!(mesh) + + count_required_surfaces!(mesh) + + elements = init_elements(mesh, equations, dg.basis, uEltype) + mortars = init_mortars(mesh, equations, dg.basis, elements) + interfaces = init_interfaces(mesh, equations, dg.basis, elements) + boundaries = init_boundaries(mesh, equations, dg.basis, elements) + + mpi_mortars = init_mpi_mortars(mesh, equations, dg.basis, elements) + mpi_interfaces = init_mpi_interfaces(mesh, equations, dg.basis, elements) + + mpi_mesh_info = (mpi_mortars = mpi_mortars, + mpi_interfaces = mpi_interfaces, + global_mortar_ids = fill(UInt64(0), nmpimortars(mpi_mortars)), + global_interface_ids = fill(UInt64(0), + nmpiinterfaces(mpi_interfaces)), + neighbor_ranks_mortar = Vector{Vector{Int}}(undef, + nmpimortars(mpi_mortars)), + neighbor_ranks_interface = fill(-1, + nmpiinterfaces(mpi_interfaces))) + + fill_mesh_info!(mesh, interfaces, mortars, boundaries, + mesh.boundary_names; mpi_mesh_info = mpi_mesh_info) + + mpi_cache = init_mpi_cache(mesh, mpi_mesh_info, nvariables(equations), nnodes(dg), + uEltype) + + empty!(mpi_mesh_info.global_mortar_ids) + empty!(mpi_mesh_info.global_interface_ids) + empty!(mpi_mesh_info.neighbor_ranks_mortar) + empty!(mpi_mesh_info.neighbor_ranks_interface) + + init_normal_directions!(mpi_mortars, dg.basis, elements) + exchange_normal_directions!(mpi_mortars, mpi_cache, mesh, nnodes(dg)) + + cache = (; elements, interfaces, mpi_interfaces, boundaries, mortars, mpi_mortars, + mpi_cache) + + # Add specialized parts of the cache required to compute the volume integral etc. + cache = (; cache..., + create_cache(mesh, equations, dg.volume_integral, dg, uEltype)...) + cache = (; cache..., create_cache(mesh, equations, dg.mortar, uEltype)...) + + return cache +end + +function init_mpi_cache(mesh::ParallelT8codeMesh, mpi_mesh_info, nvars, nnodes, uEltype) + mpi_cache = P4estMPICache(uEltype) + init_mpi_cache!(mpi_cache, mesh, mpi_mesh_info, nvars, nnodes, uEltype) + return mpi_cache +end + +function init_mpi_cache!(mpi_cache::P4estMPICache, mesh::ParallelT8codeMesh, + mpi_mesh_info, nvars, nnodes, uEltype) + mpi_neighbor_ranks, mpi_neighbor_interfaces, mpi_neighbor_mortars = init_mpi_neighbor_connectivity(mpi_mesh_info, + mesh) + + mpi_send_buffers, mpi_recv_buffers, mpi_send_requests, mpi_recv_requests = init_mpi_data_structures(mpi_neighbor_interfaces, + mpi_neighbor_mortars, + ndims(mesh), + nvars, + nnodes, + uEltype) + + n_elements_global = Int(t8_forest_get_global_num_elements(mesh.forest)) + n_elements_local = Int(t8_forest_get_local_num_elements(mesh.forest)) + + n_elements_by_rank = Vector{Int}(undef, mpi_nranks()) + n_elements_by_rank[mpi_rank() + 1] = n_elements_local + + MPI.Allgather!(MPI.UBuffer(n_elements_by_rank, 1), mpi_comm()) + + n_elements_by_rank = OffsetArray(n_elements_by_rank, 0:(mpi_nranks() - 1)) + + # Account for 1-based indexing in Julia. + first_element_global_id = sum(n_elements_by_rank[0:(mpi_rank() - 1)]) + 1 + + @assert n_elements_global==sum(n_elements_by_rank) "error in total number of elements" + + @pack! 
mpi_cache = mpi_neighbor_ranks, mpi_neighbor_interfaces, + mpi_neighbor_mortars, + mpi_send_buffers, mpi_recv_buffers, + mpi_send_requests, mpi_recv_requests, + n_elements_by_rank, n_elements_global, + first_element_global_id + + return mpi_cache +end + +function init_mpi_neighbor_connectivity(mpi_mesh_info, mesh::ParallelT8codeMesh) + @unpack mpi_interfaces, mpi_mortars, global_interface_ids, neighbor_ranks_interface, global_mortar_ids, neighbor_ranks_mortar = mpi_mesh_info + + mpi_neighbor_ranks = vcat(neighbor_ranks_interface, neighbor_ranks_mortar...) |> + sort |> unique + + p = sortperm(global_interface_ids) + + neighbor_ranks_interface .= neighbor_ranks_interface[p] + interface_ids = collect(1:nmpiinterfaces(mpi_interfaces))[p] + + p = sortperm(global_mortar_ids) + neighbor_ranks_mortar .= neighbor_ranks_mortar[p] + mortar_ids = collect(1:nmpimortars(mpi_mortars))[p] + + # For each neighbor rank, init connectivity data structures + mpi_neighbor_interfaces = Vector{Vector{Int}}(undef, length(mpi_neighbor_ranks)) + mpi_neighbor_mortars = Vector{Vector{Int}}(undef, length(mpi_neighbor_ranks)) + for (index, d) in enumerate(mpi_neighbor_ranks) + mpi_neighbor_interfaces[index] = interface_ids[findall(==(d), + neighbor_ranks_interface)] + mpi_neighbor_mortars[index] = mortar_ids[findall(x -> (d in x), + neighbor_ranks_mortar)] + end + + # Check that all interfaces were counted exactly once + @assert mapreduce(length, +, mpi_neighbor_interfaces; init = 0) == + nmpiinterfaces(mpi_interfaces) + + return mpi_neighbor_ranks, mpi_neighbor_interfaces, mpi_neighbor_mortars +end +end # @muladd diff --git a/src/solvers/dgsem_tree/dg_2d_parallel.jl b/src/solvers/dgsem_tree/dg_2d_parallel.jl index 8095dae123a..157d462aa2f 100644 --- a/src/solvers/dgsem_tree/dg_2d_parallel.jl +++ b/src/solvers/dgsem_tree/dg_2d_parallel.jl @@ -446,7 +446,8 @@ function init_mpi_neighbor_connectivity(elements, mpi_interfaces, mpi_mortars, end function rhs!(du, u, t, - mesh::Union{ParallelTreeMesh{2}, ParallelP4estMesh{2}}, equations, + mesh::Union{ParallelTreeMesh{2}, ParallelP4estMesh{2}, + ParallelT8codeMesh{2}}, equations, initial_condition, boundary_conditions, source_terms::Source, dg::DG, cache) where {Source} # Start to receive MPI data diff --git a/test/test_mpi.jl b/test/test_mpi.jl index ad1ba4e835d..1ab1282b891 100644 --- a/test/test_mpi.jl +++ b/test/test_mpi.jl @@ -19,10 +19,12 @@ CI_ON_WINDOWS = (get(ENV, "GITHUB_ACTIONS", false) == "true") && Sys.iswindows() # TreeMesh tests include("test_mpi_tree.jl") - # P4estMesh tests + # P4estMesh and T8codeMesh tests include("test_mpi_p4est_2d.jl") + include("test_mpi_t8code_2d.jl") if !CI_ON_WINDOWS # see comment on `CI_ON_WINDOWS` above include("test_mpi_p4est_3d.jl") + include("test_mpi_t8code_3d.jl") end end # MPI diff --git a/test/test_mpi_p4est_2d.jl b/test/test_mpi_p4est_2d.jl index da90537fcfd..6d66bc68a26 100644 --- a/test/test_mpi_p4est_2d.jl +++ b/test/test_mpi_p4est_2d.jl @@ -33,6 +33,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_2d_dgsem") @test errors.linf≈[0.00011787417954578494] rtol=1.0e-4 end end + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_advection_nonconforming_flag.jl" begin @@ -40,6 +49,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_2d_dgsem") "elixir_advection_nonconforming_flag.jl"), 
l2=[3.198940059144588e-5], linf=[0.00030636069494005547]) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_advection_unstructured_flag.jl" begin @@ -47,6 +65,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_2d_dgsem") "elixir_advection_unstructured_flag.jl"), l2=[0.0005379687442422346], linf=[0.007438525029884735]) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_advection_amr_solution_independent.jl" begin @@ -56,6 +83,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_2d_dgsem") l2=[4.949660644033807e-5], linf=[0.0004867846262313763], coverage_override=(maxiters = 6,)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_advection_amr_unstructured_flag.jl" begin @@ -64,6 +100,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_2d_dgsem") l2=[0.0012766060609964525], linf=[0.01750280631586159], coverage_override=(maxiters = 6,)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_advection_restart.jl" begin @@ -73,6 +118,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_2d_dgsem") # With the default `maxiters = 1` in coverage tests, # there would be no time steps after the restart. 
coverage_override=(maxiters = 100_000,)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_euler_source_terms_nonconforming_unstructured_flag.jl" begin @@ -90,6 +144,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_2d_dgsem") 0.03759938693042297, 0.08039824959535657, ]) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end end end # P4estMesh MPI diff --git a/test/test_mpi_p4est_3d.jl b/test/test_mpi_p4est_3d.jl index 75f43650082..cca9093ec51 100644 --- a/test/test_mpi_p4est_3d.jl +++ b/test/test_mpi_p4est_3d.jl @@ -33,6 +33,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_3d_dgsem") @test errors.linf≈[0.0014548839020096516] rtol=1.0e-4 end end + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_advection_amr.jl" begin @@ -46,6 +55,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_3d_dgsem") initial_refinement_level = 2, base_level = 2, med_level = 3, max_level = 4)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_advection_amr_unstructured_curved.jl" begin @@ -58,6 +76,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_3d_dgsem") initial_refinement_level = 0, base_level = 0, med_level = 1, max_level = 2)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_advection_restart.jl" begin @@ -67,12 +94,30 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_3d_dgsem") # With the default `maxiters = 1` in coverage tests, # there would be no time steps after the restart. 
coverage_override=(maxiters = 100_000,)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_advection_cubed_sphere.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_cubed_sphere.jl"), l2=[0.002006918015656413], linf=[0.027655117058380085]) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end # Compressible Euler @@ -94,6 +139,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_3d_dgsem") 0.008526972236273522, ], tspan=(0.0, 0.01)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_euler_source_terms_nonperiodic.jl" begin @@ -114,6 +168,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_3d_dgsem") 0.01562861968368434, ], tspan=(0.0, 1.0)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_euler_ec.jl" begin @@ -134,6 +197,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_3d_dgsem") ], tspan=(0.0, 0.2), coverage_override=(polydeg = 3,)) # Prevent long compile time in CI + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_euler_source_terms_nonperiodic_hohqmesh.jl" begin @@ -153,6 +225,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_3d_dgsem") 0.048396544302230504, 0.1154589758186293, ]) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end end end # P4estMesh MPI diff --git a/test/test_mpi_t8code_2d.jl b/test/test_mpi_t8code_2d.jl new file mode 100644 index 00000000000..7c7fc03898c --- /dev/null +++ b/test/test_mpi_t8code_2d.jl @@ -0,0 +1,142 @@ +module TestExamplesMPIT8codeMesh2D + +using Test +using Trixi + +include("test_trixi.jl") + +const EXAMPLES_DIR = pkgdir(Trixi, "examples", "t8code_2d_dgsem") + +@testset "T8codeMesh MPI 2D" begin +#! format: noindent + +# Run basic tests +@testset "Examples 2D" begin + # Linear scalar advection + @trixi_testset "elixir_advection_basic.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_basic.jl"), + # Expected errors are exactly the same as with TreeMesh! 
+ l2=[8.311947673061856e-6], + linf=[6.627000273229378e-5]) + + @testset "error-based step size control" begin + Trixi.mpi_isroot() && println("-"^100) + Trixi.mpi_isroot() && + println("elixir_advection_basic.jl with error-based step size control") + + sol = solve(ode, RDPK3SpFSAL35(); abstol = 1.0e-4, reltol = 1.0e-4, + ode_default_options()..., callback = callbacks) + summary_callback() + errors = analysis_callback(sol) + if Trixi.mpi_isroot() + @test errors.l2≈[3.3022040342579066e-5] rtol=1.0e-4 + @test errors.linf≈[0.00011787417954578494] rtol=1.0e-4 + end + end + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + @trixi_testset "elixir_advection_nonconforming_flag.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_advection_nonconforming_flag.jl"), + l2=[3.198940059144588e-5], + linf=[0.00030636069494005547]) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + @trixi_testset "elixir_advection_unstructured_flag.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_advection_unstructured_flag.jl"), + l2=[0.0005379687442422346], + linf=[0.007438525029884735]) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + @trixi_testset "elixir_advection_amr_solution_independent.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_advection_amr_solution_independent.jl"), + # Expected errors are exactly the same as with TreeMesh! 
+ l2=[4.933027431215839e-5], + linf=[0.00048678461161243136], + coverage_override=(maxiters = 6,)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + @trixi_testset "elixir_advection_amr_unstructured_flag.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_advection_amr_unstructured_flag.jl"), + l2=[0.001980652042312077], + linf=[0.0328882442132265], + coverage_override=(maxiters = 6,)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + @trixi_testset "elixir_euler_source_terms_nonconforming_unstructured_flag.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_euler_source_terms_nonconforming_unstructured_flag.jl"), + l2=[ + 0.0034516244508588046, + 0.0023420334036925493, + 0.0024261923964557187, + 0.004731710454271893, + ], + linf=[ + 0.04155789011775046, + 0.024772109862748914, + 0.03759938693042297, + 0.08039824959535657, + ]) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end +end +end # T8codeMesh MPI + +end # module diff --git a/test/test_mpi_t8code_3d.jl b/test/test_mpi_t8code_3d.jl new file mode 100644 index 00000000000..a15690a7629 --- /dev/null +++ b/test/test_mpi_t8code_3d.jl @@ -0,0 +1,180 @@ +module TestExamplesMPIT8codeMesh3D + +using Test +using Trixi + +include("test_trixi.jl") + +const EXAMPLES_DIR = pkgdir(Trixi, "examples", "t8code_3d_dgsem") + +@testset "T8codeMesh MPI 3D" begin +#! format: noindent + +# Run basic tests +@testset "Examples 3D" begin + # Linear scalar advection + @trixi_testset "elixir_advection_basic.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_basic.jl"), + # Expected errors are exactly the same as with TreeMesh! + l2=[0.00016263963870641478], + linf=[0.0014537194925779984]) + + @testset "error-based step size control" begin + Trixi.mpi_isroot() && println("-"^100) + Trixi.mpi_isroot() && + println("elixir_advection_basic.jl with error-based step size control") + + sol = solve(ode, RDPK3SpFSAL35(); abstol = 1.0e-4, reltol = 1.0e-4, + ode_default_options()..., callback = callbacks) + summary_callback() + errors = analysis_callback(sol) + if Trixi.mpi_isroot() + @test errors.l2≈[0.00016800412839949264] rtol=1.0e-4 + @test errors.linf≈[0.0014548839020096516] rtol=1.0e-4 + end + end + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + @trixi_testset "elixir_advection_amr.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_amr.jl"), + # Expected errors are exactly the same as with TreeMesh! 
+ l2=[1.1302812803902801e-5], + linf=[0.0007889950196294793], + # override values are different from the serial tests to ensure each process holds at least + # one element, otherwise OrdinaryDiffEq fails during initialization + coverage_override=(maxiters = 6, + initial_refinement_level = 2, + base_level = 2, med_level = 3, + max_level = 4)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + @trixi_testset "elixir_advection_amr_unstructured_curved.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_advection_amr_unstructured_curved.jl"), + l2=[2.0556575425846923e-5], + linf=[0.00105682693484822], + tspan=(0.0, 1.0), + coverage_override=(maxiters = 6, + initial_refinement_level = 0, + base_level = 0, med_level = 1, + max_level = 2)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + # Compressible Euler + @trixi_testset "elixir_euler_source_terms_nonconforming_unstructured_curved.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_euler_source_terms_nonconforming_unstructured_curved.jl"), + l2=[ + 4.070355207909268e-5, + 4.4993257426833716e-5, + 5.10588457841744e-5, + 5.102840924036687e-5, + 0.00019986264001630542, + ], + linf=[ + 0.0016987332417202072, + 0.003622956808262634, + 0.002029576258317789, + 0.0024206977281964193, + 0.008526972236273522, + ], + tspan=(0.0, 0.01)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + @trixi_testset "elixir_euler_source_terms_nonperiodic.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_euler_source_terms_nonperiodic.jl"), + l2=[ + 0.0015106060984283647, + 0.0014733349038567685, + 0.00147333490385685, + 0.001473334903856929, + 0.0028149479453087093, + ], + linf=[ + 0.008070806335238156, + 0.009007245083113125, + 0.009007245083121784, + 0.009007245083102688, + 0.01562861968368434, + ], + tspan=(0.0, 1.0)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + @trixi_testset "elixir_euler_ec.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_ec.jl"), + l2=[ + 0.010380390326164493, + 0.006192950051354618, + 0.005970674274073704, + 0.005965831290564327, + 0.02628875593094754, + ], + linf=[ + 0.3326911600075694, + 0.2824952141320467, + 0.41401037398065543, + 0.45574161423218573, + 0.8099577682187109, + ], + tspan=(0.0, 0.2), + coverage_override=(polydeg = 3,)) # Prevent long compile time in CI + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end +end +end # T8codeMesh MPI + +end # module diff --git a/test/test_t8code_2d.jl b/test/test_t8code_2d.jl index ab95e068d02..d536a6dd73a 100644 --- a/test/test_t8code_2d.jl +++ b/test/test_t8code_2d.jl @@ 
-33,10 +33,9 @@ end @trixi_testset "test check_for_negative_volumes" begin @test_warn "Discovered negative volumes" begin # Unstructured mesh with six cells which have left-handed node ordering. - mesh_file = joinpath(EXAMPLES_DIR, "rectangle_with_negative_volumes.msh") - isfile(mesh_file) || - download("https://gist.githubusercontent.com/jmark/bfe0d45f8e369298d6cc637733819013/raw/cecf86edecc736e8b3e06e354c494b2052d41f7a/rectangle_with_negative_volumes.msh", - mesh_file) + mesh_file = Trixi.download("https://gist.githubusercontent.com/jmark/bfe0d45f8e369298d6cc637733819013/raw/cecf86edecc736e8b3e06e354c494b2052d41f7a/rectangle_with_negative_volumes.msh", + joinpath(EXAMPLES_DIR, + "rectangle_with_negative_volumes.msh")) # This call should throw a warning about negative volumes detected. mesh = T8codeMesh(mesh_file, 2) From c7cee980f56835e9c396424e742efe85da465042 Mon Sep 17 00:00:00 2001 From: Andrew Winters Date: Wed, 31 Jan 2024 19:09:15 +0100 Subject: [PATCH 089/166] Remove race condition in mpi testing (#1821) * remove race condition in mpi testing * add additional barriers --- test/test_mpi.jl | 2 ++ test/test_mpi_tree.jl | 3 ++- test/test_threaded.jl | 2 ++ 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/test/test_mpi.jl b/test/test_mpi.jl index 1ab1282b891..001d9bff86e 100644 --- a/test/test_mpi.jl +++ b/test/test_mpi.jl @@ -8,6 +8,7 @@ include("test_trixi.jl") # Start with a clean environment: remove Trixi.jl output directory if it exists outdir = "out" Trixi.mpi_isroot() && isdir(outdir) && rm(outdir, recursive = true) +Trixi.MPI.Barrier(Trixi.mpi_comm()) # CI with MPI and some tests fails often on Windows. Thus, we check whether this # is the case here. We use GitHub Actions, so we can check whether we run CI @@ -45,5 +46,6 @@ end # MPI supporting functionality # Clean up afterwards: delete Trixi.jl output directory Trixi.mpi_isroot() && @test_nowarn rm(outdir, recursive = true) +Trixi.MPI.Barrier(Trixi.mpi_comm()) end # module diff --git a/test/test_mpi_tree.jl b/test/test_mpi_tree.jl index 0831f6a1313..6351a405b5d 100644 --- a/test/test_mpi_tree.jl +++ b/test/test_mpi_tree.jl @@ -76,7 +76,8 @@ CI_ON_WINDOWS = (get(ENV, "GITHUB_ACTIONS", false) == "true") && Sys.iswindows() # Here, we also test that SaveSolutionCallback prints multiple mesh files with AMR # Start with a clean environment: remove Trixi.jl output directory if it exists outdir = "out" - isdir(outdir) && rm(outdir, recursive = true) + Trixi.mpi_isroot() && isdir(outdir) && rm(outdir, recursive = true) + Trixi.MPI.Barrier(Trixi.mpi_comm()) @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_amr_refine_twice.jl"), l2=[0.00020547512522578292], diff --git a/test/test_threaded.jl b/test/test_threaded.jl index dbbcbf4c7ce..a8a1b1b425a 100644 --- a/test/test_threaded.jl +++ b/test/test_threaded.jl @@ -8,6 +8,7 @@ include("test_trixi.jl") # Start with a clean environment: remove Trixi.jl output directory if it exists outdir = "out" Trixi.mpi_isroot() && isdir(outdir) && rm(outdir, recursive = true) +Trixi.MPI.Barrier(Trixi.mpi_comm()) @testset "Threaded tests" begin #! 
format: noindent @@ -471,5 +472,6 @@ end # Clean up afterwards: delete Trixi.jl output directory Trixi.mpi_isroot() && isdir(outdir) && @test_nowarn rm(outdir, recursive = true) +Trixi.MPI.Barrier(Trixi.mpi_comm()) end # module From dfd632e69631ff4dbb42215966d4f7a546b92816 Mon Sep 17 00:00:00 2001 From: Benjamin Bolm <74359358+bennibolm@users.noreply.github.com> Date: Wed, 31 Jan 2024 21:50:27 +0100 Subject: [PATCH 090/166] Add subcell positivity limiting of non-linear variables (#1738) * Add positivity limiting of non-linear variables * Revise derivative function call; Add default derivative version * Adapt test to actually test pos limiter for nonlinear variables * Add unit test to test default implementation of variable_derivative * Clean up comments and code * Rename Newton-bisection variables * Implement suggestions * Relocate functions * Implement suggestions * Change error message for negative value with low-order method * Add changes from main to new limiter * Update NEWS.md * Rename is_valid_state and gradient_u --------- Co-authored-by: Michael Schlottke-Lakemper --- NEWS.md | 1 + ...kelvin_helmholtz_instability_sc_subcell.jl | 91 ++++++++ .../elixir_mhd_shockcapturing_subcell.jl | 7 +- src/callbacks_stage/subcell_bounds_check.jl | 8 + .../subcell_bounds_check_2d.jl | 18 ++ src/equations/compressible_euler_2d.jl | 21 ++ src/equations/equations.jl | 6 + src/equations/ideal_glm_mhd_2d.jl | 23 ++ src/solvers/dgsem_tree/subcell_limiters.jl | 60 +++-- src/solvers/dgsem_tree/subcell_limiters_2d.jl | 215 +++++++++++++++++- test/test_tree_2d_euler.jl | 26 +++ test/test_tree_2d_mhd.jl | 32 +-- test/test_unit.jl | 23 +- 13 files changed, 496 insertions(+), 35 deletions(-) create mode 100644 examples/tree_2d_dgsem/elixir_euler_kelvin_helmholtz_instability_sc_subcell.jl diff --git a/NEWS.md b/NEWS.md index 3a3a504a911..02a723fca45 100644 --- a/NEWS.md +++ b/NEWS.md @@ -11,6 +11,7 @@ for human readability. - `flux_hllc` on non-cartesian meshes for `CompressibleEulerEquations{2,3}D` - Different boundary conditions for quad/hex meshes in Abaqus format, even if not generated by HOHQMesh, can now be digested by Trixi in 2D and 3D. +- Subcell (positivity) limiting support for nonlinear variables in 2D for `TreeMesh` ## Changes when updating to v0.6 from v0.5.x diff --git a/examples/tree_2d_dgsem/elixir_euler_kelvin_helmholtz_instability_sc_subcell.jl b/examples/tree_2d_dgsem/elixir_euler_kelvin_helmholtz_instability_sc_subcell.jl new file mode 100644 index 00000000000..1817672778a --- /dev/null +++ b/examples/tree_2d_dgsem/elixir_euler_kelvin_helmholtz_instability_sc_subcell.jl @@ -0,0 +1,91 @@ + +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the compressible Euler equations +gamma = 1.4 +equations = CompressibleEulerEquations2D(gamma) + +""" + initial_condition_kelvin_helmholtz_instability(x, t, equations::CompressibleEulerEquations2D) + +A version of the classical Kelvin-Helmholtz instability based on +- Andrés M. Rueda-Ramírez, Gregor J. 
Gassner (2021) + A Subcell Finite Volume Positivity-Preserving Limiter for DGSEM Discretizations + of the Euler Equations + [arXiv: 2102.06017](https://arxiv.org/abs/2102.06017) +""" +function initial_condition_kelvin_helmholtz_instability(x, t, + equations::CompressibleEulerEquations2D) + # change discontinuity to tanh + # typical resolution 128^2, 256^2 + # domain size is [-1,+1]^2 + slope = 15 + amplitude = 0.02 + B = tanh(slope * x[2] + 7.5) - tanh(slope * x[2] - 7.5) + rho = 0.5 + 0.75 * B + v1 = 0.5 * (B - 1) + v2 = 0.1 * sin(2 * pi * x[1]) + p = 1.0 + return prim2cons(SVector(rho, v1, v2, p), equations) +end +initial_condition = initial_condition_kelvin_helmholtz_instability + +surface_flux = flux_lax_friedrichs +volume_flux = flux_ranocha +polydeg = 3 +basis = LobattoLegendreBasis(polydeg) + +limiter_idp = SubcellLimiterIDP(equations, basis; + positivity_variables_cons = ["rho"], + positivity_variables_nonlinear = [pressure]) +volume_integral = VolumeIntegralSubcellLimiting(limiter_idp; + volume_flux_dg = volume_flux, + volume_flux_fv = surface_flux) +solver = DGSEM(basis, surface_flux, volume_integral) + +coordinates_min = (-1.0, -1.0) +coordinates_max = (1.0, 1.0) +mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 5, + n_cells_max = 100_000) +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + +############################################################################### +# ODE solvers, callbacks etc. + +tspan = (0.0, 3.7) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 1000 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +save_solution = SaveSolutionCallback(interval = 100, + save_initial_solution = true, + save_final_solution = true, + solution_variables = cons2prim) + +save_restart = SaveRestartCallback(interval = 1000, + save_final_restart = true) + +stepsize_callback = StepsizeCallback(cfl = 0.7) + +callbacks = CallbackSet(summary_callback, + analysis_callback, alive_callback, + stepsize_callback, + save_restart, save_solution) + +############################################################################### +# run the simulation + +stage_callbacks = (SubcellLimiterIDPCorrection(), BoundsCheckCallback(save_errors = false)) + +sol = Trixi.solve(ode, Trixi.SimpleSSPRK33(stage_callbacks = stage_callbacks); + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + callback = callbacks); +summary_callback() # print the timer summary diff --git a/examples/tree_2d_dgsem/elixir_mhd_shockcapturing_subcell.jl b/examples/tree_2d_dgsem/elixir_mhd_shockcapturing_subcell.jl index fe9ad92467f..74d0370647a 100644 --- a/examples/tree_2d_dgsem/elixir_mhd_shockcapturing_subcell.jl +++ b/examples/tree_2d_dgsem/elixir_mhd_shockcapturing_subcell.jl @@ -22,7 +22,7 @@ function initial_condition_blast_wave(x, t, equations::IdealGlmMhdEquations2D) r = sqrt(x[1]^2 + x[2]^2) pmax = 10.0 - pmin = 1.0 + pmin = 0.01 rhomax = 1.0 rhomin = 0.01 if r <= 0.09 @@ -52,7 +52,8 @@ basis = LobattoLegendreBasis(3) limiter_idp = SubcellLimiterIDP(equations, basis; positivity_variables_cons = ["rho"], - positivity_correction_factor = 0.5) + positivity_variables_nonlinear = [pressure], + positivity_correction_factor = 0.1) volume_integral = VolumeIntegralSubcellLimiting(limiter_idp; volume_flux_dg = volume_flux, volume_flux_fv = surface_flux) @@ -84,7 +85,7 @@ 
save_solution = SaveSolutionCallback(interval = 100, save_final_solution = true, solution_variables = cons2prim) -cfl = 0.5 +cfl = 0.4 stepsize_callback = StepsizeCallback(cfl = cfl) glm_speed_callback = GlmSpeedCallback(glm_scale = 0.5, cfl = cfl) diff --git a/src/callbacks_stage/subcell_bounds_check.jl b/src/callbacks_stage/subcell_bounds_check.jl index 9f34a6b3b4b..4dbf44d29c4 100644 --- a/src/callbacks_stage/subcell_bounds_check.jl +++ b/src/callbacks_stage/subcell_bounds_check.jl @@ -97,6 +97,9 @@ function init_callback(callback::BoundsCheckCallback, semi, limiter::SubcellLimi end print(f, ", " * string(variables[v]) * "_min") end + for variable in limiter.positivity_variables_nonlinear + print(f, ", " * string(variable) * "_min") + end end println(f) end @@ -142,6 +145,11 @@ end println(string(variables[v]) * ":\n- positivity: ", idp_bounds_delta_global[Symbol(string(v), "_min")]) end + for variable in limiter.positivity_variables_nonlinear + variable_string = string(variable) + println(variable_string * ":\n- positivity: ", + idp_bounds_delta_global[Symbol(variable_string, "_min")]) + end end println("─"^100 * "\n") diff --git a/src/callbacks_stage/subcell_bounds_check_2d.jl b/src/callbacks_stage/subcell_bounds_check_2d.jl index 545d19b5136..19d73968c9a 100644 --- a/src/callbacks_stage/subcell_bounds_check_2d.jl +++ b/src/callbacks_stage/subcell_bounds_check_2d.jl @@ -60,6 +60,20 @@ deviation_threaded[stride_size * Threads.threadid()] = deviation end end + for variable in limiter.positivity_variables_nonlinear + key = Symbol(string(variable), "_min") + deviation_threaded = idp_bounds_delta_local[key] + @threaded for element in eachelement(solver, cache) + deviation = deviation_threaded[stride_size * Threads.threadid()] + for j in eachnode(solver), i in eachnode(solver) + var = variable(get_node_vars(u, equations, solver, i, j, element), + equations) + deviation = max(deviation, + variable_bounds[key][i, j, element] - var) + end + deviation_threaded[stride_size * Threads.threadid()] = deviation + end + end end for (key, _) in idp_bounds_delta_local @@ -92,6 +106,10 @@ print(f, ", ", idp_bounds_delta_local[Symbol(string(v), "_min")][stride_size]) end + for variable in limiter.positivity_variables_nonlinear + print(f, ", ", + idp_bounds_delta_local[Symbol(string(variable), "_min")][stride_size]) + end end println(f) end diff --git a/src/equations/compressible_euler_2d.jl b/src/equations/compressible_euler_2d.jl index 3c6f759db2b..f5a632723cf 100644 --- a/src/equations/compressible_euler_2d.jl +++ b/src/equations/compressible_euler_2d.jl @@ -1632,6 +1632,18 @@ end return p end +# Transformation from conservative variables u to d(p)/d(u) +@inline function gradient_conservative(::typeof(pressure), + u, equations::CompressibleEulerEquations2D) + rho, rho_v1, rho_v2, rho_e = u + + v1 = rho_v1 / rho + v2 = rho_v2 / rho + v_square = v1^2 + v2^2 + + return (equations.gamma - 1.0) * SVector(0.5 * v_square, -v1, -v2, 1.0) +end + @inline function density_pressure(u, equations::CompressibleEulerEquations2D) rho, rho_v1, rho_v2, rho_e = u rho_times_p = (equations.gamma - 1) * (rho * rho_e - 0.5 * (rho_v1^2 + rho_v2^2)) @@ -1699,4 +1711,13 @@ end @inline function energy_internal(cons, equations::CompressibleEulerEquations2D) return energy_total(cons, equations) - energy_kinetic(cons, equations) end + +# State validation for Newton-bisection method of subcell IDP limiting +@inline function Base.isvalid(u, equations::CompressibleEulerEquations2D) + p = pressure(u, equations) + if u[1] <= 0.0 || p <= 
0.0 + return false + end + return true +end end # @muladd diff --git a/src/equations/equations.jl b/src/equations/equations.jl index 7a3c326984d..c041bf117ba 100644 --- a/src/equations/equations.jl +++ b/src/equations/equations.jl @@ -376,6 +376,12 @@ of the correct length `nvariables(equations)`. """ function energy_internal end +# Default implementation of gradient for `variable`. Used for subcell limiting. +# Implementing a gradient function for a specific variable improves the performance. +@inline function gradient_conservative(variable, u, equations) + return ForwardDiff.gradient(x -> variable(x, equations), u) +end + #################################################################################################### # Include files with actual implementations for different systems of equations. diff --git a/src/equations/ideal_glm_mhd_2d.jl b/src/equations/ideal_glm_mhd_2d.jl index 43d1991e34b..4366cd32f08 100644 --- a/src/equations/ideal_glm_mhd_2d.jl +++ b/src/equations/ideal_glm_mhd_2d.jl @@ -1118,6 +1118,20 @@ end return p end +# Transformation from conservative variables u to d(p)/d(u) +@inline function gradient_conservative(::typeof(pressure), + u, equations::IdealGlmMhdEquations2D) + rho, rho_v1, rho_v2, rho_v3, rho_e, B1, B2, B3, psi = u + + v1 = rho_v1 / rho + v2 = rho_v2 / rho + v3 = rho_v3 / rho + v_square = v1^2 + v2^2 + v3^2 + + return (equations.gamma - 1.0) * + SVector(0.5 * v_square, -v1, -v2, -v3, 1.0, -B1, -B2, -B3, -psi) +end + @inline function density_pressure(u, equations::IdealGlmMhdEquations2D) rho, rho_v1, rho_v2, rho_v3, rho_e, B1, B2, B3, psi = u p = (equations.gamma - 1) * (rho_e - 0.5 * (rho_v1^2 + rho_v2^2 + rho_v3^2) / rho @@ -1384,6 +1398,15 @@ end cons[9]^2 / 2) end +# State validation for Newton-bisection method of subcell IDP limiting +@inline function Base.isvalid(u, equations::IdealGlmMhdEquations2D) + p = pressure(u, equations) + if u[1] <= 0.0 || p <= 0.0 + return false + end + return true +end + # Calculate the cross helicity (\vec{v}⋅\vec{B}) for a conservative state `cons' @inline function cross_helicity(cons, ::IdealGlmMhdEquations2D) return (cons[2] * cons[6] + cons[3] * cons[7] + cons[4] * cons[8]) / cons[1] diff --git a/src/solvers/dgsem_tree/subcell_limiters.jl b/src/solvers/dgsem_tree/subcell_limiters.jl index 055e7ce24a4..e433c953779 100644 --- a/src/solvers/dgsem_tree/subcell_limiters.jl +++ b/src/solvers/dgsem_tree/subcell_limiters.jl @@ -16,18 +16,28 @@ end SubcellLimiterIDP(equations::AbstractEquations, basis; local_minmax_variables_cons = String[], positivity_variables_cons = String[], - positivity_correction_factor = 0.1) + positivity_variables_nonlinear = [], + positivity_correction_factor = 0.1, + max_iterations_newton = 10, + newton_tolerances = (1.0e-12, 1.0e-14), + gamma_constant_newton = 2 * ndims(equations)) Subcell invariant domain preserving (IDP) limiting used with [`VolumeIntegralSubcellLimiting`](@ref) including: - Local maximum/minimum Zalesak-type limiting for conservative variables (`local_minmax_variables_cons`) -- Positivity limiting for conservative variables (`positivity_variables_cons`) +- Positivity limiting for conservative variables (`positivity_variables_cons`) and nonlinear variables +(`positivity_variables_nonlinear`) Conservative variables to be limited are passed as a vector of strings, e.g. `local_minmax_variables_cons = ["rho"]` -and `positivity_variables_cons = ["rho"]`. +and `positivity_variables_cons = ["rho"]`. For nonlinear variables the specific functions are +passed in a vector, e.g. 
`positivity_variables_nonlinear = [pressure]`. The bounds are calculated using the low-order FV solution. The positivity limiter uses `positivity_correction_factor` such that `u^new >= positivity_correction_factor * u^FV`. +The limiting of nonlinear variables uses a Newton-bisection method with a maximum of +`max_iterations_newton` iterations, relative and absolute tolerances of `newton_tolerances` +and a provisional update constant `gamma_constant_newton` (`gamma_constant_newton>=2*d`, +where `d = #dimensions`). See equation (20) of Pazner (2020) and equation (30) of Rueda-Ramírez et al. (2022). !!! note This limiter and the correction callback [`SubcellLimiterIDPCorrection`](@ref) only work together. @@ -45,22 +55,32 @@ The bounds are calculated using the low-order FV solution. The positivity limite !!! warning "Experimental implementation" This is an experimental feature and may change in future releases. """ -struct SubcellLimiterIDP{RealT <: Real, Cache} <: AbstractSubcellLimiter +struct SubcellLimiterIDP{RealT <: Real, LimitingVariablesNonlinear, Cache} <: + AbstractSubcellLimiter local_minmax::Bool local_minmax_variables_cons::Vector{Int} # Local mininum/maximum principles for conservative variables positivity::Bool positivity_variables_cons::Vector{Int} # Positivity for conservative variables + positivity_variables_nonlinear::LimitingVariablesNonlinear # Positivity for nonlinear variables positivity_correction_factor::RealT cache::Cache + max_iterations_newton::Int + newton_tolerances::Tuple{RealT, RealT} # Relative and absolute tolerances for Newton's method + gamma_constant_newton::RealT # Constant for the subcell limiting of convex (nonlinear) constraints end # this method is used when the limiter is constructed as for shock-capturing volume integrals function SubcellLimiterIDP(equations::AbstractEquations, basis; local_minmax_variables_cons = String[], positivity_variables_cons = String[], - positivity_correction_factor = 0.1) + positivity_variables_nonlinear = [], + positivity_correction_factor = 0.1, + max_iterations_newton = 10, + newton_tolerances = (1.0e-12, 1.0e-14), + gamma_constant_newton = 2 * ndims(equations)) local_minmax = (length(local_minmax_variables_cons) > 0) - positivity = (length(positivity_variables_cons) > 0) + positivity = (length(positivity_variables_cons) + + length(positivity_variables_nonlinear) > 0) local_minmax_variables_cons_ = get_variable_index.(local_minmax_variables_cons, equations) @@ -80,13 +100,20 @@ function SubcellLimiterIDP(equations::AbstractEquations, basis; bound_keys = (bound_keys..., Symbol(string(v), "_min")) end end + for variable in positivity_variables_nonlinear + bound_keys = (bound_keys..., Symbol(string(variable), "_min")) + end cache = create_cache(SubcellLimiterIDP, equations, basis, bound_keys) SubcellLimiterIDP{typeof(positivity_correction_factor), + typeof(positivity_variables_nonlinear), typeof(cache)}(local_minmax, local_minmax_variables_cons_, positivity, positivity_variables_cons_, - positivity_correction_factor, cache) + positivity_variables_nonlinear, + positivity_correction_factor, cache, + max_iterations_newton, newton_tolerances, + gamma_constant_newton) end function Base.show(io::IO, limiter::SubcellLimiterIDP) @@ -97,10 +124,15 @@ function Base.show(io::IO, limiter::SubcellLimiterIDP) if !(local_minmax || positivity) print(io, "No limiter selected => pure DG method") else - print(io, "limiter=(") - local_minmax && print(io, "min/max limiting, ") - positivity && print(io, "positivity") - print(io, "), ") + 
features = String[] + if local_minmax + push!(features, "local min/max") + end + if positivity + push!(features, "positivity") + end + join(io, features, ", ") + print(io, "Limiter=($features), ") end print(io, "Local bounds with FV solution") print(io, ")") @@ -120,15 +152,15 @@ function Base.show(io::IO, ::MIME"text/plain", limiter::SubcellLimiterIDP) if local_minmax setup = [ setup..., - "" => "local maximum/minimum bounds for conservative variables $(limiter.local_minmax_variables_cons)", + "" => "Local maximum/minimum limiting for conservative variables $(limiter.local_minmax_variables_cons)", ] end if positivity - string = "positivity for conservative variables $(limiter.positivity_variables_cons)" + string = "Positivity limiting for conservative variables $(limiter.positivity_variables_cons) and $(limiter.positivity_variables_nonlinear)" setup = [setup..., "" => string] setup = [ setup..., - "" => " positivity correction factor = $(limiter.positivity_correction_factor)", + "" => "- with positivity correction factor = $(limiter.positivity_correction_factor)", ] end setup = [ diff --git a/src/solvers/dgsem_tree/subcell_limiters_2d.jl b/src/solvers/dgsem_tree/subcell_limiters_2d.jl index 3d272359fe4..3f7954c8958 100644 --- a/src/solvers/dgsem_tree/subcell_limiters_2d.jl +++ b/src/solvers/dgsem_tree/subcell_limiters_2d.jl @@ -5,6 +5,10 @@ @muladd begin #! format: noindent +############################################################################### +# IDP Limiting +############################################################################### + # this method is used when the limiter is constructed as for shock-capturing volume integrals function create_cache(limiter::Type{SubcellLimiterIDP}, equations::AbstractEquations{2}, basis::LobattoLegendreBasis, bound_keys) @@ -66,6 +70,9 @@ function (limiter::SubcellLimiterIDP)(u::AbstractArray{<:Any, 4}, semi, dg::DGSE return nothing end +############################################################################### +# Calculation of local bounds using low-order FV solution + @inline function calc_bounds_twosided!(var_min, var_max, variable, u, t, semi) mesh, equations, dg, cache = mesh_equations_solver_cache(semi) # Calc bounds inside elements @@ -164,6 +171,9 @@ end return nothing end +############################################################################### +# Local minimum/maximum limiting + @inline function idp_local_minmax!(alpha, limiter, u, t, dt, semi) for variable in limiter.local_minmax_variables_cons idp_local_minmax!(alpha, limiter, u, t, dt, semi, variable) @@ -233,16 +243,36 @@ end return nothing end +############################################################################### +# Global positivity limiting + @inline function idp_positivity!(alpha, limiter, u, dt, semi) # Conservative variables for variable in limiter.positivity_variables_cons - idp_positivity!(alpha, limiter, u, dt, semi, variable) + @trixi_timeit timer() "conservative variables" idp_positivity_conservative!(alpha, + limiter, + u, + dt, + semi, + variable) + end + + # Nonlinear variables + for variable in limiter.positivity_variables_nonlinear + @trixi_timeit timer() "nonlinear variables" idp_positivity_nonlinear!(alpha, + limiter, + u, dt, + semi, + variable) end return nothing end -@inline function idp_positivity!(alpha, limiter, u, dt, semi, variable) +############################################################################### +# Global positivity limiting of conservative variables + +@inline function idp_positivity_conservative!(alpha, 
limiter, u, dt, semi, variable) mesh, equations, dg, cache = mesh_equations_solver_cache(semi) (; antidiffusive_flux1_L, antidiffusive_flux2_L, antidiffusive_flux1_R, antidiffusive_flux2_R) = cache.antidiffusive_fluxes (; inverse_weights) = dg.basis @@ -256,7 +286,7 @@ end for j in eachnode(dg), i in eachnode(dg) var = u[variable, i, j, element] if var < 0 - error("Safe $variable is not safe. element=$element, node: $i $j, value=$var") + error("Safe low-order method produces negative value for conservative variable $variable. Try a smaller time step.") end # Compute bound @@ -302,4 +332,183 @@ end return nothing end + +@inline function idp_positivity_nonlinear!(alpha, limiter, u, dt, semi, variable) + _, equations, dg, cache = mesh_equations_solver_cache(semi) + (; positivity_correction_factor) = limiter + + (; variable_bounds) = limiter.cache.subcell_limiter_coefficients + var_min = variable_bounds[Symbol(string(variable), "_min")] + + @threaded for element in eachelement(dg, semi.cache) + inverse_jacobian = cache.elements.inverse_jacobian[element] + for j in eachnode(dg), i in eachnode(dg) + # Compute bound + u_local = get_node_vars(u, equations, dg, i, j, element) + var = variable(u_local, equations) + if var < 0 + error("Safe low-order method produces negative value for variable $variable. Try a smaller time step.") + end + var_min[i, j, element] = positivity_correction_factor * var + + # Perform Newton's bisection method to find new alpha + newton_loops_alpha!(alpha, var_min[i, j, element], u_local, i, j, element, + variable, initial_check_nonnegative_newton_idp, + final_check_nonnegative_newton_idp, inverse_jacobian, + dt, equations, dg, cache, limiter) + end + end + + return nothing +end + +@inline function newton_loops_alpha!(alpha, bound, u, i, j, element, variable, + initial_check, final_check, inverse_jacobian, dt, + equations, dg, cache, limiter) + (; inverse_weights) = dg.basis + (; antidiffusive_flux1_L, antidiffusive_flux2_L, antidiffusive_flux1_R, antidiffusive_flux2_R) = cache.antidiffusive_fluxes + + (; gamma_constant_newton) = limiter + + # negative xi direction + antidiffusive_flux = gamma_constant_newton * inverse_jacobian * inverse_weights[i] * + get_node_vars(antidiffusive_flux1_R, equations, dg, i, j, + element) + newton_loop!(alpha, bound, u, i, j, element, variable, initial_check, final_check, + equations, dt, limiter, antidiffusive_flux) + + # positive xi direction + antidiffusive_flux = -gamma_constant_newton * inverse_jacobian * + inverse_weights[i] * + get_node_vars(antidiffusive_flux1_L, equations, dg, i + 1, j, + element) + newton_loop!(alpha, bound, u, i, j, element, variable, initial_check, final_check, + equations, dt, limiter, antidiffusive_flux) + + # negative eta direction + antidiffusive_flux = gamma_constant_newton * inverse_jacobian * inverse_weights[j] * + get_node_vars(antidiffusive_flux2_R, equations, dg, i, j, + element) + newton_loop!(alpha, bound, u, i, j, element, variable, initial_check, final_check, + equations, dt, limiter, antidiffusive_flux) + + # positive eta direction + antidiffusive_flux = -gamma_constant_newton * inverse_jacobian * + inverse_weights[j] * + get_node_vars(antidiffusive_flux2_L, equations, dg, i, j + 1, + element) + newton_loop!(alpha, bound, u, i, j, element, variable, initial_check, final_check, + equations, dt, limiter, antidiffusive_flux) + + return nothing +end + +@inline function newton_loop!(alpha, bound, u, i, j, element, variable, initial_check, + final_check, equations, dt, limiter, antidiffusive_flux) + 
newton_reltol, newton_abstol = limiter.newton_tolerances + + beta = 1 - alpha[i, j, element] + + beta_L = 0 # alpha = 1 + beta_R = beta # No higher beta (lower alpha) than the current one + + u_curr = u + beta * dt * antidiffusive_flux + + # If state is valid, perform initial check and return if correction is not needed + if isvalid(u_curr, equations) + goal = goal_function_newton_idp(variable, bound, u_curr, equations) + + initial_check(bound, goal, newton_abstol) && return nothing + end + + # Newton iterations + for iter in 1:(limiter.max_iterations_newton) + beta_old = beta + + # If the state is valid, evaluate d(goal)/d(beta) + if isvalid(u_curr, equations) + dgoal_dbeta = dgoal_function_newton_idp(variable, u_curr, dt, + antidiffusive_flux, equations) + else # Otherwise, perform a bisection step + dgoal_dbeta = 0 + end + + if dgoal_dbeta != 0 + # Update beta with Newton's method + beta = beta - goal / dgoal_dbeta + end + + # Check bounds + if (beta < beta_L) || (beta > beta_R) || (dgoal_dbeta == 0) || isnan(beta) + # Out of bounds, do a bisection step + beta = 0.5 * (beta_L + beta_R) + # Get new u + u_curr = u + beta * dt * antidiffusive_flux + + # If the state is invalid, finish bisection step without checking tolerance and iterate further + if !isvalid(u_curr, equations) + beta_R = beta + continue + end + + # Check new beta for condition and update bounds + goal = goal_function_newton_idp(variable, bound, u_curr, equations) + if initial_check(bound, goal, newton_abstol) + # New beta fulfills condition + beta_L = beta + else + # New beta does not fulfill condition + beta_R = beta + end + else + # Get new u + u_curr = u + beta * dt * antidiffusive_flux + + # If the state is invalid, redefine right bound without checking tolerance and iterate further + if !isvalid(u_curr, equations) + beta_R = beta + continue + end + + # Evaluate goal function + goal = goal_function_newton_idp(variable, bound, u_curr, equations) + end + + # Check relative tolerance + if abs(beta_old - beta) <= newton_reltol + break + end + + # Check absolute tolerance + if final_check(bound, goal, newton_abstol) + break + end + end + + new_alpha = 1 - beta + if alpha[i, j, element] > new_alpha + newton_abstol + error("Alpha is getting smaller. 
old: $(alpha[i, j, element]), new: $new_alpha") + else + alpha[i, j, element] = new_alpha + end + + return nothing +end + +### Auxiliary routines for Newton's bisection method ### +# Initial checks +@inline initial_check_nonnegative_newton_idp(bound, goal, newton_abstol) = goal <= 0 + +# Goal and d(Goal)d(u) function +@inline goal_function_newton_idp(variable, bound, u, equations) = bound - + variable(u, equations) +@inline function dgoal_function_newton_idp(variable, u, dt, antidiffusive_flux, + equations) + -dot(gradient_conservative(variable, u, equations), dt * antidiffusive_flux) +end + +# Final checks +@inline function final_check_nonnegative_newton_idp(bound, goal, newton_abstol) + (goal <= eps()) && (goal > -max(newton_abstol, abs(bound) * newton_abstol)) +end end # @muladd diff --git a/test/test_tree_2d_euler.jl b/test/test_tree_2d_euler.jl index 61b5c54b5e9..b937abe92c0 100644 --- a/test/test_tree_2d_euler.jl +++ b/test/test_tree_2d_euler.jl @@ -581,6 +581,32 @@ end end end +@trixi_testset "elixir_euler_kelvin_helmholtz_instability_sc_subcell.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_euler_kelvin_helmholtz_instability_sc_subcell.jl"), + l2=[ + 0.42185634563805724, + 0.1686471269704017, + 0.18240674916968103, + 0.17858250604280654, + ], + linf=[ + 1.7012978064377158, + 0.7149714986746726, + 0.5822547982757897, + 0.7300051017382696, + ], + tspan=(0.0, 2.0)) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 15000 + end +end + @trixi_testset "elixir_euler_colliding_flow.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_colliding_flow.jl"), l2=[ diff --git a/test/test_tree_2d_mhd.jl b/test/test_tree_2d_mhd.jl index 953c077c0a3..1f8458075aa 100644 --- a/test/test_tree_2d_mhd.jl +++ b/test/test_tree_2d_mhd.jl @@ -332,24 +332,28 @@ end @trixi_testset "elixir_mhd_shockcapturing_subcell.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_mhd_shockcapturing_subcell.jl"), - l2=[2.9974425783503109e-02, - 7.2849646345685956e-02, - 7.2488477174662239e-02, + l2=[ + 3.2064026219236076e-02, + 7.2461094392606618e-02, + 7.2380202888062711e-02, 0.0000000000000000e+00, - 1.2507971380965512e+00, - 1.8929505145499678e-02, - 1.2218606317164420e-02, + 8.6293936673145932e-01, + 8.4091669534557805e-03, + 5.2156364913231732e-03, 0.0000000000000000e+00, - 3.0154796910479838e-03], - linf=[3.2147382412340830e-01, - 1.3709471664007811e+00, - 1.3465154685288383e+00, + 2.0786952301129021e-04, + ], + linf=[ + 3.8778760255775635e-01, + 9.4666683953698927e-01, + 9.4618924645661928e-01, 0.0000000000000000e+00, - 1.6051257523415284e+01, - 3.0564266749926644e-01, - 2.3908016329805595e-01, + 1.0980297261521951e+01, + 1.0264404591009069e-01, + 1.0655686942176350e-01, 0.0000000000000000e+00, - 1.3711262178549158e-01], + 6.1013422157115546e-03, + ], tspan=(0.0, 0.003)) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) diff --git a/test/test_unit.jl b/test/test_unit.jl index e8a8effbe29..7943d952f71 100644 --- a/test/test_unit.jl +++ b/test/test_unit.jl @@ -416,7 +416,8 @@ end indicator_hg = IndicatorHennemannGassner(1.0, 0.0, true, "variable", "cache") @test_nowarn show(stdout, indicator_hg) - limiter_idp = SubcellLimiterIDP(true, [1], true, [1], 0.1, "cache") + limiter_idp = SubcellLimiterIDP(true, [1], true, [1], ["variable"], 0.1, "cache", 1, + (1.0, 1.0), 1.0) 
@test_nowarn show(stdout, limiter_idp) # TODO: TrixiShallowWater: move unit test @@ -1220,6 +1221,26 @@ end end end +@testset "Consistency check for `gradient_conservative` routine" begin + # Set up conservative variables, equations + u = [ + 0.5011914484393387, + 0.8829127712445113, + 0.43024132987932817, + 0.7560616633050348, + ] + + equations = CompressibleEulerEquations2D(1.4) + + # Define wrapper function for pressure in order to call default implementation + function pressure_test(u, equations) + return pressure(u, equations) + end + + @test Trixi.gradient_conservative(pressure_test, u, equations) ≈ + Trixi.gradient_conservative(pressure, u, equations) +end + @testset "Equivalent Fluxes" begin # Set up equations and dummy conservative variables state # Burgers' Equation From e2c92f32457e22d6f8b766bf1ecd7a25d413cc6e Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Thu, 1 Feb 2024 09:35:16 +0100 Subject: [PATCH 091/166] Move Jacobian for para P4est to respective files, add muladd (#1807) * Move Jacobian for para P4est to respective files, add muladd * fmt * compare checks without muladd * update test val for muladd * test vals --------- Co-authored-by: Jesse Chan <1156048+jlchan@users.noreply.github.com> --- src/solvers/dgsem_p4est/dg_2d_parabolic.jl | 176 +++++++++++++++------ src/solvers/dgsem_p4est/dg_3d_parabolic.jl | 137 +++++++++++----- src/solvers/dgsem_tree/dg_2d_parabolic.jl | 18 --- src/solvers/dgsem_tree/dg_3d_parabolic.jl | 18 --- test/test_parabolic_2d.jl | 4 +- 5 files changed, 228 insertions(+), 125 deletions(-) diff --git a/src/solvers/dgsem_p4est/dg_2d_parabolic.jl b/src/solvers/dgsem_p4est/dg_2d_parabolic.jl index 299f2f6140a..ed21f371449 100644 --- a/src/solvers/dgsem_p4est/dg_2d_parabolic.jl +++ b/src/solvers/dgsem_p4est/dg_2d_parabolic.jl @@ -1,7 +1,15 @@ +# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). +# Since these FMAs can increase the performance of many numerical algorithms, +# we need to opt-in explicitly. +# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. +@muladd begin +#! format: noindent + # This method is called when a SemidiscretizationHyperbolicParabolic is constructed. # It constructs the basic `cache` used throughout the simulation to compute # the RHS etc. 
-function create_cache_parabolic(mesh::P4estMesh{2}, equations_hyperbolic::AbstractEquations, +function create_cache_parabolic(mesh::P4estMesh{2}, + equations_hyperbolic::AbstractEquations, equations_parabolic::AbstractEquationsParabolic, dg::DG, parabolic_scheme, RealT, uEltype) balance!(mesh) @@ -167,12 +175,14 @@ function calc_gradient!(gradients, u_transformed, t, element) for ii in eachnode(dg) - multiply_add_to_node_vars!(gradients_x, derivative_dhat[ii, i], u_node, + multiply_add_to_node_vars!(gradients_x, derivative_dhat[ii, i], + u_node, equations_parabolic, dg, ii, j, element) end for jj in eachnode(dg) - multiply_add_to_node_vars!(gradients_y, derivative_dhat[jj, j], u_node, + multiply_add_to_node_vars!(gradients_y, derivative_dhat[jj, j], + u_node, equations_parabolic, dg, i, jj, element) end end @@ -185,9 +195,11 @@ function calc_gradient!(gradients, u_transformed, t, Ja21, Ja22 = get_contravariant_vector(2, contravariant_vectors, i, j, element) - gradients_reference_1 = get_node_vars(gradients_x, equations_parabolic, dg, + gradients_reference_1 = get_node_vars(gradients_x, equations_parabolic, + dg, i, j, element) - gradients_reference_2 = get_node_vars(gradients_y, equations_parabolic, dg, + gradients_reference_2 = get_node_vars(gradients_y, equations_parabolic, + dg, i, j, element) # note that the contravariant vectors are transposed compared with computations of flux @@ -199,9 +211,11 @@ function calc_gradient!(gradients, u_transformed, t, gradient_y_node = Ja12 * gradients_reference_1 + Ja22 * gradients_reference_2 - set_node_vars!(gradients_x, gradient_x_node, equations_parabolic, dg, i, j, + set_node_vars!(gradients_x, gradient_x_node, equations_parabolic, dg, i, + j, element) - set_node_vars!(gradients_y, gradient_y_node, equations_parabolic, dg, i, j, + set_node_vars!(gradients_y, gradient_y_node, equations_parabolic, dg, i, + j, element) end end @@ -219,7 +233,8 @@ function calc_gradient!(gradients, u_transformed, t, @trixi_timeit timer() "interface flux" begin calc_interface_flux!(cache_parabolic.elements.surface_flux_values, mesh, False(), # False() = no nonconservative terms - equations_parabolic, dg.surface_integral, dg, cache_parabolic) + equations_parabolic, dg.surface_integral, dg, + cache_parabolic) end # Prolong solution to boundaries @@ -231,7 +246,8 @@ function calc_gradient!(gradients, u_transformed, t, # Calculate boundary fluxes @trixi_timeit timer() "boundary flux" begin calc_boundary_flux_gradients!(cache_parabolic, t, boundary_conditions_parabolic, - mesh, equations_parabolic, dg.surface_integral, dg) + mesh, equations_parabolic, dg.surface_integral, + dg) end # Prolong solution to mortars. 
This resues the hyperbolic version of `prolong2mortars` @@ -268,70 +284,94 @@ function calc_gradient!(gradients, u_transformed, t, # Compute x-component of gradients # surface at -x - normal_direction_x, _ = get_normal_direction(1, contravariant_vectors, + normal_direction_x, _ = get_normal_direction(1, + contravariant_vectors, 1, l, element) gradients_x[v, 1, l, element] = (gradients_x[v, 1, l, element] + - surface_flux_values[v, l, 1, element] * + surface_flux_values[v, l, 1, + element] * factor_1 * normal_direction_x) # surface at +x - normal_direction_x, _ = get_normal_direction(2, contravariant_vectors, + normal_direction_x, _ = get_normal_direction(2, + contravariant_vectors, nnodes(dg), l, element) - gradients_x[v, nnodes(dg), l, element] = (gradients_x[v, nnodes(dg), l, + gradients_x[v, nnodes(dg), l, element] = (gradients_x[v, nnodes(dg), + l, element] + - surface_flux_values[v, l, 2, + surface_flux_values[v, l, + 2, element] * - factor_2 * normal_direction_x) + factor_2 * + normal_direction_x) # surface at -y - normal_direction_x, _ = get_normal_direction(3, contravariant_vectors, + normal_direction_x, _ = get_normal_direction(3, + contravariant_vectors, l, 1, element) gradients_x[v, l, 1, element] = (gradients_x[v, l, 1, element] + - surface_flux_values[v, l, 3, element] * + surface_flux_values[v, l, 3, + element] * factor_1 * normal_direction_x) # surface at +y - normal_direction_x, _ = get_normal_direction(4, contravariant_vectors, + normal_direction_x, _ = get_normal_direction(4, + contravariant_vectors, l, nnodes(dg), element) - gradients_x[v, l, nnodes(dg), element] = (gradients_x[v, l, nnodes(dg), + gradients_x[v, l, nnodes(dg), element] = (gradients_x[v, l, + nnodes(dg), element] + - surface_flux_values[v, l, 4, + surface_flux_values[v, l, + 4, element] * - factor_2 * normal_direction_x) + factor_2 * + normal_direction_x) # Compute y-component of gradients # surface at -x - _, normal_direction_y = get_normal_direction(1, contravariant_vectors, + _, normal_direction_y = get_normal_direction(1, + contravariant_vectors, 1, l, element) gradients_y[v, 1, l, element] = (gradients_y[v, 1, l, element] + - surface_flux_values[v, l, 1, element] * + surface_flux_values[v, l, 1, + element] * factor_1 * normal_direction_y) # surface at +x - _, normal_direction_y = get_normal_direction(2, contravariant_vectors, + _, normal_direction_y = get_normal_direction(2, + contravariant_vectors, nnodes(dg), l, element) - gradients_y[v, nnodes(dg), l, element] = (gradients_y[v, nnodes(dg), l, + gradients_y[v, nnodes(dg), l, element] = (gradients_y[v, nnodes(dg), + l, element] + - surface_flux_values[v, l, 2, + surface_flux_values[v, l, + 2, element] * - factor_2 * normal_direction_y) + factor_2 * + normal_direction_y) # surface at -y - _, normal_direction_y = get_normal_direction(3, contravariant_vectors, + _, normal_direction_y = get_normal_direction(3, + contravariant_vectors, l, 1, element) gradients_y[v, l, 1, element] = (gradients_y[v, l, 1, element] + - surface_flux_values[v, l, 3, element] * + surface_flux_values[v, l, 3, + element] * factor_1 * normal_direction_y) # surface at +y - _, normal_direction_y = get_normal_direction(4, contravariant_vectors, + _, normal_direction_y = get_normal_direction(4, + contravariant_vectors, l, nnodes(dg), element) - gradients_y[v, l, nnodes(dg), element] = (gradients_y[v, l, nnodes(dg), + gradients_y[v, l, nnodes(dg), element] = (gradients_y[v, l, + nnodes(dg), element] + - surface_flux_values[v, l, 4, + surface_flux_values[v, l, + 4, element] * - 
factor_2 * normal_direction_y) + factor_2 * + normal_direction_y) end end end @@ -444,24 +484,30 @@ function calc_volume_integral!(du, flux_viscous, @threaded for element in eachelement(dg, cache) # Calculate volume terms in one element for j in eachnode(dg), i in eachnode(dg) - flux1 = get_node_vars(flux_viscous_x, equations_parabolic, dg, i, j, element) - flux2 = get_node_vars(flux_viscous_y, equations_parabolic, dg, i, j, element) + flux1 = get_node_vars(flux_viscous_x, equations_parabolic, dg, i, j, + element) + flux2 = get_node_vars(flux_viscous_y, equations_parabolic, dg, i, j, + element) # Compute the contravariant flux by taking the scalar product of the # first contravariant vector Ja^1 and the flux vector - Ja11, Ja12 = get_contravariant_vector(1, contravariant_vectors, i, j, element) + Ja11, Ja12 = get_contravariant_vector(1, contravariant_vectors, i, j, + element) contravariant_flux1 = Ja11 * flux1 + Ja12 * flux2 for ii in eachnode(dg) - multiply_add_to_node_vars!(du, derivative_dhat[ii, i], contravariant_flux1, + multiply_add_to_node_vars!(du, derivative_dhat[ii, i], + contravariant_flux1, equations_parabolic, dg, ii, j, element) end # Compute the contravariant flux by taking the scalar product of the # second contravariant vector Ja^2 and the flux vector - Ja21, Ja22 = get_contravariant_vector(2, contravariant_vectors, i, j, element) + Ja21, Ja22 = get_contravariant_vector(2, contravariant_vectors, i, j, + element) contravariant_flux2 = Ja21 * flux1 + Ja22 * flux2 for jj in eachnode(dg) - multiply_add_to_node_vars!(du, derivative_dhat[jj, j], contravariant_flux2, + multiply_add_to_node_vars!(du, derivative_dhat[jj, j], + contravariant_flux2, equations_parabolic, dg, i, jj, element) end end @@ -503,7 +549,8 @@ function prolong2interfaces!(cache_parabolic, flux_viscous, # this is the outward normal direction on the primary element normal_direction = get_normal_direction(primary_direction, contravariant_vectors, - i_primary, j_primary, primary_element) + i_primary, j_primary, + primary_element) for v in eachvariable(equations_parabolic) # OBS! `interfaces.u` stores the interpolated *fluxes* and *not the solution*! @@ -602,7 +649,8 @@ function calc_interface_flux!(surface_flux_values, # primary element. We assume a BR-1 type of flux. 
viscous_flux_normal_ll, viscous_flux_normal_rr = get_surface_node_vars(cache_parabolic.interfaces.u, equations_parabolic, - dg, node, + dg, + node, interface) flux = 0.5 * (viscous_flux_normal_ll + viscous_flux_normal_rr) @@ -624,9 +672,11 @@ function calc_interface_flux!(surface_flux_values, end function prolong2mortars_divergence!(cache, flux_viscous::Vector{Array{uEltype, 4}}, - mesh::Union{P4estMesh{2}, T8codeMesh{2}}, equations, + mesh::Union{P4estMesh{2}, T8codeMesh{2}}, + equations, mortar_l2::LobattoLegendreMortarL2, - surface_integral, dg::DGSEM) where {uEltype <: Real} + surface_integral, + dg::DGSEM) where {uEltype <: Real} @unpack neighbor_ids, node_indices = cache.mortars @unpack contravariant_vectors = cache.elements index_range = eachnode(dg) @@ -683,7 +733,8 @@ function prolong2mortars_divergence!(cache, flux_viscous::Vector{Array{uEltype, j_large = j_large_start element = neighbor_ids[3, mortar] for i in eachnode(dg) - normal_direction = get_normal_direction(direction_index, contravariant_vectors, + normal_direction = get_normal_direction(direction_index, + contravariant_vectors, i_large, j_large, element) for v in eachvariable(equations) @@ -732,8 +783,10 @@ function calc_mortar_flux_divergence!(surface_flux_values, for position in 1:2 for node in eachnode(dg) for v in eachvariable(equations) - viscous_flux_normal_ll = cache.mortars.u[1, v, position, node, mortar] - viscous_flux_normal_rr = cache.mortars.u[2, v, position, node, mortar] + viscous_flux_normal_ll = cache.mortars.u[1, v, position, node, + mortar] + viscous_flux_normal_rr = cache.mortars.u[2, v, position, node, + mortar] # TODO: parabolic; only BR1 at the moment fstar[position][v, node] = 0.5 * (viscous_flux_normal_ll + @@ -824,7 +877,8 @@ end function calc_boundary_flux_gradients!(cache, t, boundary_condition::Union{BoundaryConditionPeriodic, BoundaryConditionDoNothing}, - mesh::P4estMesh, equations, surface_integral, dg::DG) + mesh::P4estMesh, equations, surface_integral, + dg::DG) @assert isempty(eachboundary(dg, cache)) end @@ -913,7 +967,8 @@ function calc_boundary_flux!(cache, t, boundary_index) # Outward-pointing normal direction (not normalized) - normal_direction = get_normal_direction(direction_index, contravariant_vectors, + normal_direction = get_normal_direction(direction_index, + contravariant_vectors, i_node, j_node, element) # TODO: revisit if we want more general boundary treatments. 
@@ -922,11 +977,13 @@ function calc_boundary_flux!(cache, t, flux_inner = u_inner # Coordinates at boundary node - x = get_node_coords(node_coordinates, equations_parabolic, dg, i_node, j_node, + x = get_node_coords(node_coordinates, equations_parabolic, dg, i_node, + j_node, element) flux_ = boundary_condition_parabolic(flux_inner, u_inner, normal_direction, - x, t, operator_type, equations_parabolic) + x, t, operator_type, + equations_parabolic) # Copy flux to element storage in the correct orientation for v in eachvariable(equations_parabolic) @@ -938,3 +995,22 @@ function calc_boundary_flux!(cache, t, end end end + +function apply_jacobian_parabolic!(du, mesh::P4estMesh{2}, + equations::AbstractEquationsParabolic, + dg::DG, cache) + @unpack inverse_jacobian = cache.elements + + @threaded for element in eachelement(dg, cache) + for j in eachnode(dg), i in eachnode(dg) + factor = inverse_jacobian[i, j, element] + + for v in eachvariable(equations) + du[v, i, j, element] *= factor + end + end + end + + return nothing +end +end # @muladd diff --git a/src/solvers/dgsem_p4est/dg_3d_parabolic.jl b/src/solvers/dgsem_p4est/dg_3d_parabolic.jl index 83d663809a7..63d431d35d5 100644 --- a/src/solvers/dgsem_p4est/dg_3d_parabolic.jl +++ b/src/solvers/dgsem_p4est/dg_3d_parabolic.jl @@ -1,7 +1,15 @@ +# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). +# Since these FMAs can increase the performance of many numerical algorithms, +# we need to opt-in explicitly. +# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. +@muladd begin +#! format: noindent + # This method is called when a SemidiscretizationHyperbolicParabolic is constructed. # It constructs the basic `cache` used throughout the simulation to compute # the RHS etc. 
-function create_cache_parabolic(mesh::P4estMesh{3}, equations_hyperbolic::AbstractEquations, +function create_cache_parabolic(mesh::P4estMesh{3}, + equations_hyperbolic::AbstractEquations, equations_parabolic::AbstractEquationsParabolic, dg::DG, parabolic_scheme, RealT, uEltype) balance!(mesh) @@ -73,11 +81,14 @@ function calc_gradient!(gradients, u_transformed, t, Ja31, Ja32, Ja33 = get_contravariant_vector(3, contravariant_vectors, i, j, k, element) - gradients_reference_1 = get_node_vars(gradients_x, equations_parabolic, dg, + gradients_reference_1 = get_node_vars(gradients_x, equations_parabolic, + dg, i, j, k, element) - gradients_reference_2 = get_node_vars(gradients_y, equations_parabolic, dg, + gradients_reference_2 = get_node_vars(gradients_y, equations_parabolic, + dg, i, j, k, element) - gradients_reference_3 = get_node_vars(gradients_z, equations_parabolic, dg, + gradients_reference_3 = get_node_vars(gradients_z, equations_parabolic, + dg, i, j, k, element) # note that the contravariant vectors are transposed compared with computations of flux @@ -115,7 +126,8 @@ function calc_gradient!(gradients, u_transformed, t, @trixi_timeit timer() "interface flux" begin calc_interface_flux!(cache_parabolic.elements.surface_flux_values, mesh, False(), # False() = no nonconservative terms - equations_parabolic, dg.surface_integral, dg, cache_parabolic) + equations_parabolic, dg.surface_integral, dg, + cache_parabolic) end # Prolong solution to boundaries @@ -127,7 +139,8 @@ function calc_gradient!(gradients, u_transformed, t, # Calculate boundary fluxes @trixi_timeit timer() "boundary flux" begin calc_boundary_flux_gradients!(cache_parabolic, t, boundary_conditions_parabolic, - mesh, equations_parabolic, dg.surface_integral, dg) + mesh, equations_parabolic, dg.surface_integral, + dg) end # Prolong solution to mortars. 
These should reuse the hyperbolic version of `prolong2mortars` @@ -165,7 +178,8 @@ function calc_gradient!(gradients, u_transformed, t, for dim in 1:3 grad = gradients[dim] # surface at -x - normal_direction = get_normal_direction(1, contravariant_vectors, + normal_direction = get_normal_direction(1, + contravariant_vectors, 1, l, m, element) grad[v, 1, l, m, element] = (grad[v, 1, l, m, element] + surface_flux_values[v, l, m, 1, @@ -173,18 +187,22 @@ function calc_gradient!(gradients, u_transformed, t, factor_1 * normal_direction[dim]) # surface at +x - normal_direction = get_normal_direction(2, contravariant_vectors, - nnodes(dg), l, m, element) + normal_direction = get_normal_direction(2, + contravariant_vectors, + nnodes(dg), l, m, + element) grad[v, nnodes(dg), l, m, element] = (grad[v, nnodes(dg), l, m, element] + - surface_flux_values[v, l, m, + surface_flux_values[v, l, + m, 2, element] * factor_2 * normal_direction[dim]) # surface at -y - normal_direction = get_normal_direction(3, contravariant_vectors, + normal_direction = get_normal_direction(3, + contravariant_vectors, l, m, 1, element) grad[v, l, 1, m, element] = (grad[v, l, 1, m, element] + surface_flux_values[v, l, m, 3, @@ -192,18 +210,22 @@ function calc_gradient!(gradients, u_transformed, t, factor_1 * normal_direction[dim]) # surface at +y - normal_direction = get_normal_direction(4, contravariant_vectors, - l, nnodes(dg), m, element) + normal_direction = get_normal_direction(4, + contravariant_vectors, + l, nnodes(dg), m, + element) grad[v, l, nnodes(dg), m, element] = (grad[v, l, nnodes(dg), m, element] + - surface_flux_values[v, l, m, + surface_flux_values[v, l, + m, 4, element] * factor_2 * normal_direction[dim]) # surface at -z - normal_direction = get_normal_direction(5, contravariant_vectors, + normal_direction = get_normal_direction(5, + contravariant_vectors, l, m, 1, element) grad[v, l, m, 1, element] = (grad[v, l, m, 1, element] + surface_flux_values[v, l, m, 5, @@ -211,11 +233,14 @@ function calc_gradient!(gradients, u_transformed, t, factor_1 * normal_direction[dim]) # surface at +z - normal_direction = get_normal_direction(6, contravariant_vectors, - l, m, nnodes(dg), element) + normal_direction = get_normal_direction(6, + contravariant_vectors, + l, m, nnodes(dg), + element) grad[v, l, m, nnodes(dg), element] = (grad[v, l, m, nnodes(dg), element] + - surface_flux_values[v, l, m, + surface_flux_values[v, l, + m, 6, element] * factor_2 * @@ -366,37 +391,46 @@ function calc_volume_integral!(du, flux_viscous, @threaded for element in eachelement(dg, cache) # Calculate volume terms in one element for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg) - flux1 = get_node_vars(flux_viscous_x, equations_parabolic, dg, i, j, k, element) - flux2 = get_node_vars(flux_viscous_y, equations_parabolic, dg, i, j, k, element) - flux3 = get_node_vars(flux_viscous_z, equations_parabolic, dg, i, j, k, element) + flux1 = get_node_vars(flux_viscous_x, equations_parabolic, dg, i, j, k, + element) + flux2 = get_node_vars(flux_viscous_y, equations_parabolic, dg, i, j, k, + element) + flux3 = get_node_vars(flux_viscous_z, equations_parabolic, dg, i, j, k, + element) # Compute the contravariant flux by taking the scalar product of the # first contravariant vector Ja^1 and the flux vector - Ja11, Ja12, Ja13 = get_contravariant_vector(1, contravariant_vectors, i, j, k, + Ja11, Ja12, Ja13 = get_contravariant_vector(1, contravariant_vectors, i, j, + k, element) contravariant_flux1 = Ja11 * flux1 + Ja12 * flux2 + Ja13 * flux3 for ii 
in eachnode(dg) - multiply_add_to_node_vars!(du, derivative_dhat[ii, i], contravariant_flux1, + multiply_add_to_node_vars!(du, derivative_dhat[ii, i], + contravariant_flux1, equations_parabolic, dg, ii, j, k, element) end # Compute the contravariant flux by taking the scalar product of the # second contravariant vector Ja^2 and the flux vector - Ja21, Ja22, Ja23 = get_contravariant_vector(2, contravariant_vectors, i, j, k, + Ja21, Ja22, Ja23 = get_contravariant_vector(2, contravariant_vectors, i, j, + k, element) contravariant_flux2 = Ja21 * flux1 + Ja22 * flux2 + Ja23 * flux3 for jj in eachnode(dg) - multiply_add_to_node_vars!(du, derivative_dhat[jj, j], contravariant_flux2, + multiply_add_to_node_vars!(du, derivative_dhat[jj, j], + contravariant_flux2, equations_parabolic, dg, i, jj, k, element) end # Compute the contravariant flux by taking the scalar product of the # second contravariant vector Ja^2 and the flux vector - Ja31, Ja32, Ja33 = get_contravariant_vector(3, contravariant_vectors, i, j, k, + Ja31, Ja32, Ja33 = get_contravariant_vector(3, contravariant_vectors, i, j, + k, element) contravariant_flux3 = Ja31 * flux1 + Ja32 * flux2 + Ja33 * flux3 for kk in eachnode(dg) - multiply_add_to_node_vars!(du, derivative_dhat[kk, k], contravariant_flux3, + multiply_add_to_node_vars!(du, derivative_dhat[kk, k], + contravariant_flux3, equations_parabolic, dg, i, j, kk, element) end end @@ -574,7 +608,8 @@ function calc_interface_flux!(surface_flux_values, viscous_flux_normal_ll, viscous_flux_normal_rr = get_surface_node_vars(cache_parabolic.interfaces.u, equations_parabolic, dg, - i, j, + i, + j, interface) flux = 0.5 * (viscous_flux_normal_ll + viscous_flux_normal_rr) @@ -606,7 +641,8 @@ function calc_interface_flux!(surface_flux_values, end function prolong2mortars_divergence!(cache, flux_viscous, - mesh::Union{P4estMesh{3}, T8codeMesh{3}}, equations, + mesh::Union{P4estMesh{3}, T8codeMesh{3}}, + equations, mortar_l2::LobattoLegendreMortarL2, surface_integral, dg::DGSEM) @unpack neighbor_ids, node_indices = cache.mortars @@ -642,11 +678,14 @@ function prolong2mortars_divergence!(cache, flux_viscous, element) for v in eachvariable(equations) - flux_viscous = SVector(flux_viscous_x[v, i_small, j_small, k_small, + flux_viscous = SVector(flux_viscous_x[v, i_small, j_small, + k_small, element], - flux_viscous_y[v, i_small, j_small, k_small, + flux_viscous_y[v, i_small, j_small, + k_small, element], - flux_viscous_z[v, i_small, j_small, k_small, + flux_viscous_z[v, i_small, j_small, + k_small, element]) cache.mortars.u[1, v, position, i, j, mortar] = dot(flux_viscous, @@ -688,7 +727,8 @@ function prolong2mortars_divergence!(cache, flux_viscous, for i in eachnode(dg) normal_direction = get_normal_direction(direction_index, contravariant_vectors, - i_large, j_large, k_large, element) + i_large, j_large, k_large, + element) for v in eachvariable(equations) flux_viscous = SVector(flux_viscous_x[v, i_large, j_large, k_large, @@ -827,7 +867,8 @@ end # TODO: parabolic; only BR1 at the moment flux_ = 0.5 * (u_ll + u_rr) # Copy flux to buffer - set_node_vars!(fstar, flux_, equations, dg, i_node_index, j_node_index, position_index) + set_node_vars!(fstar, flux_, equations, dg, i_node_index, j_node_index, + position_index) end # TODO: parabolic, finish implementing `calc_boundary_flux_gradients!` and `calc_boundary_flux_divergence!` @@ -862,7 +903,8 @@ function prolong2boundaries!(cache_parabolic, flux_viscous, for j in eachnode(dg) for i in eachnode(dg) # this is the outward normal direction on the 
primary element - normal_direction = get_normal_direction(direction, contravariant_vectors, + normal_direction = get_normal_direction(direction, + contravariant_vectors, i_node, j_node, k_node, element) for v in eachvariable(equations_parabolic) @@ -873,7 +915,8 @@ function prolong2boundaries!(cache_parabolic, flux_viscous, flux_viscous_z[v, i_node, j_node, k_node, element]) - boundaries.u[v, i, j, boundary] = dot(flux_viscous, normal_direction) + boundaries.u[v, i, j, boundary] = dot(flux_viscous, + normal_direction) end i_node += i_node_step_i j_node += j_node_step_i @@ -940,7 +983,8 @@ function calc_boundary_flux!(cache, t, j_node, k_node, element) - flux_ = boundary_condition_parabolic(flux_inner, u_inner, normal_direction, + flux_ = boundary_condition_parabolic(flux_inner, u_inner, + normal_direction, x, t, operator_type, equations_parabolic) @@ -959,3 +1003,22 @@ function calc_boundary_flux!(cache, t, end end end + +function apply_jacobian_parabolic!(du, mesh::P4estMesh{3}, + equations::AbstractEquationsParabolic, + dg::DG, cache) + @unpack inverse_jacobian = cache.elements + + @threaded for element in eachelement(dg, cache) + for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg) + factor = inverse_jacobian[i, j, k, element] + + for v in eachvariable(equations) + du[v, i, j, k, element] *= factor + end + end + end + + return nothing +end +end # @muladd diff --git a/src/solvers/dgsem_tree/dg_2d_parabolic.jl b/src/solvers/dgsem_tree/dg_2d_parabolic.jl index b1c27343999..a6c962e03cd 100644 --- a/src/solvers/dgsem_tree/dg_2d_parabolic.jl +++ b/src/solvers/dgsem_tree/dg_2d_parabolic.jl @@ -951,22 +951,4 @@ function apply_jacobian_parabolic!(du, mesh::TreeMesh{2}, return nothing end - -function apply_jacobian_parabolic!(du, mesh::P4estMesh{2}, - equations::AbstractEquationsParabolic, - dg::DG, cache) - @unpack inverse_jacobian = cache.elements - - @threaded for element in eachelement(dg, cache) - for j in eachnode(dg), i in eachnode(dg) - factor = inverse_jacobian[i, j, element] - - for v in eachvariable(equations) - du[v, i, j, element] *= factor - end - end - end - - return nothing -end end # @muladd diff --git a/src/solvers/dgsem_tree/dg_3d_parabolic.jl b/src/solvers/dgsem_tree/dg_3d_parabolic.jl index ee0e7c6b069..d5504744742 100644 --- a/src/solvers/dgsem_tree/dg_3d_parabolic.jl +++ b/src/solvers/dgsem_tree/dg_3d_parabolic.jl @@ -1033,22 +1033,4 @@ function apply_jacobian_parabolic!(du, mesh::TreeMesh{3}, return nothing end - -function apply_jacobian_parabolic!(du, mesh::P4estMesh{3}, - equations::AbstractEquationsParabolic, - dg::DG, cache) - @unpack inverse_jacobian = cache.elements - - @threaded for element in eachelement(dg, cache) - for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg) - factor = inverse_jacobian[i, j, k, element] - - for v in eachvariable(equations) - du[v, i, j, k, element] *= factor - end - end - end - - return nothing -end end # @muladd diff --git a/test/test_parabolic_2d.jl b/test/test_parabolic_2d.jl index 6632cd0bb27..f7185a1a904 100644 --- a/test/test_parabolic_2d.jl +++ b/test/test_parabolic_2d.jl @@ -561,8 +561,8 @@ end @test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", "elixir_advection_diffusion_nonperiodic_amr.jl"), tspan=(0.0, 0.01), - l2=[0.00793438523666649], - linf=[0.11030633127144573]) + l2=[0.007933791324450538], + linf=[0.11029480573492567]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let From 3ed62fb4e3bbd034b49fa452e4034c909b3a549b Mon Sep 17 00:00:00 2001 From: 
"dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 1 Feb 2024 18:01:29 +0100 Subject: [PATCH 092/166] Bump actions/cache from 3 to 4 (#1828) Bumps [actions/cache](https://github.com/actions/cache) from 3 to 4. - [Release notes](https://github.com/actions/cache/releases) - [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md) - [Commits](https://github.com/actions/cache/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/cache dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/benchmark.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 6aa4809c1c2..4531c3aee0a 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -26,7 +26,7 @@ jobs: version: ${{ matrix.version }} arch: ${{ matrix.arch }} show-versioninfo: true - - uses: actions/cache@v3 + - uses: actions/cache@v4 env: cache-name: cache-artifacts with: From db1d7054b7832cb56dddda1076921dfd0476a2b2 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Fri, 2 Feb 2024 08:46:49 +0100 Subject: [PATCH 093/166] fix typo leafs -> leaves --- src/meshes/t8code_mesh.jl | 38 +++++++++++++++++++------------------- test/test_parabolic_2d.jl | 12 ++++++------ test/test_parabolic_3d.jl | 28 ++++++++++++++-------------- 3 files changed, 39 insertions(+), 39 deletions(-) diff --git a/src/meshes/t8code_mesh.jl b/src/meshes/t8code_mesh.jl index 6fb4d861d10..cb2ac787e14 100644 --- a/src/meshes/t8code_mesh.jl +++ b/src/meshes/t8code_mesh.jl @@ -1,7 +1,7 @@ """ T8codeMesh{NDIMS} <: AbstractMesh{NDIMS} -An unstructured curved mesh based on trees that uses the C library +An unstructured curved mesh based on trees that uses the C library ['t8code'](https://github.com/DLR-AMR/t8code) to manage trees and mesh refinement. """ @@ -485,7 +485,7 @@ end # form a family and we decide whether this family should be coarsened # or only the first element should be refined. # Otherwise `is_family` must equal zero and we consider the first entry -# of the element array for refinement. +# of the element array for refinement. # Entries of the element array beyond the first `num_elements` are undefined. # \param [in] forest the forest to which the new elements belong # \param [in] forest_from the forest that is adapted. @@ -542,8 +542,8 @@ Adapt a `T8codeMesh` according to a user-defined `adapt_callback`. 0 : Stay unchanged. 1 : Refine element. -- `kwargs`: - - `recursive = true`: Adapt the forest recursively. If true the caller must ensure that the callback +- `kwargs`: + - `recursive = true`: Adapt the forest recursively. If true the caller must ensure that the callback returns 0 for every analyzed element at some point to stop the recursion. - `balance = true`: Make sure the adapted forest is 2^(NDIMS-1):1 balanced. - `partition = true`: Partition the forest to redistribute elements evenly among MPI ranks. 
@@ -695,7 +695,7 @@ function count_interfaces(mesh::T8codeMesh) for iface in 0:(num_faces - 1) pelement_indices_ref = Ref{Ptr{t8_locidx_t}}() - pneighbor_leafs_ref = Ref{Ptr{Ptr{t8_element}}}() + pneighbor_leaves_ref = Ref{Ptr{Ptr{t8_element}}}() pneigh_scheme_ref = Ref{Ptr{t8_eclass_scheme}}() dual_faces_ref = Ref{Ptr{Cint}}() @@ -704,7 +704,7 @@ function count_interfaces(mesh::T8codeMesh) forest_is_balanced = Cint(1) t8_forest_leaf_face_neighbors(mesh.forest, itree, element, - pneighbor_leafs_ref, iface, dual_faces_ref, + pneighbor_leaves_ref, iface, dual_faces_ref, num_neighbors_ref, pelement_indices_ref, pneigh_scheme_ref, forest_is_balanced) @@ -713,13 +713,13 @@ function count_interfaces(mesh::T8codeMesh) dual_faces = unsafe_wrap(Array, dual_faces_ref[], num_neighbors) neighbor_ielements = unsafe_wrap(Array, pelement_indices_ref[], num_neighbors) - neighbor_leafs = unsafe_wrap(Array, pneighbor_leafs_ref[], num_neighbors) + neighbor_leaves = unsafe_wrap(Array, pneighbor_leaves_ref[], num_neighbors) neighbor_scheme = pneigh_scheme_ref[] if num_neighbors == 0 local_num_boundary += 1 else - neighbor_level = t8_element_level(neighbor_scheme, neighbor_leafs[1]) + neighbor_level = t8_element_level(neighbor_scheme, neighbor_leaves[1]) if all(neighbor_ielements .< num_local_elements) # Conforming interface: The second condition ensures we @@ -745,7 +745,7 @@ function count_interfaces(mesh::T8codeMesh) neighbor_linear_id = neighbor_global_ghost_itree * max_tree_num_elements + t8_element_get_linear_id(neighbor_scheme, - neighbor_leafs[1], + neighbor_leaves[1], max_level) global_mortar_id = 2 * ndims(mesh) * neighbor_linear_id + dual_faces[1] @@ -759,7 +759,7 @@ function count_interfaces(mesh::T8codeMesh) end t8_free(dual_faces_ref[]) - t8_free(pneighbor_leafs_ref[]) + t8_free(pneighbor_leaves_ref[]) t8_free(pelement_indices_ref[]) end # for @@ -875,7 +875,7 @@ function fill_mesh_info!(mesh::T8codeMesh, interfaces, mortars, boundaries, end pelement_indices_ref = Ref{Ptr{t8_locidx_t}}() - pneighbor_leafs_ref = Ref{Ptr{Ptr{t8_element}}}() + pneighbor_leaves_ref = Ref{Ptr{Ptr{t8_element}}}() pneigh_scheme_ref = Ref{Ptr{t8_eclass_scheme}}() dual_faces_ref = Ref{Ptr{Cint}}() @@ -885,7 +885,7 @@ function fill_mesh_info!(mesh::T8codeMesh, interfaces, mortars, boundaries, # Query neighbor information from t8code. t8_forest_leaf_face_neighbors(mesh.forest, itree, element, - pneighbor_leafs_ref, iface, dual_faces_ref, + pneighbor_leaves_ref, iface, dual_faces_ref, num_neighbors_ref, pelement_indices_ref, pneigh_scheme_ref, forest_is_balanced) @@ -894,7 +894,7 @@ function fill_mesh_info!(mesh::T8codeMesh, interfaces, mortars, boundaries, dual_faces = unsafe_wrap(Array, dual_faces_ref[], num_neighbors) neighbor_ielements = unsafe_wrap(Array, pelement_indices_ref[], num_neighbors) - neighbor_leafs = unsafe_wrap(Array, pneighbor_leafs_ref[], num_neighbors) + neighbor_leaves = unsafe_wrap(Array, pneighbor_leaves_ref[], num_neighbors) neighbor_scheme = pneigh_scheme_ref[] # Now we check for the different cases. The nested if-structure is as follows: @@ -913,7 +913,7 @@ function fill_mesh_info!(mesh::T8codeMesh, interfaces, mortars, boundaries, # else: // `local mortar from smaller elements point of view` # // We only count local mortars once. # - # else: // It must be either a MPI interface or a MPI mortar. + # else: // It must be either a MPI interface or a MPI mortar. 
# # if `MPI interface`: # @@ -938,7 +938,7 @@ function fill_mesh_info!(mesh::T8codeMesh, interfaces, mortars, boundaries, # Interface or mortar. else - neighbor_level = t8_element_level(neighbor_scheme, neighbor_leafs[1]) + neighbor_level = t8_element_level(neighbor_scheme, neighbor_leaves[1]) # Local interface or mortar. if all(neighbor_ielements .< num_local_elements) @@ -985,7 +985,7 @@ function fill_mesh_info!(mesh::T8codeMesh, interfaces, mortars, boundaries, neighbor_linear_id = neighbor_global_ghost_itree * max_tree_num_elements + t8_element_get_linear_id(neighbor_scheme, - neighbor_leafs[1], + neighbor_leaves[1], max_level) if current_linear_id < neighbor_linear_id @@ -1029,7 +1029,7 @@ function fill_mesh_info!(mesh::T8codeMesh, interfaces, mortars, boundaries, num_local_elements) local_neighbor_ids = [neighbor_ids[i] for i in local_neighbor_positions] - local_neighbor_positions = [map_iface_to_ichild_to_position[dual_faces[1] + 1][t8_element_child_id(neighbor_scheme, neighbor_leafs[i]) + 1] + local_neighbor_positions = [map_iface_to_ichild_to_position[dual_faces[1] + 1][t8_element_child_id(neighbor_scheme, neighbor_leaves[i]) + 1] for i in local_neighbor_positions] # Last entry is the large element. @@ -1059,7 +1059,7 @@ function fill_mesh_info!(mesh::T8codeMesh, interfaces, mortars, boundaries, neighbor_linear_id = neighbor_global_ghost_itree * max_tree_num_elements + t8_element_get_linear_id(neighbor_scheme, - neighbor_leafs[1], + neighbor_leaves[1], max_level) global_mortar_id = 2 * ndims(mesh) * neighbor_linear_id + dual_faces[1] @@ -1100,7 +1100,7 @@ function fill_mesh_info!(mesh::T8codeMesh, interfaces, mortars, boundaries, end t8_free(dual_faces_ref[]) - t8_free(pneighbor_leafs_ref[]) + t8_free(pneighbor_leaves_ref[]) t8_free(pelement_indices_ref[]) end # for iface diff --git a/test/test_parabolic_2d.jl b/test/test_parabolic_2d.jl index f7185a1a904..9f1382caa62 100644 --- a/test/test_parabolic_2d.jl +++ b/test/test_parabolic_2d.jl @@ -218,9 +218,9 @@ end "elixir_advection_diffusion.jl"), tspan=(0.0, 0.0)) LLID = Trixi.local_leaf_cells(mesh.tree) - num_leafs = length(LLID) - @assert num_leafs % 8 == 0 - Trixi.refine!(mesh.tree, LLID[1:Int(num_leafs / 8)]) + num_leaves = length(LLID) + @assert num_leaves % 8 == 0 + Trixi.refine!(mesh.tree, LLID[1:Int(num_leaves / 8)]) tspan = (0.0, 1.5) semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, equations_parabolic), @@ -414,9 +414,9 @@ end "elixir_navierstokes_convergence.jl"), tspan=(0.0, 0.0), initial_refinement_level=3) LLID = Trixi.local_leaf_cells(mesh.tree) - num_leafs = length(LLID) - @assert num_leafs % 4 == 0 - Trixi.refine!(mesh.tree, LLID[1:Int(num_leafs / 4)]) + num_leaves = length(LLID) + @assert num_leaves % 4 == 0 + Trixi.refine!(mesh.tree, LLID[1:Int(num_leaves / 4)]) tspan = (0.0, 0.5) semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, equations_parabolic), initial_condition, solver; diff --git a/test/test_parabolic_3d.jl b/test/test_parabolic_3d.jl index 6fbfb8259d4..1eaa9f51a56 100644 --- a/test/test_parabolic_3d.jl +++ b/test/test_parabolic_3d.jl @@ -252,9 +252,9 @@ end "elixir_navierstokes_convergence.jl"), tspan=(0.0, 0.0)) LLID = Trixi.local_leaf_cells(mesh.tree) - num_leafs = length(LLID) - @assert num_leafs % 16 == 0 - Trixi.refine!(mesh.tree, LLID[1:Int(num_leafs / 16)]) + num_leaves = length(LLID) + @assert num_leaves % 16 == 0 + Trixi.refine!(mesh.tree, LLID[1:Int(num_leaves / 16)]) tspan = (0.0, 0.25) semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, 
equations_parabolic), initial_condition, solver; @@ -325,9 +325,9 @@ end "elixir_navierstokes_taylor_green_vortex.jl"), tspan=(0.0, 0.0)) LLID = Trixi.local_leaf_cells(mesh.tree) - num_leafs = length(LLID) - @assert num_leafs % 32 == 0 - Trixi.refine!(mesh.tree, LLID[1:Int(num_leafs / 32)]) + num_leaves = length(LLID) + @assert num_leaves % 32 == 0 + Trixi.refine!(mesh.tree, LLID[1:Int(num_leaves / 32)]) tspan = (0.0, 0.1) semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, equations_parabolic), initial_condition, solver) @@ -429,8 +429,8 @@ end "elixir_advection_diffusion_amr.jl"), l2=[0.000355780485397024], linf=[0.0010810770271614256]) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) let t = sol.t[end] u_ode = sol.u[end] @@ -444,8 +444,8 @@ end "elixir_advection_diffusion_nonperiodic.jl"), l2=[0.0009808996243280868], linf=[0.01732621559135459]) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) let t = sol.t[end] u_ode = sol.u[end] @@ -472,8 +472,8 @@ end 0.12129218723807476, 0.8433893297612087, ]) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) let t = sol.t[end] u_ode = sol.u[end] @@ -495,8 +495,8 @@ end 0.6782397526873181, 0.17663702154066238, 0.17663702154066266, 0.17663702154066238, 1.7327849844825238, ]) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) let t = sol.t[end] u_ode = sol.u[end] From 7f7f058d0721bc1634e9fee69a8d136b9ea57bc7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 2 Feb 2024 08:47:41 +0100 Subject: [PATCH 094/166] Bump crate-ci/typos from 1.16.26 to 1.18.0 (#1826) Bumps [crate-ci/typos](https://github.com/crate-ci/typos) from 1.16.26 to 1.18.0. - [Release notes](https://github.com/crate-ci/typos/releases) - [Changelog](https://github.com/crate-ci/typos/blob/master/CHANGELOG.md) - [Commits](https://github.com/crate-ci/typos/compare/v1.16.26...v1.18.0) --- updated-dependencies: - dependency-name: crate-ci/typos dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Hendrik Ranocha --- .github/workflows/SpellCheck.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/SpellCheck.yml b/.github/workflows/SpellCheck.yml index a780e975155..b242b6e811e 100644 --- a/.github/workflows/SpellCheck.yml +++ b/.github/workflows/SpellCheck.yml @@ -10,4 +10,4 @@ jobs: - name: Checkout Actions Repository uses: actions/checkout@v4 - name: Check spelling - uses: crate-ci/typos@v1.16.26 + uses: crate-ci/typos@v1.18.0 From fa129aa3be558df6246b7339ce2bc966d76bcc6a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 2 Feb 2024 14:47:13 +0100 Subject: [PATCH 095/166] Bump codecov/codecov-action from 3 to 4 (#1827) Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 3 to 4. 
- [Release notes](https://github.com/codecov/codecov-action/releases) - [Changelog](https://github.com/codecov/codecov-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/codecov/codecov-action/compare/v3...v4) --- updated-dependencies: - dependency-name: codecov/codecov-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f287cc5feb2..2e388366fc8 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -129,7 +129,7 @@ jobs: - uses: julia-actions/julia-processcoverage@v1 with: directories: src,examples,ext - - uses: codecov/codecov-action@v3 + - uses: codecov/codecov-action@v4 with: file: ./lcov.info flags: unittests From ea61e26ff330e720b011a85a2f4b9040e54da870 Mon Sep 17 00:00:00 2001 From: Benjamin Bolm <74359358+bennibolm@users.noreply.github.com> Date: Mon, 5 Feb 2024 11:50:25 +0100 Subject: [PATCH 096/166] Add section about false sharing problems to documentation (#1819) * Add section to docs about false sharing * Fix typos * Fix typo * Implement suggestions --- docs/src/performance.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/docs/src/performance.md b/docs/src/performance.md index df66f451b79..82d7f501f63 100644 --- a/docs/src/performance.md +++ b/docs/src/performance.md @@ -267,3 +267,14 @@ requires. It can thus be seen as a proxy for "energy used" and, as an extension, timing result, you need to set the analysis interval such that the `AnalysisCallback` is invoked at least once during the course of the simulation and discard the first PID value. + +## Performance issues with multi-threaded reductions +[False sharing](https://en.wikipedia.org/wiki/False_sharing) is a known performance issue +for systems with distributed caches. It also occurred for the implementation of a thread +parallel bounds checking routine for the subcell IDP limiting +in [PR #1736](https://github.com/trixi-framework/Trixi.jl/pull/1736). +After some [testing and discussion](https://github.com/trixi-framework/Trixi.jl/pull/1736#discussion_r1423881895), +it turned out that initializing a vector of length `n * Threads.nthreads()` and only using every +n-th entry instead of a vector of length `Threads.nthreads()` fixes the problem. +Since there are no processors with caches over 128B, we use `n = 128B / size(uEltype)`. +Now, the bounds checking routine of the IDP limiting scales as hoped. From 14151e636ef654cb5421b3a7e498a9d76ed46a64 Mon Sep 17 00:00:00 2001 From: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> Date: Mon, 5 Feb 2024 12:49:51 +0100 Subject: [PATCH 097/166] Add entry for gmsh tutorial in introduction (#1829) * add entry for gmsh tutorial in introduction * Update docs/literate/src/files/index.jl --------- Co-authored-by: Daniel Doehring Co-authored-by: Daniel Doehring --- docs/literate/src/files/index.jl | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/docs/literate/src/files/index.jl b/docs/literate/src/files/index.jl index e259d25fb2f..26637e5b24b 100644 --- a/docs/literate/src/files/index.jl +++ b/docs/literate/src/files/index.jl @@ -108,20 +108,26 @@ # software in the Trixi.jl ecosystem, and then run a simulation using Trixi.jl on said mesh. 
# In the end, the tutorial briefly explains how to simulate an example using AMR via `P4estMesh`. -# ### [15 Explicit time stepping](@ref time_stepping) +# ### [15 P4est mesh from gmsh](@ref p4est_from_gmsh) +#- +# This tutorial describes how to obtain a [`P4estMesh`](@ref) from an existing mesh generated +# by [`gmsh`](https://gmsh.info/) or any other meshing software that can export to the Abaqus +# input `.inp` format. The tutorial demonstrates how edges/faces can be associated with boundary conditions based on the physical nodesets. + +# ### [16 Explicit time stepping](@ref time_stepping) #- # This tutorial is about time integration using [OrdinaryDiffEq.jl](https://github.com/SciML/OrdinaryDiffEq.jl). # It explains how to use their algorithms and presents two types of time step choices - with error-based # and CFL-based adaptive step size control. -# ### [16 Differentiable programming](@ref differentiable_programming) +# ### [17 Differentiable programming](@ref differentiable_programming) #- # This part deals with some basic differentiable programming topics. For example, a Jacobian, its # eigenvalues and a curve of total energy (through the simulation) are calculated and plotted for # a few semidiscretizations. Moreover, we calculate an example for propagating errors with Measurement.jl # at the end. -# ### [17 Custom semidiscretization](@ref custom_semidiscretization) +# ### [18 Custom semidiscretization](@ref custom_semidiscretization) #- # This tutorial describes the [semidiscretiations](@ref overview-semidiscretizations) of Trixi.jl # and explains how to extend them for custom tasks. From 5fec7f42121be7d01d8645fc91fe0da8567f3946 Mon Sep 17 00:00:00 2001 From: ArseniyKholod <119304909+ArseniyKholod@users.noreply.github.com> Date: Tue, 6 Feb 2024 20:52:50 +0100 Subject: [PATCH 098/166] Getting started with trixi (#1343) * 0th tutorial v1 * 0th tutorial v2 * 0th tutorial v3 (topic for developers) * 0th tutorial v4 * 0th tutorial v5 * 0th tutorial v6 * 0th tutorial v6.1 * 0th tutorial v6.2 * 0th tutorial v7 (new example) * 0th tutorial v8 * 0th tutorial v8.1 * 0th tutorial v9 New structure + new usage example * 0th tutorial v9.1 * 0th tutorial v9.2 * Revert "0th tutorial v9.2" This reverts commit e2da5c24c71ec5ebb86ead4da9e6dc33f8b2cf7f. * 0th tutorial v9.3 (test) * Revert "0th tutorial v9.3 (test)" This reverts commit 05d3d1c22729fc65f6a85bc224e0376bf7cc79ac. * 0th tutorial v9.3 (test) * 0th tutorial v9.3 (test checks without diff. prog.) * 0th tutorial v9.3 (test new diff. prog.) * 0th tutorial v9.3 (test new diff. prog. v2) * 0th tutorial v9.3 (test update of packages) * 0th tutorial v9.3 (test update of packages v2) * 0th tutorial v9.3 (downgrade Measurements.jl) * 0th tutorial v9.4 * 0th tutorial v9.5 * 0th tutorial review * 0th tutorial review 2 * 0th tutorial v9.6 * delete test files * Revert "rename into getting_started.jl" This reverts commit 6605ece69240d8bc850c6ef0b14b94beaac5eee6. 
* Update docs/make.jl Co-authored-by: Michael Schlottke-Lakemper * Update Project.toml Co-authored-by: Michael Schlottke-Lakemper * Update Project.toml Co-authored-by: Michael Schlottke-Lakemper * Update docs/make.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * rename into getting_started.jl * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * correction of spelling errors * cleaning out directory * Correction according to the comments above * Trixi installation for Linux * Update getting_started.jl * cross-referencing correction * spelling * Update .gitignore Co-authored-by: Michael Schlottke-Lakemper * Update docs/make.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update .gitignore * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update 
docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * new plot in Modifying an existing setup * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * julia/shell-formatting * Update getting_started.jl * Update getting_started.jl * Correction of Modifying part * Usage update * Update getting_started.jl * add Visualize the solution * spell check * correction * correction * add picture paraview * divide in two parts & correction * divide in 3 parts * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update create_first_setup.jl * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update getting_started.jl * 
Update getting_started.jl * Update create_first_setup.jl * Update changing_trixi.jl * Update changing_trixi.jl * Update docs/literate/src/files/getting_started.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/getting_started.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/getting_started.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/getting_started.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/getting_started.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/getting_started.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/getting_started.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/getting_started.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/getting_started.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/getting_started.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * move files to subfolder * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update create_first_setup.jl * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl 
Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/changing_trixi.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/changing_trixi.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/changing_trixi.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/changing_trixi.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/changing_trixi.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * add transition between 2nd and 3rd parts * Update docs/make.jl Co-authored-by: Michael Schlottke-Lakemper * clear out folder * Update docs/literate/src/files/first_steps/getting_started.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/getting_started.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/getting_started.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update getting_started.jl * Update docs/literate/src/files/first_steps/getting_started.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/getting_started.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/getting_started.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/getting_started.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/getting_started.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/getting_started.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update getting_started.jl * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert 
<51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update create_first_setup.jl * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update create_first_setup.jl * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/changing_trixi.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/changing_trixi.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/changing_trixi.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update 
docs/literate/src/files/first_steps/changing_trixi.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/changing_trixi.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/changing_trixi.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * spelling * delete julia, tabs * add juliaup * add intro * rm * Apply suggestions from code review Co-authored-by: Daniel Doehring * Apply suggestions from code review Co-authored-by: Daniel Doehring * Update docs/literate/src/files/first_steps/getting_started.jl * Update docs/literate/src/files/first_steps/changing_trixi.jl Co-authored-by: Andrew Winters * Update docs/literate/src/files/first_steps/changing_trixi.jl Co-authored-by: Andrew Winters * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Andrew Winters * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Andrew Winters * add save solution dt --------- Co-authored-by: Michael Schlottke-Lakemper Co-authored-by: Simon Candelaresi <10759273+SimonCan@users.noreply.github.com> Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> Co-authored-by: Daniel Doehring Co-authored-by: Andrew Winters --- .../src/files/first_steps/changing_trixi.jl | 77 +++++ .../files/first_steps/create_first_setup.jl | 268 ++++++++++++++++++ .../src/files/first_steps/getting_started.jl | 242 ++++++++++++++++ docs/literate/src/files/index.jl | 43 +-- docs/make.jl | 6 + 5 files changed, 618 insertions(+), 18 deletions(-) create mode 100644 docs/literate/src/files/first_steps/changing_trixi.jl create mode 100644 docs/literate/src/files/first_steps/create_first_setup.jl create mode 100644 docs/literate/src/files/first_steps/getting_started.jl diff --git a/docs/literate/src/files/first_steps/changing_trixi.jl b/docs/literate/src/files/first_steps/changing_trixi.jl new file mode 100644 index 00000000000..551377a6a71 --- /dev/null +++ b/docs/literate/src/files/first_steps/changing_trixi.jl @@ -0,0 +1,77 @@ +#src # Changing Trixi.jl itself + +# If you plan on editing Trixi.jl itself, you can download Trixi.jl locally and run it from +# the cloned directory. + + +# ## Cloning Trixi.jl + + +# ### Windows + +# If you are using Windows, you can clone Trixi.jl by using the GitHub Desktop tool: +# - If you do not have a GitHub account yet, create it on +# the [GitHub website](https://github.com/join). +# - Download and install [GitHub Desktop](https://desktop.github.com/) and then log in to +# your account. +# - Open GitHub Desktop, press `Ctrl+Shift+O`. +# - In the opened window, paste `trixi-framework/Trixi.jl` and choose the path to the folder where +# you want to save Trixi.jl. Then click `Clone` and Trixi.jl will be cloned to your computer. + +# Now you cloned Trixi.jl and only need to tell Julia to use the local clone as the package sources: +# - Open a terminal using `Win+r` and `cmd`. Navigate to the folder with the cloned Trixi.jl using `cd`. +# - Create a new directory `run`, enter it, and start Julia with the `--project=.` flag: +# ```shell +# mkdir run +# cd run +# julia --project=. 
+# ``` +# - Now run the following commands to install all relevant packages: +# ```julia +# using Pkg; Pkg.develop(PackageSpec(path="..")) # Tell Julia to use the local Trixi.jl clone +# Pkg.add(["OrdinaryDiffEq", "Plots"]) # Install additional packages +# ``` + +# Now you already installed Trixi.jl from your local clone. Note that if you installed Trixi.jl +# this way, you always have to start Julia with the `--project` flag set to your `run` directory, +# e.g., +# ```shell +# julia --project=. +# ``` +# if already inside the `run` directory. + + +# ### Linux + +# You can clone Trixi.jl to your computer by executing the following commands: +# ```shell +# git clone git@github.com:trixi-framework/Trixi.jl.git +# # If an error occurs, try the following: +# # git clone https://github.com/trixi-framework/Trixi.jl +# cd Trixi.jl +# mkdir run +# cd run +# julia --project=. -e 'using Pkg; Pkg.develop(PackageSpec(path=".."))' # Tell Julia to use the local Trixi.jl clone +# julia --project=. -e 'using Pkg; Pkg.add(["OrdinaryDiffEq", "Plots"])' # Install additional packages +# ``` +# Note that if you installed Trixi.jl this way, +# you always have to start Julia with the `--project` flag set to your `run` directory, e.g., +# ```shell +# julia --project=. +# ``` +# if already inside the `run` directory. + + +# ## Additional reading + +# To further delve into Trixi.jl, you may have a look at the following introductory tutorials. +# - [Introduction to DG methods](@ref scalar_linear_advection_1d) will teach you how to set up a +# simple way to approximate the solution of a hyperbolic partial differential equation. It will +# be especially useful to learn about the +# [Discontinuous Galerkin method](https://en.wikipedia.org/wiki/Discontinuous_Galerkin_method) +# and the way it is implemented in Trixi.jl. +# - [Adding a new scalar conservation law](@ref adding_new_scalar_equations) and +# [Adding a non-conservative equation](@ref adding_nonconservative_equation) +# describe how to add new physics models that are not yet included in Trixi.jl. +# - [Callbacks](@ref callbacks-id) gives an overview of how to regularly execute specific actions +# during a simulation, e.g., to store the solution or adapt the mesh. diff --git a/docs/literate/src/files/first_steps/create_first_setup.jl b/docs/literate/src/files/first_steps/create_first_setup.jl new file mode 100644 index 00000000000..906a6f93461 --- /dev/null +++ b/docs/literate/src/files/first_steps/create_first_setup.jl @@ -0,0 +1,268 @@ +#src # Create first setup + +# In this part of the introductory guide, we will create a first Trixi.jl setup as an extension of +# [`elixir_advection_basic.jl`](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_dgsem/elixir_advection_basic.jl). +# Since Trixi.jl has a common basic structure for the setups, you can create your own by extending +# and modifying the following example. + +# Let's consider the linear advection equation for a state ``u = u(x, y, t)`` on the two-dimensional spatial domain +# ``[-1, 1] \times [-1, 1]`` with a source term +# ```math +# \frac{\partial}{\partial t}u + \frac{\partial}{\partial x} (0.2 u) - \frac{\partial}{\partial y} (0.7 u) = - 2 e^{-t} +# \sin\bigl(2 \pi (x - t) \bigr) \sin\bigl(2 \pi (y - t) \bigr), +# ``` +# with the initial condition +# ```math +# u(x, y, 0) = \sin\bigl(\pi x \bigr) \sin\bigl(\pi y \bigr), +# ``` +# and periodic boundary conditions. + +# The first step is to create and open a file with the .jl extension. 
You can do this with your +# favorite text editor (if you do not have one, we recommend [VS Code](https://code.visualstudio.com/)). +# In this file you will create your setup. + +# To be able to use functionalities of Trixi.jl, you always need to load Trixi.jl itself +# and the [OrdinaryDiffEq.jl](https://github.com/SciML/OrdinaryDiffEq.jl) package. + +using Trixi +using OrdinaryDiffEq + +# The next thing to do is to choose an equation that is suitable for your problem. To see all the +# currently implemented equations, take a look at +# [`src/equations`](https://github.com/trixi-framework/Trixi.jl/tree/main/src/equations). +# If you are interested in adding a new physics model that has not yet been implemented in +# Trixi.jl, take a look at the tutorials +# [Adding a new scalar conservation law](@ref adding_new_scalar_equations) or +# [Adding a non-conservative equation](@ref adding_nonconservative_equation). + +# The linear scalar advection equation in two spatial dimensions +# ```math +# \frac{\partial}{\partial t}u + \frac{\partial}{\partial x} (a_1 u) + \frac{\partial}{\partial y} (a_2 u) = 0 +# ``` +# is already implemented in Trixi.jl as +# [`LinearScalarAdvectionEquation2D`](@ref), for which we need to define a two-dimensional parameter +# `advection_velocity` describing the parameters ``a_1`` and ``a_2``. Appropriate for our problem is `(0.2, -0.7)`. + +advection_velocity = (0.2, -0.7) +equations = LinearScalarAdvectionEquation2D(advection_velocity) + +# To solve our problem numerically using Trixi.jl, we have to discretize the spatial +# domain, for which we set up a mesh. One of the most used meshes in Trixi.jl is the +# [`TreeMesh`](@ref). The spatial domain used is ``[-1, 1] \times [-1, 1]``. We set an initial number +# of elements in the mesh using `initial_refinement_level`, which describes the initial number of +# hierarchical refinements. In this simple case, the total number of elements is `2^initial_refinement_level` +# throughout the simulation. The variable `n_cells_max` is used to limit the number of elements in the mesh, +# which cannot be exceeded when using [adaptive mesh refinement](@ref Adaptive-mesh-refinement). + +# All minimum and all maximum coordinates must be combined into `Tuples`. + +coordinates_min = (-1.0, -1.0) +coordinates_max = ( 1.0, 1.0) +mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 4, + n_cells_max = 30_000) + +# To approximate the solution of the defined model, we create a [`DGSEM`](@ref) solver. +# The solution in each of the recently defined mesh elements will be approximated by a polynomial +# of degree `polydeg`. For more information about discontinuous Galerkin methods, +# check out the [Introduction to DG methods](@ref scalar_linear_advection_1d) tutorial. + +solver = DGSEM(polydeg=3) + +# Now we need to define an initial condition for our problem. All the already implemented +# initial conditions for [`LinearScalarAdvectionEquation2D`](@ref) can be found in +# [`src/equations/linear_scalar_advection_2d.jl`](https://github.com/trixi-framework/Trixi.jl/blob/main/src/equations/linear_scalar_advection_2d.jl). +# If you want to use, for example, a Gaussian pulse, it can be used as follows: +# ```julia +# initial_conditions = initial_condition_gauss +# ``` +# But to show you how an arbitrary initial condition can be implemented in a way suitable for +# Trixi.jl, we define our own initial conditions. +# ```math +# u(x, y, 0) = \sin\bigl(\pi x \bigr) \sin\bigl(\pi y \bigr). 
+# ``` +# The initial conditions function must take spatial coordinates, time and equation as arguments +# and returns an initial condition as a statically sized vector `SVector`. Following the same structure, you +# can define your own initial conditions. The time variable `t` can be unused in the initial +# condition, but might also be used to describe an analytical solution if known. If you use the +# initial condition as analytical solution, you can analyze your numerical solution by computing +# the error, see also the +# [section about analyzing the solution](https://trixi-framework.github.io/Trixi.jl/stable/callbacks/#Analyzing-the-numerical-solution). + +function initial_condition_sinpi(x, t, equations::LinearScalarAdvectionEquation2D) + scalar = sinpi(x[1]) * sinpi(x[2]) + return SVector(scalar) +end +initial_condition = initial_condition_sinpi + +# The next step is to define a function of the source term corresponding to our problem. +# ```math +# f(u, x, y, t) = - 2 e^{-t} \sin\bigl(2 \pi (x - t) \bigr) \sin\bigl(2 \pi (y - t) \bigr) +# ``` +# This function must take the state variable, the spatial coordinates, the time and the +# equation itself as arguments and returns the source term as a static vector `SVector`. + +function source_term_exp_sinpi(u, x, t, equations::LinearScalarAdvectionEquation2D) + scalar = - 2 * exp(-t) * sinpi(2*(x[1] - t)) * sinpi(2*(x[2] - t)) + return SVector(scalar) +end + +# Now we collect all the information that is necessary to define a spatial discretization, +# which leaves us with an ODE problem in time with a span from 0.0 to 1.0. +# This approach is commonly referred to as the method of lines. + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver; + source_terms = source_term_exp_sinpi) +tspan = (0.0, 1.0) +ode = semidiscretize(semi, tspan); + +# At this point, our problem is defined. We will use the `solve` function defined in +# [OrdinaryDiffEq.jl](https://github.com/SciML/OrdinaryDiffEq.jl) to get the solution. +# OrdinaryDiffEq.jl gives us the ability to customize the solver +# using callbacks without actually modifying it. Trixi.jl already has some implemented +# [Callbacks](@ref callbacks-id). The most widely used callbacks in Trixi.jl are +# [step control callbacks](https://docs.sciml.ai/DiffEqCallbacks/stable/step_control/) that are +# activated at the end of each time step to perform some actions, e.g. to print statistics. +# We will show you how to use some of the common callbacks. + +# To print a summary of the simulation setup at the beginning +# and to reset timers we use the [`SummaryCallback`](@ref). +# When the returned callback is executed directly, the current timer values are shown. + +summary_callback = SummaryCallback() + +# We also want to analyze the current state of the solution in regular intervals. +# The [`AnalysisCallback`](@ref) outputs some useful statistical information during the solving process +# every `interval` time steps. + +analysis_callback = AnalysisCallback(semi, interval = 5) + +# It is also possible to control the time step size using the [`StepsizeCallback`](@ref) if the time +# integration method isn't adaptive itself. To get more details, look at +# [CFL based step size control](@ref CFL-based-step-size-control). + +stepsize_callback = StepsizeCallback(cfl = 1.6) + +# To save the current solution in regular intervals we use the [`SaveSolutionCallback`](@ref). +# We would like to save the initial and final solutions as well. 
The data +# will be saved as HDF5 files located in the `out` folder. Afterwards it is possible to visualize +# a solution from saved files using Trixi2Vtk.jl and ParaView, which is described below in the +# section [Visualize the solution](@ref Visualize-the-solution). + +save_solution = SaveSolutionCallback(interval = 5, + save_initial_solution = true, + save_final_solution = true) + +# Alternatively, we have the option to print solution files at fixed time intervals. +# ```julua +# save_solution = SaveSolutionCallback(dt = 0.1, +# save_initial_solution = true, +# save_final_solution = true) +# ``` + +# Another useful callback is the [`SaveRestartCallback`](@ref). It saves information for restarting +# in regular intervals. We are interested in saving a restart file for the final solution as +# well. To perform a restart, you need to configure the restart setup in a special way, which is +# described in the section [Restart simulation](@ref restart). + +save_restart = SaveRestartCallback(interval = 100, save_final_restart = true) + +# Create a `CallbackSet` to collect all callbacks so that they can be passed to the `solve` +# function. + +callbacks = CallbackSet(summary_callback, analysis_callback, stepsize_callback, save_solution, + save_restart) + +# The last step is to choose the time integration method. OrdinaryDiffEq.jl defines a wide range of +# [ODE solvers](https://docs.sciml.ai/DiffEqDocs/latest/solvers/ode_solve/), e.g. +# `CarpenterKennedy2N54(williamson_condition = false)`. We will pass the ODE +# problem, the ODE solver and the callbacks to the `solve` function. Also, to use +# `StepsizeCallback`, we must explicitly specify the initial trial time step `dt`, the selected +# value is not important, because it will be overwritten by the `StepsizeCallback`. And there is no +# need to save every step of the solution, we are only interested in the final result. + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), dt = 1.0, + save_everystep = false, callback = callbacks); + +# Finally, we print the timer summary. + +summary_callback() + +# Now you can plot the solution as shown below, analyze it and improve the stability, accuracy or +# efficiency of your setup. + + +# ## Visualize the solution + +# In the previous part of the tutorial, we calculated the final solution of the given problem, now we want +# to visualize it. A more detailed explanation of visualization methods can be found in the section +# [Visualization](@ref visualization). + + +# ### Using Plots.jl + +# The first option is to use the [Plots.jl](https://github.com/JuliaPlots/Plots.jl) package +# directly after calculations, when the solution is saved in the `sol` variable. We load the +# package and use the `plot` function. + +using Plots +plot(sol) + +# To show the mesh on the plot, we need to extract the visualization data from the solution as +# a [`PlotData2D`](@ref) object. Mesh extraction is possible using the [`getmesh`](@ref) function. +# Plots.jl has the `plot!` function that allows you to modify an already built graph. + +pd = PlotData2D(sol) +plot!(getmesh(pd)) + + +# ### Using Trixi2Vtk.jl + +# Another way to visualize a solution is to extract it from a saved HDF5 file. After we used the +# `solve` function with [`SaveSolutionCallback`](@ref) there is a file with the final solution. +# It is located in the `out` folder and is named as follows: `solution_index.h5`. The `index` +# is the final time step of the solution that is padded to 6 digits with zeros from the beginning. 
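+
+# For instance, the name of the file written for the final time step can be assembled by hand.
+# The following two lines are only an illustration of the zero-padding described above and are
+# not part of the setup; step 18 is the final step used in the conversion example below.
+# ```julia
+# step = 18
+# joinpath("out", "solution_" * lpad(step, 6, '0') * ".h5")  # -> "out/solution_000018.h5"
+# ```
+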
+
+# With [Trixi2Vtk](@ref) you can convert the HDF5 output file generated by Trixi.jl into a VTK file.
+# This can be used in visualization tools such as [ParaView](https://www.paraview.org) or
+# [VisIt](https://visit.llnl.gov) to plot the solution. Note that currently
+# Trixi2Vtk.jl supports conversion only for solutions in 2D and 3D spatial domains.
+
+# If you haven't added Trixi2Vtk.jl to your project yet, you can add it as follows.
+# ```julia
+# import Pkg
+# Pkg.add(["Trixi2Vtk"])
+# ```
+# Now we load the Trixi2Vtk.jl package and convert the file `out/solution_000018.h5` with
+# the final solution using the [`trixi2vtk`](@ref) function, saving the resulting file in the
+# `out` folder.
+
+using Trixi2Vtk
+trixi2vtk(joinpath("out", "solution_000018.h5"), output_directory="out")
+
+# Now two files `solution_000018.vtu` and `solution_000018_celldata.vtu` have been generated in the
+# `out` folder. The first one contains all the information for visualizing the solution, while the
+# second one contains all the cell-based or discretization-based information.
+
+# Now let's visualize the solution from the generated files in ParaView. Follow these short
+# instructions to get the visualization.
+# - Download, install and open [ParaView](https://www.paraview.org/download/).
+# - Press `Ctrl+O` and select the generated files `solution_000018.vtu` and
+#   `solution_000018_celldata.vtu` from the `out` folder.
+# - In the upper-left corner in the Pipeline Browser window, left-click on the eye-icon near
+#   `solution_000018.vtu`.
+# - In the lower-left corner in the Properties window, change the Coloring from Solid Color to
+#   scalar. This already generates the visualization of the final solution.
+# - Now let's add the mesh to the visualization. In the upper-left corner in the
+#   Pipeline Browser window, left-click on the eye-icon near `solution_000018_celldata.vtu`.
+# - In the lower-left corner in the Properties window, change the Representation from Surface
+#   to Wireframe. Then a white grid should appear on the visualization.
+# Now, if you followed the instructions exactly, you should get an image similar to the one shown in the
+# section [Using Plots.jl](@ref Using-Plots.jl):
+
+# ![paraview_trixi2vtk_example](https://github.com/trixi-framework/Trixi.jl/assets/119304909/0c29139b-6c5d-4d5c-86e1-f4ebc95aca7e)
+
+# After completing this tutorial, you are able to set up your own simulations with
+# Trixi.jl. If you have an interest in contributing to Trixi.jl as a developer, refer to the third
+# part of the introduction titled [Changing Trixi.jl itself](@ref changing_trixi).
+
+Sys.rm("out"; recursive=true, force=true) #hide #md
\ No newline at end of file
diff --git a/docs/literate/src/files/first_steps/getting_started.jl b/docs/literate/src/files/first_steps/getting_started.jl
new file mode 100644
index 00000000000..2bfaf33b5fc
--- /dev/null
+++ b/docs/literate/src/files/first_steps/getting_started.jl
@@ -0,0 +1,242 @@
+#src # Getting started
+
+# Trixi.jl is a numerical simulation framework for conservation laws and
+# is written in the [Julia programming language](https://julialang.org/).
+# This tutorial is intended for beginners in Julia and Trixi.jl.
+# After reading it, you will know how to install Julia and Trixi.jl on your computer,
+# and you will be able to download setup files from our GitHub repository, modify them,
+# and run simulations.
+
+# The contents of this tutorial:
+# - [Julia installation](@ref Julia-installation)
+# - [Trixi.jl installation](@ref Trixi.jl-installation)
+# - [Running a simulation](@ref Running-a-simulation)
+# - [Getting an existing setup file](@ref Getting-an-existing-setup-file)
+# - [Modifying an existing setup](@ref Modifying-an-existing-setup)
+
+
+# ## Julia installation
+
+# Trixi.jl is compatible with the latest stable release of Julia. Additional details regarding Julia
+# support can be found in the [`README.md`](https://github.com/trixi-framework/Trixi.jl#installation)
+# file. The current default Julia installation is managed through `juliaup`. You may follow our
+# concise installation guidelines for Windows, Linux, and MacOS provided below. In the event of any
+# issues during the installation process, please consult the official
+# [Julia installation instructions](https://julialang.org/downloads/).
+
+
+# ### Windows
+
+# - Open a terminal by pressing `Win+r` and entering `cmd` in the opened window.
+# - To install Julia, execute the following command in the terminal:
+#   ```shell
+#   winget install julia -s msstore
+#   ```
+# - Verify the successful installation of Julia by executing the following command in the terminal:
+#   ```shell
+#   julia
+#   ```
+#   To exit Julia, execute `exit()` or press `Ctrl+d`.
+
+
+# ### Linux and MacOS
+
+# - To install Julia, run the following command in a terminal:
+#   ```shell
+#   curl -fsSL https://install.julialang.org | sh
+#   ```
+#   Follow the instructions displayed in the terminal during the installation process.
+# - If an error occurs during the execution of the previous command, you may need to install
+#   `curl`. On Ubuntu-type systems, you can use the following command:
+#   ```shell
+#   sudo apt install curl
+#   ```
+#   After installing `curl`, repeat the first step once more to proceed with Julia installation.
+# - Verify the successful installation of Julia by executing the following command in the terminal:
+#   ```shell
+#   julia
+#   ```
+#   To exit Julia, execute `exit()` or press `Ctrl+d`.
+
+
+# ## Trixi.jl installation
+
+# Trixi.jl and its related tools are registered Julia packages; thus, their installation
+# happens inside Julia.
+# For a smooth workflow experience with Trixi.jl, you need to install
+# [Trixi.jl](https://github.com/trixi-framework/Trixi.jl),
+# [OrdinaryDiffEq.jl](https://github.com/SciML/OrdinaryDiffEq.jl), and
+# [Plots.jl](https://github.com/JuliaPlots/Plots.jl).
+
+# - Open a terminal and start Julia.
+# - Execute the following commands:
+#   ```julia
+#   import Pkg
+#   Pkg.add(["OrdinaryDiffEq", "Plots", "Trixi"])
+#   ```
+
+# Now you have installed all these
+# packages. [OrdinaryDiffEq.jl](https://github.com/SciML/OrdinaryDiffEq.jl) provides time
+# integration schemes used by Trixi.jl, and [Plots.jl](https://github.com/JuliaPlots/Plots.jl)
+# can be used to directly visualize Trixi.jl results from the Julia REPL.
+
+
+# ## Usage
+
+
+# ### Running a simulation
+
+# To get you started, Trixi.jl has a large set
+# of [example setups](https://github.com/trixi-framework/Trixi.jl/tree/main/examples) that can be
+# taken as a basis for your future investigations. In Trixi.jl, we call these setup files
+# "elixirs", since they contain Julia code that takes parts of Trixi.jl and combines them into
+# something new.
+
+# Any of the examples can be executed using the [`trixi_include`](@ref)
+# function. `trixi_include(...)` expects
+# a single string argument with a path to a file containing Julia code.
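+
+# For example, if you already have an elixir saved locally, a call could look like the
+# following (the file name here is only a placeholder for the path to your own setup file):
+# ```julia
+# using Trixi
+# trixi_include("path/to/your_elixir.jl")
+# ```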
+
+# For convenience, the [`examples_dir`](@ref) function returns a path to the
+# [`examples`](https://github.com/trixi-framework/Trixi.jl/tree/main/examples)
+# folder, which has been locally downloaded while installing Trixi.jl.
+# `joinpath(...)` can be used to join path components into a full path.
+
+# Let's execute a short two-dimensional problem setup. It approximates the solution of
+# the compressible Euler equations in 2D for an ideal gas ([`CompressibleEulerEquations2D`](@ref))
+# with a weak blast wave as the initial condition.
+
+# Start Julia in a terminal and execute the following code:
+
+# ```julia
+# using Trixi, OrdinaryDiffEq
+# trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", "elixir_euler_ec.jl"))
+# ```
+using Trixi, OrdinaryDiffEq #hide #md
+trixi_include(@__MODULE__,joinpath(examples_dir(), "tree_2d_dgsem", "elixir_euler_ec.jl")) #hide #md
+
+# To analyze the result of the computation, we can use the Plots.jl package and the function
+# `plot(...)`, which creates a graphical representation of the solution. `sol` is a variable
+# defined in the executed example, and it contains the solution at the final time of the simulation.
+
+using Plots
+plot(sol)
+
+# To obtain a list of all Trixi.jl elixirs, execute
+# [`get_examples`](@ref). It returns the paths to all example setups.
+
+get_examples()
+
+# Editing an existing elixir is the best way to get started with your own investigations using Trixi.jl.
+
+
+# ### Getting an existing setup file
+
+# To edit an existing elixir, you first have to find a suitable one and then copy it to a local
+# folder. Let's have a look at how to download the `elixir_euler_ec.jl` elixir used in the previous
+# section from the [Trixi.jl GitHub repository](https://github.com/trixi-framework/Trixi.jl).
+
+# - All examples are located inside
+#   the [`examples`](https://github.com/trixi-framework/Trixi.jl/tree/main/examples) folder.
+# - Navigate to the
+#   file [`elixir_euler_ec.jl`](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_dgsem/elixir_euler_ec.jl).
+# - Right-click the `Raw` button on the right side of the webpage and choose `Save as...`
+#   (or `Save Link As...`).
+# - Choose a folder and save the file.
+
+
+# ### Modifying an existing setup
+
+# As an example, we will change the initial condition used in
+# `elixir_euler_ec.jl`. In this example, we consider the compressible Euler equations in two spatial
+# dimensions,
+# ```math
+# \frac{\partial}{\partial t}
+# \begin{pmatrix}
+# \rho \\ \rho v_1 \\ \rho v_2 \\ \rho e
+# \end{pmatrix}
+# +
+# \frac{\partial}{\partial x}
+# \begin{pmatrix}
+# \rho v_1 \\ \rho v_1^2 + p \\ \rho v_1 v_2 \\ (\rho e + p) v_1
+# \end{pmatrix}
+# +
+# \frac{\partial}{\partial y}
+# \begin{pmatrix}
+# \rho v_2 \\ \rho v_1 v_2 \\ \rho v_2^2 + p \\ (\rho e + p) v_2
+# \end{pmatrix}
+# =
+# \begin{pmatrix}
+# 0 \\ 0 \\ 0 \\ 0
+# \end{pmatrix},
+# ```
+# for an ideal gas with the specific heat ratio ``\gamma``.
+# Here, ``\rho`` is the density, ``v_1`` and ``v_2`` are the velocities, ``e`` is the specific
+# total energy, and
+# ```math
+# p = (\gamma - 1) \left( \rho e - \frac{1}{2} \rho (v_1^2 + v_2^2) \right)
+# ```
+# is the pressure.
+# Initial conditions consist of initial values for ``\rho``, ``\rho v_1``,
+# ``\rho v_2`` and ``\rho e``.
+# One of the common initial conditions for the compressible Euler equations is a simple density
+# wave. Let's implement it.
+
+# - Open the downloaded file `elixir_euler_ec.jl` with a text editor.
+# - Go to the line with the following code: +# ```julia +# initial_condition = initial_condition_weak_blast_wave +# ``` +# Here, [`initial_condition_weak_blast_wave`](@ref) is used as the initial condition. +# - Comment out the line using the `#` symbol: +# ```julia +# # initial_condition = initial_condition_weak_blast_wave +# ``` +# - Now you can create your own initial conditions. Add the following code after the +# commented line: + +function initial_condition_density_waves(x, t, equations::CompressibleEulerEquations2D) + v1 = 0.1 # velocity along x-axis + v2 = 0.2 # velocity along y-axis + rho = 1.0 + 0.98 * sinpi(sum(x) - t * (v1 + v2)) # density wave profile + p = 20 # pressure + rho_e = p / (equations.gamma - 1) + 1/2 * rho * (v1^2 + v2^2) + return SVector(rho, rho*v1, rho*v2, rho_e) +end +initial_condition = initial_condition_density_waves + +# - Execute the following code one more time, but instead of `path/to/file` paste the path to the +# `elixir_euler_ec.jl` file that you just edited. +# ```julia +# using Trixi +# trixi_include(path/to/file) +# using Plots +# plot(sol) +# ``` +# Then you will obtain a new solution from running the simulation with a different initial +# condition. + +trixi_include(@__MODULE__,joinpath(examples_dir(), "tree_2d_dgsem", "elixir_euler_ec.jl"), #hide #md + initial_condition=initial_condition) #hide #md +pd = PlotData2D(sol) #hide #md +p1 = plot(pd["rho"]) #hide #md +p2 = plot(pd["v1"], clim=(0.05, 0.15)) #hide #md +p3 = plot(pd["v2"], clim=(0.15, 0.25)) #hide #md +p4 = plot(pd["p"], clim=(10, 30)) #hide #md +plot(p1, p2, p3, p4) #hide #md + +# To get exactly the same picture execute the following. +# ```julia +# pd = PlotData2D(sol) +# p1 = plot(pd["rho"]) +# p2 = plot(pd["v1"], clim=(0.05, 0.15)) +# p3 = plot(pd["v2"], clim=(0.15, 0.25)) +# p4 = plot(pd["p"], clim=(10, 30)) +# plot(p1, p2, p3, p4) +# ``` + +# Feel free to make further changes to the initial condition to observe different solutions. + +# Now you are able to download, modify and execute simulation setups for Trixi.jl. To explore +# further details on setting up a new simulation with Trixi.jl, refer to the second part of +# the introduction titled [Create first setup](@ref create_first_setup). + +Sys.rm("out"; recursive=true, force=true) #hide #md \ No newline at end of file diff --git a/docs/literate/src/files/index.jl b/docs/literate/src/files/index.jl index 26637e5b24b..1fc025d84da 100644 --- a/docs/literate/src/files/index.jl +++ b/docs/literate/src/files/index.jl @@ -14,20 +14,27 @@ # There are tutorials for the following topics: -# ### [1 Introduction to DG methods](@ref scalar_linear_advection_1d) +# ### [1 First steps in Trixi.jl](@ref getting_started) +#- +# This tutorial provides guidance for getting started with Trixi.jl, and Julia as well. It outlines +# the installation procedures for both Julia and Trixi.jl, the execution of Trixi.jl elixirs, the +# fundamental structure of a Trixi.jl setup, the visualization of results, and the development +# process for Trixi.jl. + +# ### [2 Introduction to DG methods](@ref scalar_linear_advection_1d) #- # This tutorial gives an introduction to discontinuous Galerkin (DG) methods with the example of the # scalar linear advection equation in 1D. Starting with some theoretical explanations, we first implement # a raw version of a discontinuous Galerkin spectral element method (DGSEM). Then, we will show how # to use features of Trixi.jl to achieve the same result. 
-# ### [2 DGSEM with flux differencing](@ref DGSEM_FluxDiff) +# ### [3 DGSEM with flux differencing](@ref DGSEM_FluxDiff) #- # To improve stability often the flux differencing formulation of the DGSEM (split form) is used. # We want to present the idea and formulation on a basic 1D level. Then, we show how this formulation # can be implemented in Trixi.jl and analyse entropy conservation for two different flux combinations. -# ### [3 Shock capturing with flux differencing and stage limiter](@ref shock_capturing) +# ### [4 Shock capturing with flux differencing and stage limiter](@ref shock_capturing) #- # Using the flux differencing formulation, a simple procedure to capture shocks is a hybrid blending # of a high-order DG method and a low-order subcell finite volume (FV) method. We present the idea on a @@ -35,20 +42,20 @@ # explained and added to an exemplary simulation of the Sedov blast wave with the 2D compressible Euler # equations. -# ### [4 Non-periodic boundary conditions](@ref non_periodic_boundaries) +# ### [5 Non-periodic boundary conditions](@ref non_periodic_boundaries) #- # Thus far, all examples used periodic boundaries. In Trixi.jl, you can also set up a simulation with # non-periodic boundaries. This tutorial presents the implementation of the classical Dirichlet # boundary condition with a following example. Then, other non-periodic boundaries are mentioned. -# ### [5 DG schemes via `DGMulti` solver](@ref DGMulti_1) +# ### [6 DG schemes via `DGMulti` solver](@ref DGMulti_1) #- # This tutorial is about the more general DG solver [`DGMulti`](@ref), introduced [here](@ref DGMulti). # We are showing some examples for this solver, for instance with discretization nodes by Gauss or # triangular elements. Moreover, we present a simple way to include pre-defined triangulate meshes for # non-Cartesian domains using the package [StartUpDG.jl](https://github.com/jlchan/StartUpDG.jl). -# ### [6 Other SBP schemes (FD, CGSEM) via `DGMulti` solver](@ref DGMulti_2) +# ### [7 Other SBP schemes (FD, CGSEM) via `DGMulti` solver](@ref DGMulti_2) #- # Supplementary to the previous tutorial about DG schemes via the `DGMulti` solver we now present # the possibility for `DGMulti` to use other SBP schemes via the package @@ -56,7 +63,7 @@ # For instance, we show how to set up a finite differences (FD) scheme and a continuous Galerkin # (CGSEM) method. -# ### [7 Upwind FD SBP schemes](@ref upwind_fdsbp) +# ### [8 Upwind FD SBP schemes](@ref upwind_fdsbp) #- # General SBP schemes can not only be used via the [`DGMulti`](@ref) solver but # also with a general `DG` solver. In particular, upwind finite difference SBP @@ -64,42 +71,42 @@ # schemes in the `DGMulti` framework, the interface is based on the package # [SummationByPartsOperators.jl](https://github.com/ranocha/SummationByPartsOperators.jl). -# ### [8 Adding a new scalar conservation law](@ref adding_new_scalar_equations) +# ### [9 Adding a new scalar conservation law](@ref adding_new_scalar_equations) #- # This tutorial explains how to add a new physics model using the example of the cubic conservation # law. First, we define the equation using a `struct` `CubicEquation` and the physical flux. Then, # the corresponding standard setup in Trixi.jl (`mesh`, `solver`, `semi` and `ode`) is implemented # and the ODE problem is solved by OrdinaryDiffEq's `solve` method. 
-# ### [9 Adding a non-conservative equation](@ref adding_nonconservative_equation) +# ### [10 Adding a non-conservative equation](@ref adding_nonconservative_equation) #- # In this part, another physics model is implemented, the nonconservative linear advection equation. # We run two different simulations with different levels of refinement and compare the resulting errors. -# ### [10 Parabolic terms](@ref parabolic_terms) +# ### [11 Parabolic terms](@ref parabolic_terms) #- # This tutorial describes how parabolic terms are implemented in Trixi.jl, e.g., # to solve the advection-diffusion equation. -# ### [11 Adding new parabolic terms](@ref adding_new_parabolic_terms) +# ### [12 Adding new parabolic terms](@ref adding_new_parabolic_terms) #- # This tutorial describes how new parabolic terms can be implemented using Trixi.jl. -# ### [12 Adaptive mesh refinement](@ref adaptive_mesh_refinement) +# ### [13 Adaptive mesh refinement](@ref adaptive_mesh_refinement) #- # Adaptive mesh refinement (AMR) helps to increase the accuracy in sensitive or turbolent regions while # not wasting resources for less interesting parts of the domain. This leads to much more efficient # simulations. This tutorial presents the implementation strategy of AMR in Trixi.jl, including the use of # different indicators and controllers. -# ### [13 Structured mesh with curvilinear mapping](@ref structured_mesh_mapping) +# ### [14 Structured mesh with curvilinear mapping](@ref structured_mesh_mapping) #- # In this tutorial, the use of Trixi.jl's structured curved mesh type [`StructuredMesh`](@ref) is explained. # We present the two basic option to initialize such a mesh. First, the curved domain boundaries # of a circular cylinder are set by explicit boundary functions. Then, a fully curved mesh is # defined by passing the transformation mapping. -# ### [14 Unstructured meshes with HOHQMesh.jl](@ref hohqmesh_tutorial) +# ### [15 Unstructured meshes with HOHQMesh.jl](@ref hohqmesh_tutorial) #- # The purpose of this tutorial is to demonstrate how to use the [`UnstructuredMesh2D`](@ref) # functionality of Trixi.jl. This begins by running and visualizing an available unstructured @@ -108,26 +115,26 @@ # software in the Trixi.jl ecosystem, and then run a simulation using Trixi.jl on said mesh. # In the end, the tutorial briefly explains how to simulate an example using AMR via `P4estMesh`. -# ### [15 P4est mesh from gmsh](@ref p4est_from_gmsh) +# ### [16 P4est mesh from gmsh](@ref p4est_from_gmsh) #- # This tutorial describes how to obtain a [`P4estMesh`](@ref) from an existing mesh generated # by [`gmsh`](https://gmsh.info/) or any other meshing software that can export to the Abaqus # input `.inp` format. The tutorial demonstrates how edges/faces can be associated with boundary conditions based on the physical nodesets. -# ### [16 Explicit time stepping](@ref time_stepping) +# ### [17 Explicit time stepping](@ref time_stepping) #- # This tutorial is about time integration using [OrdinaryDiffEq.jl](https://github.com/SciML/OrdinaryDiffEq.jl). # It explains how to use their algorithms and presents two types of time step choices - with error-based # and CFL-based adaptive step size control. -# ### [17 Differentiable programming](@ref differentiable_programming) +# ### [18 Differentiable programming](@ref differentiable_programming) #- # This part deals with some basic differentiable programming topics. 
For example, a Jacobian, its # eigenvalues and a curve of total energy (through the simulation) are calculated and plotted for # a few semidiscretizations. Moreover, we calculate an example for propagating errors with Measurement.jl # at the end. -# ### [18 Custom semidiscretization](@ref custom_semidiscretization) +# ### [19 Custom semidiscretization](@ref custom_semidiscretization) #- # This tutorial describes the [semidiscretiations](@ref overview-semidiscretizations) of Trixi.jl # and explains how to extend them for custom tasks. diff --git a/docs/make.jl b/docs/make.jl index 7fce3b31e24..584f151b5f3 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -48,6 +48,12 @@ end # "title" => ["subtitle 1" => ("folder 1", "filename 1.jl"), # "subtitle 2" => ("folder 2", "filename 2.jl")] files = [ + # Topic: introduction + "First steps in Trixi.jl" => [ + "Getting started" => ("first_steps", "getting_started.jl"), + "Create first setup" => ("first_steps", "create_first_setup.jl"), + "Changing Trixi.jl itself" => ("first_steps", "changing_trixi.jl"), + ], # Topic: DG semidiscretizations "Introduction to DG methods" => "scalar_linear_advection_1d.jl", "DGSEM with flux differencing" => "DGSEM_FluxDiff.jl", From fa18c8bab4c6855a989691ede7f8947c5e3ea945 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Wed, 7 Feb 2024 07:19:49 +0100 Subject: [PATCH 099/166] fix benchmarks configuration (#1837) --- benchmark/benchmarks.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmark/benchmarks.jl b/benchmark/benchmarks.jl index a3f7d1d2569..15d1d96c05f 100644 --- a/benchmark/benchmarks.jl +++ b/benchmark/benchmarks.jl @@ -55,5 +55,5 @@ let SUITE["latency"]["euler_2d"] = @benchmarkable run( `$(Base.julia_cmd()) -e 'using Trixi; trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", "elixir_euler_kelvin_helmholtz_instability.jl"), tspan=(0.0, 1.0e-10), save_restart=TrivialCallback(), save_solution=TrivialCallback())'`) seconds=60 SUITE["latency"]["mhd_2d"] = @benchmarkable run( - `$(Base.julia_cmd()) -e 'using Trixi; trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", "elixir_mhd_blast_wave.jl"), tspan=(0.0, 1.0e-10), save_restart=TrivialCallback(), save_solution=TrivialCallback())'`) seconds=60 + `$(Base.julia_cmd()) -e 'using Trixi; trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", "elixir_mhd_blast_wave.jl"), tspan=(0.0, 1.0e-10), save_solution=TrivialCallback())'`) seconds=60 end From 8c6d9bc727219e470f56f3317997f17eb5615842 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Wed, 7 Feb 2024 07:27:40 +0100 Subject: [PATCH 100/166] Make CI fail if Codecov fails (#1834) --- .github/workflows/ci.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2e388366fc8..86bd3f836e5 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -134,7 +134,8 @@ jobs: file: ./lcov.info flags: unittests name: codecov-umbrella - fail_ci_if_error: false + fail_ci_if_error: true + verbose: true token: ${{ secrets.CODECOV_TOKEN }} # The standard setup of Coveralls is just annoying for parallel builds, see, e.g., # https://github.com/trixi-framework/Trixi.jl/issues/691 From 4fb8160c397df922e8fc4906a5c8f92225c21ecd Mon Sep 17 00:00:00 2001 From: Michael Schlottke-Lakemper Date: Wed, 7 Feb 2024 08:43:50 +0100 Subject: [PATCH 101/166] Update compat bounds for Makie, CairoMakie (#1836) --- Project.toml | 2 +- docs/Project.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Project.toml 
b/Project.toml index e99b08e0e81..9b624c7733c 100644 --- a/Project.toml +++ b/Project.toml @@ -68,7 +68,7 @@ LinearAlgebra = "1" LinearMaps = "2.7, 3.0" LoopVectorization = "0.12.118" MPI = "0.20" -Makie = "0.19" +Makie = "0.19, 0.20" MuladdMacro = "0.2.2" Octavian = "0.3.5" OffsetArrays = "1.3" diff --git a/docs/Project.toml b/docs/Project.toml index 3a091f5b4f1..cc48aeb8ed9 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -12,7 +12,7 @@ Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" Trixi2Vtk = "bc1476a1-1ca6-4cc3-950b-c312b255ff95" [compat] -CairoMakie = "0.6, 0.7, 0.8, 0.9, 0.10" +CairoMakie = "0.6, 0.7, 0.8, 0.9, 0.10, 0.11" Documenter = "1" ForwardDiff = "0.10" HOHQMesh = "0.1, 0.2" From 3e6872bd486a4cfcc94bd2c31b2fe8510670a7a2 Mon Sep 17 00:00:00 2001 From: Michael Schlottke-Lakemper Date: Wed, 7 Feb 2024 09:06:07 +0100 Subject: [PATCH 102/166] Enable CI testing on Apple Silicon (#1830) * Enable CI testing on Apple Silicon * Use `threaded` instead of `threaded_legacy` --- .github/workflows/ci.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 86bd3f836e5..9d398f187b3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -101,6 +101,10 @@ jobs: os: windows-latest arch: x64 trixi_test: threaded + - version: '1.9' + os: macos-14 + arch: arm64 + trixi_test: threaded steps: - uses: actions/checkout@v4 - uses: julia-actions/setup-julia@v1 From fe6a5276e1ea92a23d4897ca33a057288f277e8f Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Wed, 7 Feb 2024 10:37:22 +0100 Subject: [PATCH 103/166] coverallsapp at v2 (#1777) Co-authored-by: Michael Schlottke-Lakemper --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9d398f187b3..a7dfe033a90 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -200,7 +200,7 @@ jobs: coverage = merge_coverage_counts(coverage) @show covered_lines, total_lines = get_summary(coverage) LCOV.writefile("./lcov.info", coverage) - - uses: coverallsapp/github-action@master + - uses: coverallsapp/github-action@v2 with: github-token: ${{ secrets.GITHUB_TOKEN }} path-to-lcov: ./lcov.info From 4369c1c00b3e44de43bba95adeedfb37fd09551c Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Wed, 14 Feb 2024 07:56:08 +0100 Subject: [PATCH 104/166] Fix formatter to older version (#1843) * Fix formatter to older version * fix JuliaFormatter version also in utils folder * Update utils/trixi-format-file.jl Co-authored-by: Arpit Babbar * Update utils/trixi-format.jl Co-authored-by: Arpit Babbar * Apply suggestions from code review Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * format --------- Co-authored-by: Arpit Babbar Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> --- .github/workflows/FormatCheck.yml | 2 +- utils/trixi-format-file.jl | 3 ++- utils/trixi-format.jl | 3 ++- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/.github/workflows/FormatCheck.yml b/.github/workflows/FormatCheck.yml index a733cb7cc21..7297f1c3ff5 100644 --- a/.github/workflows/FormatCheck.yml +++ b/.github/workflows/FormatCheck.yml @@ -29,7 +29,7 @@ jobs: # TODO: Change the call below to # format(".") run: | - julia -e 'using Pkg; Pkg.add(PackageSpec(name = "JuliaFormatter"))' + julia -e 'using Pkg; Pkg.add(PackageSpec(name = "JuliaFormatter", version="1.0.45"))' julia -e 'using JuliaFormatter; format(["benchmark", 
"examples", "ext", "src", "test", "utils"])' - name: Format check run: | diff --git a/utils/trixi-format-file.jl b/utils/trixi-format-file.jl index c4d8e7c9032..9b9a0e4949c 100755 --- a/utils/trixi-format-file.jl +++ b/utils/trixi-format-file.jl @@ -2,7 +2,8 @@ using Pkg Pkg.activate(; temp = true, io = devnull) -Pkg.add("JuliaFormatter"; preserve = PRESERVE_ALL, io = devnull) +Pkg.add(PackageSpec(name = "JuliaFormatter", version = "1.0.45"); preserve = PRESERVE_ALL, + io = devnull) using JuliaFormatter: format_file diff --git a/utils/trixi-format.jl b/utils/trixi-format.jl index d1e7efa656a..63f14078807 100755 --- a/utils/trixi-format.jl +++ b/utils/trixi-format.jl @@ -2,7 +2,8 @@ using Pkg Pkg.activate(; temp = true, io = devnull) -Pkg.add("JuliaFormatter"; preserve = PRESERVE_ALL, io = devnull) +Pkg.add(PackageSpec(name = "JuliaFormatter", version = "1.0.45"); preserve = PRESERVE_ALL, + io = devnull) using JuliaFormatter: format From 4f33837e3c95b5af21057850c30ef603a9191d86 Mon Sep 17 00:00:00 2001 From: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> Date: Thu, 15 Feb 2024 08:50:30 +0100 Subject: [PATCH 105/166] Use `trixi_include` from TrixiBase.jl (#1832) * Use `trixi_include` from TrixiBase.jl * Add compat entry for TrixiBase.jl * Remove unused functions * Use `TrixiBase.walkexpr` * Fix docs * Add TrixiBase.jl API reference * Add TrixiBase to docs dependencies * Import `TrixiBase` * Remove `find_assignment` * Add TrixiBase to makedocs modules * Apply suggestions from code review Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> --------- Co-authored-by: Hendrik Ranocha Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> --- Project.toml | 2 + docs/Project.toml | 2 + docs/make.jl | 4 +- docs/src/conventions.md | 10 +- docs/src/reference-trixibase.md | 9 ++ src/Trixi.jl | 3 +- src/auxiliary/precompile.jl | 3 - src/auxiliary/special_elixirs.jl | 158 +------------------------------ src/callbacks_step/trivial.jl | 4 +- test/test_unit.jl | 97 ------------------- 10 files changed, 31 insertions(+), 261 deletions(-) create mode 100644 docs/src/reference-trixibase.md diff --git a/Project.toml b/Project.toml index 9b624c7733c..b4a06a70688 100644 --- a/Project.toml +++ b/Project.toml @@ -45,6 +45,7 @@ TimerOutputs = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f" Triangulate = "f7e6ffb2-c36d-4f8f-a77e-16e897189344" TriplotBase = "981d1d27-644d-49a2-9326-4793e63143c3" TriplotRecipes = "808ab39a-a642-4abf-81ff-4cb34ebbffa3" +TrixiBase = "9a0f1c46-06d5-4909-a5a3-ce25d3fa3284" [weakdeps] Makie = "ee78f7c6-11fb-53f2-987a-cfe4a2b5a57a" @@ -95,6 +96,7 @@ TimerOutputs = "0.5.7" Triangulate = "2.0" TriplotBase = "0.1" TriplotRecipes = "0.1" +TrixiBase = "0.1.1" julia = "1.8" [extras] diff --git a/docs/Project.toml b/docs/Project.toml index cc48aeb8ed9..3b8d169fdb8 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -10,6 +10,7 @@ OrdinaryDiffEq = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed" Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" Trixi2Vtk = "bc1476a1-1ca6-4cc3-950b-c312b255ff95" +TrixiBase = "9a0f1c46-06d5-4909-a5a3-ce25d3fa3284" [compat] CairoMakie = "0.6, 0.7, 0.8, 0.9, 0.10, 0.11" @@ -23,3 +24,4 @@ OrdinaryDiffEq = "6.49.1" Plots = "1.9" Test = "1" Trixi2Vtk = "0.3" +TrixiBase = "0.1.1" diff --git a/docs/make.jl b/docs/make.jl index 584f151b5f3..946b803b71e 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -8,6 +8,7 @@ end using Trixi using Trixi2Vtk +using TrixiBase # Get Trixi.jl 
root directory trixi_root_dir = dirname(@__DIR__) @@ -82,7 +83,7 @@ tutorials = create_tutorials(files) # Make documentation makedocs( # Specify modules for which docstrings should be shown - modules = [Trixi, Trixi2Vtk], + modules = [Trixi, TrixiBase, Trixi2Vtk], # Set sitename to Trixi.jl sitename = "Trixi.jl", # Provide additional formatting options @@ -128,6 +129,7 @@ makedocs( "Troubleshooting and FAQ" => "troubleshooting.md", "Reference" => [ "Trixi.jl" => "reference-trixi.md", + "TrixiBase.jl" => "reference-trixibase.md", "Trixi2Vtk.jl" => "reference-trixi2vtk.md" ], "Authors" => "authors.md", diff --git a/docs/src/conventions.md b/docs/src/conventions.md index dab1b8533a5..4f9e0ec4e67 100644 --- a/docs/src/conventions.md +++ b/docs/src/conventions.md @@ -47,10 +47,12 @@ Trixi.jl is distributed with several examples in the form of elixirs, small Julia scripts containing everything to set up and run a simulation. Working interactively from the Julia REPL with these scripts can be quite convenient while for exploratory research and development of Trixi.jl. For example, you -can use the convenience function [`trixi_include`](@ref) to `include` an elixir -with some modified arguments. To enable this, it is helpful to use a consistent -naming scheme in elixirs, since [`trixi_include`](@ref) can only perform simple -replacements. Some standard variables names are +can use the convenience function +[`trixi_include`](@ref) +to `include` an elixir with some modified arguments. To enable this, it is +helpful to use a consistent naming scheme in elixirs, since +[`trixi_include`](@ref) +can only perform simple replacements. Some standard variables names are - `polydeg` for the polynomial degree of a solver - `surface_flux` for the numerical flux at surfaces diff --git a/docs/src/reference-trixibase.md b/docs/src/reference-trixibase.md new file mode 100644 index 00000000000..c7a970f88ec --- /dev/null +++ b/docs/src/reference-trixibase.md @@ -0,0 +1,9 @@ +# TrixiBase.jl API + +```@meta +CurrentModule = TrixiBase +``` + +```@autodocs +Modules = [TrixiBase] +``` diff --git a/src/Trixi.jl b/src/Trixi.jl index 8d74fbc9736..ea72fbc915f 100644 --- a/src/Trixi.jl +++ b/src/Trixi.jl @@ -70,6 +70,7 @@ using Triangulate: Triangulate, TriangulateIO, triangulate export TriangulateIO # for type parameter in DGMultiMesh using TriplotBase: TriplotBase using TriplotRecipes: DGTriPseudocolor +@reexport using TrixiBase: TrixiBase, trixi_include @reexport using SimpleUnPack: @unpack using SimpleUnPack: @pack! using DataStructures: BinaryHeap, FasterForward, extract_all! 
@@ -129,7 +130,7 @@ include("callbacks_step/callbacks_step.jl") include("callbacks_stage/callbacks_stage.jl") include("semidiscretization/semidiscretization_euler_gravity.jl") -# `trixi_include` and special elixirs such as `convergence_test` +# Special elixirs such as `convergence_test` include("auxiliary/special_elixirs.jl") # Plot recipes and conversion functions to visualize results with Plots.jl diff --git a/src/auxiliary/precompile.jl b/src/auxiliary/precompile.jl index 9cec502f6cb..4d5399b5ba3 100644 --- a/src/auxiliary/precompile.jl +++ b/src/auxiliary/precompile.jl @@ -577,9 +577,6 @@ function _precompile_manual_() @assert Base.precompile(Tuple{typeof(show), Base.TTY, lbm_collision_callback_type}) @assert Base.precompile(Tuple{typeof(show), IOContext{Base.TTY}, MIME"text/plain", lbm_collision_callback_type}) - - # infrastructure, special elixirs - @assert Base.precompile(Tuple{typeof(trixi_include), String}) end @assert Base.precompile(Tuple{typeof(init_mpi)}) diff --git a/src/auxiliary/special_elixirs.jl b/src/auxiliary/special_elixirs.jl index 5fdd9aea0c5..d71a27aa96a 100644 --- a/src/auxiliary/special_elixirs.jl +++ b/src/auxiliary/special_elixirs.jl @@ -5,58 +5,6 @@ @muladd begin #! format: noindent -# Note: We can't call the method below `Trixi.include` since that is created automatically -# inside `module Trixi` to `include` source files and evaluate them within the global scope -# of `Trixi`. However, users will want to evaluate in the global scope of `Main` or something -# similar to manage dependencies on their own. -""" - trixi_include([mod::Module=Main,] elixir::AbstractString; kwargs...) - -`include` the file `elixir` and evaluate its content in the global scope of module `mod`. -You can override specific assignments in `elixir` by supplying keyword arguments. -It's basic purpose is to make it easier to modify some parameters while running Trixi.jl from the -REPL. Additionally, this is used in tests to reduce the computational burden for CI while still -providing examples with sensible default values for users. - -Before replacing assignments in `elixir`, the keyword argument `maxiters` is inserted -into calls to `solve` and `Trixi.solve` with it's default value used in the SciML ecosystem -for ODEs, see the "Miscellaneous" section of the -[documentation](https://docs.sciml.ai/DiffEqDocs/stable/basics/common_solver_opts/). - -# Examples - -```jldoctest -julia> redirect_stdout(devnull) do - trixi_include(@__MODULE__, joinpath(examples_dir(), "tree_1d_dgsem", "elixir_advection_extended.jl"), - tspan=(0.0, 0.1)) - sol.t[end] - end -[ Info: You just called `trixi_include`. Julia may now compile the code, please be patient. -0.1 -``` -""" -function trixi_include(mod::Module, elixir::AbstractString; kwargs...) - # Check that all kwargs exist as assignments - code = read(elixir, String) - expr = Meta.parse("begin \n$code \nend") - expr = insert_maxiters(expr) - - for (key, val) in kwargs - # This will throw an error when `key` is not found - find_assignment(expr, key) - end - - # Print information on potential wait time only in non-parallel case - if !mpi_isparallel() - @info "You just called `trixi_include`. Julia may now compile the code, please be patient." - end - Base.include(ex -> replace_assignments(insert_maxiters(ex); kwargs...), mod, elixir) -end - -function trixi_include(elixir::AbstractString; kwargs...) - trixi_include(Main, elixir; kwargs...) -end - """ convergence_test([mod::Module=Main,] elixir::AbstractString, iterations; kwargs...) 
@@ -177,112 +125,15 @@ end # Helper methods used in the functions defined above -# Apply the function `f` to `expr` and all sub-expressions recursively. -walkexpr(f, expr::Expr) = f(Expr(expr.head, (walkexpr(f, arg) for arg in expr.args)...)) -walkexpr(f, x) = f(x) - -# Insert the keyword argument `maxiters` into calls to `solve` and `Trixi.solve` -# with default value `10^5` if it is not already present. -function insert_maxiters(expr) - maxiters_default = 10^5 - - expr = walkexpr(expr) do x - if x isa Expr - is_plain_solve = x.head === Symbol("call") && x.args[1] === Symbol("solve") - is_trixi_solve = (x.head === Symbol("call") && x.args[1] isa Expr && - x.args[1].head === Symbol(".") && - x.args[1].args[1] === Symbol("Trixi") && - x.args[1].args[2] isa QuoteNode && - x.args[1].args[2].value === Symbol("solve")) - - if is_plain_solve || is_trixi_solve - # Do nothing if `maxiters` is already set as keyword argument... - for arg in x.args - # This detects the case where `maxiters` is set as keyword argument - # without or before a semicolon - if (arg isa Expr && arg.head === Symbol("kw") && - arg.args[1] === Symbol("maxiters")) - return x - end - - # This detects the case where maxiters is set as keyword argument - # after a semicolon - if (arg isa Expr && arg.head === Symbol("parameters")) - # We need to check each keyword argument listed here - for nested_arg in arg.args - if (nested_arg isa Expr && - nested_arg.head === Symbol("kw") && - nested_arg.args[1] === Symbol("maxiters")) - return x - end - end - end - end - - # ...and insert it otherwise. - push!(x.args, Expr(Symbol("kw"), Symbol("maxiters"), maxiters_default)) - end - end - return x - end - - return expr -end - -# Replace assignments to `key` in `expr` by `key = val` for all `(key,val)` in `kwargs`. -function replace_assignments(expr; kwargs...) 
- # replace explicit and keyword assignments - expr = walkexpr(expr) do x - if x isa Expr - for (key, val) in kwargs - if (x.head === Symbol("=") || x.head === :kw) && - x.args[1] === Symbol(key) - x.args[2] = :($val) - # dump(x) - end - end - end - return x - end - - return expr -end - -# find a (keyword or common) assignment to `destination` in `expr` -# and return the assigned value -function find_assignment(expr, destination) - # declare result to be able to assign to it in the closure - local result - found = false - - # find explicit and keyword assignments - walkexpr(expr) do x - if x isa Expr - if (x.head === Symbol("=") || x.head === :kw) && - x.args[1] === Symbol(destination) - result = x.args[2] - found = true - # dump(x) - end - end - return x - end - - if !found - throw(ArgumentError("assignment `$destination` not found in expression")) - end - - result -end - -# searches the parameter that specifies the mesh reslution in the elixir +# Searches for the assignment that specifies the mesh resolution in the elixir function extract_initial_resolution(elixir, kwargs) code = read(elixir, String) expr = Meta.parse("begin \n$code \nend") try # get the initial_refinement_level from the elixir - initial_refinement_level = find_assignment(expr, :initial_refinement_level) + initial_refinement_level = TrixiBase.find_assignment(expr, + :initial_refinement_level) if haskey(kwargs, :initial_refinement_level) return kwargs[:initial_refinement_level] @@ -294,7 +145,8 @@ function extract_initial_resolution(elixir, kwargs) if isa(e, ArgumentError) try # get cells_per_dimension from the elixir - cells_per_dimension = eval(find_assignment(expr, :cells_per_dimension)) + cells_per_dimension = eval(TrixiBase.find_assignment(expr, + :cells_per_dimension)) if haskey(kwargs, :cells_per_dimension) return kwargs[:cells_per_dimension] diff --git a/src/callbacks_step/trivial.jl b/src/callbacks_step/trivial.jl index a55b7d85b13..fb93cf96c0c 100644 --- a/src/callbacks_step/trivial.jl +++ b/src/callbacks_step/trivial.jl @@ -8,8 +8,8 @@ """ TrivialCallback() -A callback that does nothing. This can be useful to disable some callbacks -easily via [`trixi_include`](@ref). +A callback that does nothing. This can be useful to disable some callbacks easily via +[`trixi_include`](@ref). """ function TrivialCallback() DiscreteCallback(trivial_callback, trivial_callback, diff --git a/test/test_unit.jl b/test/test_unit.jl index 7943d952f71..3b8dc3c4331 100644 --- a/test/test_unit.jl +++ b/test/test_unit.jl @@ -1529,103 +1529,6 @@ end @test mesh.boundary_faces[:entire_boundary] == [1, 2] end - -@testset "trixi_include" begin - @trixi_testset "Basic" begin - example = """ - x = 4 - """ - - filename = tempname() - try - open(filename, "w") do file - write(file, example) - end - - # Use `@trixi_testset`, which wraps code in a temporary module, and call - # `trixi_include` with `@__MODULE__` in order to isolate this test. - @test_warn "You just called" trixi_include(@__MODULE__, filename) - @test @isdefined x - @test x == 4 - - @test_warn "You just called" trixi_include(@__MODULE__, filename, x = 7) - @test x == 7 - - @test_throws "assignment `y` not found in expression" trixi_include(@__MODULE__, - filename, - y = 3) - finally - rm(filename, force = true) - end - end - - @trixi_testset "With `solve` Without `maxiters`" begin - # `trixi_include` assumes this to be the `solve` function of OrdinaryDiffEq, - # and therefore tries to insert the kwarg `maxiters`, which will fail here. 
- example = """ - solve() = 0 - x = solve() - """ - - filename = tempname() - try - open(filename, "w") do file - write(file, example) - end - - # Use `@trixi_testset`, which wraps code in a temporary module, and call - # `trixi_include` with `@__MODULE__` in order to isolate this test. - @test_throws "no method matching solve(; maxiters::Int64)" trixi_include(@__MODULE__, - filename) - - @test_throws "no method matching solve(; maxiters::Int64)" trixi_include(@__MODULE__, - filename, - maxiters = 3) - finally - rm(filename, force = true) - end - end - - @trixi_testset "With `solve` with `maxiters`" begin - # We need another example file that we include with `Base.include` first, in order to - # define the `solve` method without `trixi_include` trying to insert `maxiters` kwargs. - # Then, we can test that `trixi_include` inserts the kwarg in the `solve()` call. - example1 = """ - solve(; maxiters=0) = maxiters - """ - - example2 = """ - x = solve() - """ - - filename1 = tempname() - filename2 = tempname() - try - open(filename1, "w") do file - write(file, example1) - end - open(filename2, "w") do file - write(file, example2) - end - - # Use `@trixi_testset`, which wraps code in a temporary module, and call - # `Base.include` and `trixi_include` with `@__MODULE__` in order to isolate this test. - Base.include(@__MODULE__, filename1) - @test_warn "You just called" trixi_include(@__MODULE__, filename2) - @test @isdefined x - # This is the default `maxiters` inserted by `trixi_include` - @test x == 10^5 - - @test_warn "You just called" trixi_include(@__MODULE__, filename2, - maxiters = 7) - # Test that `maxiters` got overwritten - @test x == 7 - finally - rm(filename1, force = true) - rm(filename2, force = true) - end - end -end end end #module From 08c6034139451ba03be2abb6748bcd402f151b5d Mon Sep 17 00:00:00 2001 From: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> Date: Thu, 15 Feb 2024 21:37:50 +0100 Subject: [PATCH 106/166] Don't export `TrixiBase` (#1846) --- src/Trixi.jl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Trixi.jl b/src/Trixi.jl index ea72fbc915f..8ab8085d4e8 100644 --- a/src/Trixi.jl +++ b/src/Trixi.jl @@ -70,7 +70,8 @@ using Triangulate: Triangulate, TriangulateIO, triangulate export TriangulateIO # for type parameter in DGMultiMesh using TriplotBase: TriplotBase using TriplotRecipes: DGTriPseudocolor -@reexport using TrixiBase: TrixiBase, trixi_include +@reexport using TrixiBase: trixi_include +using TrixiBase: TrixiBase @reexport using SimpleUnPack: @unpack using SimpleUnPack: @pack! using DataStructures: BinaryHeap, FasterForward, extract_all! From a872bc55e545baf36be91a949940434ae89f3426 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Fri, 16 Feb 2024 06:24:33 +0100 Subject: [PATCH 107/166] set version to v0.6.9 --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index b4a06a70688..10bf79cf9a5 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. 
Winters ", "Jesse Chan "] -version = "0.6.9-pre" +version = "0.6.9" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From 29e173eb5f955771755cc28342e07c9903204327 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Fri, 16 Feb 2024 06:24:52 +0100 Subject: [PATCH 108/166] set development version to v0.6.10-pre --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 10bf79cf9a5..af3e6b4d078 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "] -version = "0.6.9" +version = "0.6.10-pre" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From 3c9374bb6c336be32c56d5c13b213ae3900269a3 Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Tue, 20 Feb 2024 16:15:48 +0100 Subject: [PATCH 109/166] Classic LWR traffic flow (#1840) * Add classic LWR traffic flow model to Trixi * fmt * shorten * Update examples/structured_1d_dgsem/elixir_traffic_flow_lwr_greenlight.jl * Update examples/structured_1d_dgsem/elixir_traffic_flow_lwr_greenlight.jl * rm IC const, fmt * add davis wave speed estimate for upcoming change * news and md * Use euler * fmt * Update examples/structured_1d_dgsem/elixir_traffic_flow_lwr_greenlight.jl * Update examples/tree_1d_dgsem/elixir_traffic_flow_lwr_trafficjam.jl * Update NEWS.md Co-authored-by: Andrew Winters * Andrew's suggestions * Apply suggestions from code review Co-authored-by: Hendrik Ranocha * add domain of u * back to carpenter kennedy * Update NEWS.md Co-authored-by: Andrew Winters --------- Co-authored-by: Andrew Winters Co-authored-by: Hendrik Ranocha --- NEWS.md | 1 + README.md | 2 +- .../elixir_traffic_flow_lwr_greenlight.jl | 80 ++++++++++++ .../elixir_traffic_flow_lwr_convergence.jl | 54 ++++++++ .../elixir_traffic_flow_lwr_trafficjam.jl | 82 +++++++++++++ src/Trixi.jl | 3 +- src/equations/equations.jl | 5 + src/equations/traffic_flow_lwr_1d.jl | 116 ++++++++++++++++++ test/test_structured_1d.jl | 15 +++ test/test_tree_1d.jl | 3 + test/test_tree_1d_traffic_flow_lwr.jl | 42 +++++++ 11 files changed, 401 insertions(+), 2 deletions(-) create mode 100644 examples/structured_1d_dgsem/elixir_traffic_flow_lwr_greenlight.jl create mode 100644 examples/tree_1d_dgsem/elixir_traffic_flow_lwr_convergence.jl create mode 100644 examples/tree_1d_dgsem/elixir_traffic_flow_lwr_trafficjam.jl create mode 100644 src/equations/traffic_flow_lwr_1d.jl create mode 100644 test/test_tree_1d_traffic_flow_lwr.jl diff --git a/NEWS.md b/NEWS.md index 02a723fca45..feccd1f9852 100644 --- a/NEWS.md +++ b/NEWS.md @@ -12,6 +12,7 @@ for human readability. - Different boundary conditions for quad/hex meshes in Abaqus format, even if not generated by HOHQMesh, can now be digested by Trixi in 2D and 3D. - Subcell (positivity) limiting support for nonlinear variables in 2D for `TreeMesh` +- Added Lighthill-Whitham-Richards (LWR) traffic model ## Changes when updating to v0.6 from v0.5.x diff --git a/README.md b/README.md index c531ab4d1a4..71370d3478e 100644 --- a/README.md +++ b/README.md @@ -53,7 +53,7 @@ installation and postprocessing procedures. 
Its features include:
  * Hyperbolic diffusion equations for elliptic problems
  * Lattice-Boltzmann equations (D2Q9 and D3Q27 schemes)
  * Shallow water equations
-  * Several scalar conservation laws (e.g., linear advection, Burgers' equation)
+  * Several scalar conservation laws (e.g., linear advection, Burgers' equation, LWR traffic flow)
  * Multi-physics simulations
  * [Self-gravitating gas dynamics](https://github.com/trixi-framework/paper-self-gravitating-gas-dynamics)
  * Shared-memory parallelization via multithreading
diff --git a/examples/structured_1d_dgsem/elixir_traffic_flow_lwr_greenlight.jl b/examples/structured_1d_dgsem/elixir_traffic_flow_lwr_greenlight.jl
new file mode 100644
index 00000000000..e5badf14451
--- /dev/null
+++ b/examples/structured_1d_dgsem/elixir_traffic_flow_lwr_greenlight.jl
@@ -0,0 +1,80 @@
+
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+
+equations = TrafficFlowLWREquations1D()
+
+solver = DGSEM(polydeg = 3, surface_flux = FluxHLL(min_max_speed_davis))
+
+coordinates_min = (-1.0,) # minimum coordinate
+coordinates_max = (1.0,) # maximum coordinate
+cells_per_dimension = (64,)
+
+mesh = StructuredMesh(cells_per_dimension, coordinates_min, coordinates_max,
+                      periodicity = false)
+
+# Example inspired by http://www.clawpack.org/riemann_book/html/Traffic_flow.html#Example:-green-light
+# Green light at x = 0 which switches at t = 0 from red to green.
+# To the left there are cars bumper to bumper, to the right there are no cars.
+function initial_condition_greenlight(x, t, equation::TrafficFlowLWREquations1D)
+    scalar = x[1] < 0.0 ? 1.0 : 0.0
+
+    return SVector(scalar)
+end
+
+###############################################################################
+# Specify non-periodic boundary conditions
+
+# Assume that there are always cars waiting at the left
+function inflow(x, t, equations::TrafficFlowLWREquations1D)
+    return initial_condition_greenlight(coordinates_min, t, equations)
+end
+boundary_condition_inflow = BoundaryConditionDirichlet(inflow)
+
+# Cars may leave the modeled domain
+function boundary_condition_outflow(u_inner, orientation, normal_direction, x, t,
+                                    surface_flux_function,
+                                    equations::TrafficFlowLWREquations1D)
+    # Calculate the boundary flux entirely from the internal solution state
+    flux = Trixi.flux(u_inner, orientation, equations)
+
+    return flux
+end
+
+boundary_conditions = (x_neg = boundary_condition_inflow,
+                       x_pos = boundary_condition_outflow)
+
+initial_condition = initial_condition_greenlight
+
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver,
+                                    boundary_conditions = boundary_conditions)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+tspan = (0.0, 0.5)
+ode = semidiscretize(semi, tspan)
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 100
+analysis_callback = AnalysisCallback(semi, interval = analysis_interval)
+
+alive_callback = AliveCallback(analysis_interval = analysis_interval)
+
+stepsize_callback = StepsizeCallback(cfl = 1.2)
+
+callbacks = CallbackSet(summary_callback,
+                        analysis_callback, alive_callback,
+                        stepsize_callback)
+
+###############################################################################
+# run the simulation
+
+sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false),
+            dt = 42, # solve needs some value here but it will be overwritten by the stepsize_callback
+            save_everystep = false, callback = callbacks);
+
+summary_callback() # print the timer summary
diff --git a/examples/tree_1d_dgsem/elixir_traffic_flow_lwr_convergence.jl b/examples/tree_1d_dgsem/elixir_traffic_flow_lwr_convergence.jl
new file mode 100644
index 00000000000..59258018f8c
--- /dev/null
+++ b/examples/tree_1d_dgsem/elixir_traffic_flow_lwr_convergence.jl
@@ -0,0 +1,54 @@
+
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+
+equations = TrafficFlowLWREquations1D()
+
+# A DGSEM solver of polynomial degree 3 with HLL surface flux
+solver = DGSEM(polydeg = 3, surface_flux = flux_hll)
+
+coordinates_min = 0.0 # minimum coordinate
+coordinates_max = 2.0 # maximum coordinate
+
+# Create a uniformly refined mesh with periodic boundaries
+mesh = TreeMesh(coordinates_min, coordinates_max,
+                initial_refinement_level = 4,
+                n_cells_max = 30_000)
+
+###############################################################################
+# Specify the initial condition and create the semidiscretization
+
+initial_condition = initial_condition_convergence_test
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver,
+                                    source_terms = source_terms_convergence_test)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+ +tspan = (0.0, 2.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +stepsize_callback = StepsizeCallback(cfl = 1.6) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + alive_callback, + stepsize_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, CarpenterKennedy2N54(), + dt = 42, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); + +summary_callback() # print the timer summary diff --git a/examples/tree_1d_dgsem/elixir_traffic_flow_lwr_trafficjam.jl b/examples/tree_1d_dgsem/elixir_traffic_flow_lwr_trafficjam.jl new file mode 100644 index 00000000000..d3a17b513fc --- /dev/null +++ b/examples/tree_1d_dgsem/elixir_traffic_flow_lwr_trafficjam.jl @@ -0,0 +1,82 @@ + +using OrdinaryDiffEq +using Trixi + +############################################################################### + +equations = TrafficFlowLWREquations1D() + +# Use first order finite volume to prevent oscillations at the shock +solver = DGSEM(polydeg = 0, surface_flux = flux_lax_friedrichs) + +coordinates_min = -1.0 # minimum coordinate +coordinates_max = 1.0 # maximum coordinate + +mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 9, + n_cells_max = 30_000, + periodicity = false) + +# Example taken from http://www.clawpack.org/riemann_book/html/Traffic_flow.html#Example:-Traffic-jam +# Discontinuous initial condition (Riemann Problem) leading to a shock that moves to the left. +# The shock corresponds to the traffic congestion. +function initial_condition_traffic_jam(x, t, equation::TrafficFlowLWREquations1D) + scalar = x[1] < 0.0 ? 0.5 : 1.0 + + return SVector(scalar) +end + +############################################################################### +# Specify non-periodic boundary conditions + +function outflow(x, t, equations::TrafficFlowLWREquations1D) + return initial_condition_traffic_jam(coordinates_min, t, equations) +end +boundary_condition_outflow = BoundaryConditionDirichlet(outflow) + +function boundary_condition_inflow(u_inner, orientation, normal_direction, x, t, + surface_flux_function, + equations::TrafficFlowLWREquations1D) + # Calculate the boundary flux entirely from the internal solution state + flux = Trixi.flux(u_inner, orientation, equations) + + return flux +end + +boundary_conditions = (x_neg = boundary_condition_outflow, + x_pos = boundary_condition_inflow) + +initial_condition = initial_condition_traffic_jam + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions) + +############################################################################### +# ODE solvers, callbacks etc. 
+ +tspan = (0.0, 0.5) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +stepsize_callback = StepsizeCallback(cfl = 1.0) + +callbacks = CallbackSet(summary_callback, + analysis_callback, alive_callback, + stepsize_callback) + +############################################################################### +# run the simulation + +# Note: Be careful when increasing the polynomial degree and switching from first order finite volume +# to some actual DG method - in that case, you should also exchange the ODE solver. +sol = solve(ode, Euler(), + dt = 42, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); + +summary_callback() # print the timer summary diff --git a/src/Trixi.jl b/src/Trixi.jl index 8ab8085d4e8..bf0986084af 100644 --- a/src/Trixi.jl +++ b/src/Trixi.jl @@ -157,7 +157,8 @@ export AcousticPerturbationEquations2D, ShallowWaterTwoLayerEquations1D, ShallowWaterTwoLayerEquations2D, ShallowWaterEquationsQuasi1D, LinearizedEulerEquations2D, - PolytropicEulerEquations2D + PolytropicEulerEquations2D, + TrafficFlowLWREquations1D export LaplaceDiffusion1D, LaplaceDiffusion2D, LaplaceDiffusion3D, CompressibleNavierStokesDiffusion1D, CompressibleNavierStokesDiffusion2D, diff --git a/src/equations/equations.jl b/src/equations/equations.jl index c041bf117ba..65875a2a7e5 100644 --- a/src/equations/equations.jl +++ b/src/equations/equations.jl @@ -507,4 +507,9 @@ include("linearized_euler_2d.jl") abstract type AbstractEquationsParabolic{NDIMS, NVARS, GradientVariables} <: AbstractEquations{NDIMS, NVARS} end + +# Lighthill-Witham-Richards (LWR) traffic flow model +abstract type AbstractTrafficFlowLWREquations{NDIMS, NVARS} <: + AbstractEquations{NDIMS, NVARS} end +include("traffic_flow_lwr_1d.jl") end # @muladd diff --git a/src/equations/traffic_flow_lwr_1d.jl b/src/equations/traffic_flow_lwr_1d.jl new file mode 100644 index 00000000000..a4d2613a5c8 --- /dev/null +++ b/src/equations/traffic_flow_lwr_1d.jl @@ -0,0 +1,116 @@ +# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). +# Since these FMAs can increase the performance of many numerical algorithms, +# we need to opt-in explicitly. +# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. +@muladd begin +#! format: noindent + +@doc raw""" + TrafficFlowLWREquations1D + +The classic Lighthill-Witham Richards (LWR) model for 1D traffic flow. +The car density is denoted by $u \in [0, 1]$ and +the maximum possible speed (e.g. due to speed limits) is $v_{\text{max}}$. +```math +\partial_t u + v_{\text{max}} \partial_1 [u (1 - u)] = 0 +``` +For more details see e.g. Section 11.1 of +- Randall LeVeque (2002) +Finite Volume Methods for Hyperbolic Problems +[DOI: 10.1017/CBO9780511791253]https://doi.org/10.1017/CBO9780511791253 +""" +struct TrafficFlowLWREquations1D{RealT <: Real} <: AbstractTrafficFlowLWREquations{1, 1} + v_max::RealT + + function TrafficFlowLWREquations1D(v_max = 1.0) + new{typeof(v_max)}(v_max) + end +end + +varnames(::typeof(cons2cons), ::TrafficFlowLWREquations1D) = ("car-density",) +varnames(::typeof(cons2prim), ::TrafficFlowLWREquations1D) = ("car-density",) + +""" + initial_condition_convergence_test(x, t, equations::TrafficFlowLWREquations1D) + +A smooth initial condition used for convergence tests. 
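+Together with [`source_terms_convergence_test`](@ref), the exact solution of the
+resulting (forced) LWR equation is `u(x, t) = 2 + sin(2 * pi * (x - t))`.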
+""" +function initial_condition_convergence_test(x, t, equations::TrafficFlowLWREquations1D) + c = 2.0 + A = 1.0 + L = 1 + f = 1 / L + omega = 2 * pi * f + scalar = c + A * sin(omega * (x[1] - t)) + + return SVector(scalar) +end + +""" + source_terms_convergence_test(u, x, t, equations::TrafficFlowLWREquations1D) + +Source terms used for convergence tests in combination with +[`initial_condition_convergence_test`](@ref). +""" +@inline function source_terms_convergence_test(u, x, t, + equations::TrafficFlowLWREquations1D) + # Same settings as in `initial_condition` + c = 2.0 + A = 1.0 + L = 1 + f = 1 / L + omega = 2 * pi * f + du = omega * cos(omega * (x[1] - t)) * + (-1 - equations.v_max * (2 * sin(omega * (x[1] - t)) + 3)) + + return SVector(du) +end + +# Calculate 1D flux in for a single point +@inline function flux(u, orientation::Integer, equations::TrafficFlowLWREquations1D) + return SVector(equations.v_max * u[1] * (1.0 - u[1])) +end + +# Calculate maximum wave speed for local Lax-Friedrichs-type dissipation +@inline function max_abs_speed_naive(u_ll, u_rr, orientation::Integer, + equations::TrafficFlowLWREquations1D) + λ_max = max(abs(equations.v_max * (1.0 - 2 * u_ll[1])), + abs(equations.v_max * (1.0 - 2 * u_rr[1]))) +end + +# Calculate minimum and maximum wave speeds for HLL-type fluxes +@inline function min_max_speed_naive(u_ll, u_rr, orientation::Integer, + equations::TrafficFlowLWREquations1D) + jac_L = equations.v_max * (1.0 - 2 * u_ll[1]) + jac_R = equations.v_max * (1.0 - 2 * u_rr[1]) + + λ_min = min(jac_L, jac_R) + λ_max = max(jac_L, jac_R) + + return λ_min, λ_max +end + +@inline function min_max_speed_davis(u_ll, u_rr, orientation::Integer, + equations::TrafficFlowLWREquations1D) + min_max_speed_naive(u_ll, u_rr, orientation, equations) +end + +@inline function max_abs_speeds(u, equations::TrafficFlowLWREquations1D) + return (abs(equations.v_max * (1.0 - 2 * u[1])),) +end + +# Convert conservative variables to primitive +@inline cons2prim(u, equations::TrafficFlowLWREquations1D) = u + +# Convert conservative variables to entropy variables +@inline cons2entropy(u, equations::TrafficFlowLWREquations1D) = u + +# Calculate entropy for a conservative state `cons` +@inline entropy(u::Real, ::TrafficFlowLWREquations1D) = 0.5 * u^2 +@inline entropy(u, equations::TrafficFlowLWREquations1D) = entropy(u[1], equations) + +# Calculate total energy for a conservative state `cons` +@inline energy_total(u::Real, ::TrafficFlowLWREquations1D) = 0.5 * u^2 +@inline energy_total(u, equations::TrafficFlowLWREquations1D) = energy_total(u[1], + equations) +end # @muladd diff --git a/test/test_structured_1d.jl b/test/test_structured_1d.jl index f0eecfa9acd..fea06554c57 100644 --- a/test/test_structured_1d.jl +++ b/test/test_structured_1d.jl @@ -138,6 +138,21 @@ end @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 end end + +@trixi_testset "elixir_traffic_flow_lwr_greenlight.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_traffic_flow_lwr_greenlight.jl"), + l2=[0.2005523261652845], + linf=[0.5052827913468407]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end end # Clean up afterwards: delete Trixi.jl output directory diff --git a/test/test_tree_1d.jl b/test/test_tree_1d.jl index 4654f6313f7..8b470278ffd 100644 --- a/test/test_tree_1d.jl +++ b/test/test_tree_1d.jl @@ -47,6 +47,9 @@ 
isdir(outdir) && rm(outdir, recursive = true) # FDSBP methods on the TreeMesh include("test_tree_1d_fdsbp.jl") + + # Traffic flow LWR + include("test_tree_1d_traffic_flow_lwr.jl") end # Coverage test for all initial conditions diff --git a/test/test_tree_1d_traffic_flow_lwr.jl b/test/test_tree_1d_traffic_flow_lwr.jl new file mode 100644 index 00000000000..54412e314b3 --- /dev/null +++ b/test/test_tree_1d_traffic_flow_lwr.jl @@ -0,0 +1,42 @@ +module TestExamples1DTrafficFlowLWR + +using Test +using Trixi + +include("test_trixi.jl") + +EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_1d_dgsem") + +@testset "Traffic-flow LWR" begin +#! format: noindent + +@trixi_testset "elixir_traffic_flow_lwr_convergence.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_traffic_flow_lwr_convergence.jl"), + l2=[0.0008455067389588569], + linf=[0.004591951086623913]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + +@trixi_testset "elixir_traffic_flow_lwr_trafficjam.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_traffic_flow_lwr_trafficjam.jl"), + l2=[0.1761758135539748], linf=[0.5]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end +end + +end # module From c5e743aa1b229562132e3bbfbd936e995404739f Mon Sep 17 00:00:00 2001 From: Simon Candelaresi <10759273+SimonCan@users.noreply.github.com> Date: Tue, 20 Feb 2024 18:38:26 +0000 Subject: [PATCH 110/166] WIP: Sc/polytropic 2d wave speed (#1816) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Added coupling converters. * Added generic converter_function for structured 2d meshes. * Added example elixir for coupling converters. * Cleaned up converter coupling elixir. * Added equations in coupling converters. * Added converter functions. * Added identity converter function. * Autoformat for converter coupling implementation. * Added coupled converter elixir. * Corrected file name of coupled converters test. * Removed redundant doc string. * Added function signature in doc string. * Removed coverage_override in coupled tests. * Removed old commented code. * Update make.jl Added interface coupling docs to the main menu. * Update make.jl Moved converter coupling section. * Create coupling.md * Update coupling.md Added some documentation on coupling converters. * Removed troublesome AnalysisCallbackCoupled from test. * Chenged coupling converter function. * Changed coupling converter function and updated tests. * Sepcialized coupling function call. * Removed volume coupling from documentation to avoit confusion. * Update src/coupling_converters/coupling_converters.jl Co-authored-by: Hendrik Ranocha * Removed redundant converter function for coupling. * Removed redundant coupling converter file mentioned in some files. * Autoreformatted. * Removed old coupled elixir and replaced it with one using converter functions. * Updated errors for coupled tests. * Corrected test results for coupled equations. * Corrected comment. * Removed coupled test from special tests. * Removed coupled test from specials. * Chaned the coupling function to the identity. * Updated coupling tests. * Updated errors for coupled test. 
* Added advice about binary compatability for coupled equations in the documentation. * Typo. * Added numerical fluxes. * Corrected rs copy routine. Now loop over this semi's components. * Reformatted equations source file. * Removed problemating include of time_integration.jl. * Removed export of deleted methods. * Reverted to old version of compressible Euler multicomponent with no support for structured grid. * Renamed documentation file for multi-physics coupling. * Renamed doc reference. * Update src/semidiscretization/semidiscretization_coupled.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/src/multi-physics_coupling.md Co-authored-by: Michael Schlottke-Lakemper * Update docs/src/multi-physics_coupling.md Co-authored-by: Michael Schlottke-Lakemper * Update docs/src/multi-physics_coupling.md Co-authored-by: Michael Schlottke-Lakemper * Reinstated structured_2d_dgsem coupled in special tests. * Update examples/structured_2d_dgsem/elixir_advection_coupled.jl Co-authored-by: Michael Schlottke-Lakemper * Renamed CouplingFunction to CouplingConverter. * Update src/semidiscretization/semidiscretization_coupled.jl Co-authored-by: Michael Schlottke-Lakemper * Cleaned the copy of coupled boundary values. * Reduced time span for example coupling elixir. * Removed redundant loop. * Applied formatter. * Removed default coupling covnerter function. * Moved coupling converter function into elixir. * Apply suggestions from code review Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/make.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Removed coupling_converters.jl from the include. * Corrected introduced issue with coupling boundary copy. The latest change to clean up the boundary copying introduced a bug related to the determination of the wrong node indices. This is now corrected. * Corrected comment on final simulation time. * Updated errors for coupled test to reflect changed final simulation time. * Added miladd. * Corrected coordinate finding in semidiscretization_coupled. * Fixed issued related to memory allocation. * Corrected loop over semidiscretization. * Removed commented out code. * Fixed type instability with loops over semidiscretizations using lispy tuple programming. * Removed obsolete code. * Fixed another typa instability in coupled semidiscretization. * Cleaning up of the coupled semidiscretization. * Autoformatted coupled semidiscretization. * Fixed last type instability in coupling. * Autoformatter on semidiscretization. * Fixed bug in boundary values copy that arose when coupling multiple systems. * aplpied autoformatter on coupled semidiscretization. * Extended the structured 2d example elixir for the coupled advection to 4 semidiscretizations. This hase two purpuses: 1. Users are given an example fro 2d coupling avoiding common pitfalls. 2. This increases the code coverege for the test. * Updated test results for coupled advection in 2d to reflect the 4 semidiscretizations that are now used. * Added correct errors for tests for the coupled adveciton equations in structured 2d. 
* Update examples/structured_2d_dgsem/elixir_advection_coupled.jl Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> * Update examples/structured_2d_dgsem/elixir_advection_coupled.jl Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> * Update src/semidiscretization/semidiscretization_coupled.jl Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> * Update src/semidiscretization/semidiscretization_coupled.jl Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> * Update src/semidiscretization/semidiscretization_coupled.jl Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> * Update src/semidiscretization/semidiscretization_coupled.jl Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> * Update src/semidiscretization/semidiscretization_coupled.jl Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> * Update src/semidiscretization/semidiscretization_coupled.jl Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> * Update src/semidiscretization/semidiscretization_coupled.jl Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> * Corrected foreach_enumerate implementation. * Fix closing parens * Remove unused recursive rhs! * Pass equations to converter function * Apply formatting * Reverted copy_to_coupled_boundary to previou version to avoid type instability. * Corrected computation of coupled semidiscretizations and fixed memory issue. * Removed redundant nelements function, as it is no longer used. * Applied autoformatter. * Added max_abs_speed_naive( and max_abs_speed_naive for PolytropicEulerEquations2D. * Reverted coupling elixir to main branch version. The modified version should be part of a different PR. * Removed some modified coupling code as this should be part of a different PR. * Reverted changes on ooupling semidiscretization as this should be part of a different PR. * Reverted changes partaining the coupling PR. * Removed changes partaining coupling PR. * REverted to version including elixir_euler_warm_bubble.jl tests. * Update src/equations/polytropic_euler_2d.jl Co-authored-by: Andrew Winters * Update src/equations/polytropic_euler_2d.jl Co-authored-by: Andrew Winters * Update src/equations/polytropic_euler_2d.jl Co-authored-by: Andrew Winters * Update src/equations/polytropic_euler_2d.jl Co-authored-by: Andrés Rueda-Ramírez * Update src/equations/polytropic_euler_2d.jl Co-authored-by: Andrés Rueda-Ramírez * Update src/equations/polytropic_euler_2d.jl Co-authored-by: Daniel Doehring * Added consistency and rotation test for LAx-friedrich fluxes for polytropic equations in 2d. * Applied auto-formatter on polytropic 2d equation. 
* Update src/equations/polytropic_euler_2d.jl Co-authored-by: Andrew Winters --------- Co-authored-by: Michael Schlottke-Lakemper Co-authored-by: Hendrik Ranocha Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> Co-authored-by: Andrew Winters Co-authored-by: Andrés Rueda-Ramírez Co-authored-by: Daniel Doehring Co-authored-by: iomsn --- src/equations/polytropic_euler_2d.jl | 40 ++++++++++++++++++++++++++++ test/test_unit.jl | 24 +++++++++++++++++ 2 files changed, 64 insertions(+) diff --git a/src/equations/polytropic_euler_2d.jl b/src/equations/polytropic_euler_2d.jl index f5d2f7b0bad..e900fd64073 100644 --- a/src/equations/polytropic_euler_2d.jl +++ b/src/equations/polytropic_euler_2d.jl @@ -301,6 +301,46 @@ end return abs(v1) + c, abs(v2) + c end +# Calculate maximum wave speed for local Lax-Friedrichs-type dissipation as the +# maximum velocity magnitude plus the maximum speed of sound +@inline function max_abs_speed_naive(u_ll, u_rr, orientation::Integer, + equations::PolytropicEulerEquations2D) + rho_ll, v1_ll, v2_ll = cons2prim(u_ll, equations) + rho_rr, v1_rr, v2_rr = cons2prim(u_rr, equations) + + # Get the velocity value in the appropriate direction + if orientation == 1 + v_ll = v1_ll + v_rr = v1_rr + else # orientation == 2 + v_ll = v2_ll + v_rr = v2_rr + end + # Calculate sound speeds (we have p = kappa * rho^gamma) + c_ll = sqrt(equations.gamma * equations.kappa * rho_ll^(equations.gamma - 1)) + c_rr = sqrt(equations.gamma * equations.kappa * rho_rr^(equations.gamma - 1)) + + λ_max = max(abs(v_ll), abs(v_rr)) + max(c_ll, c_rr) +end + +@inline function max_abs_speed_naive(u_ll, u_rr, normal_direction::AbstractVector, + equations::PolytropicEulerEquations2D) + rho_ll, v1_ll, v2_ll = cons2prim(u_ll, equations) + rho_rr, v1_rr, v2_rr = cons2prim(u_rr, equations) + + # Calculate normal velocities and sound speed (we have p = kappa * rho^gamma) + # left + v_ll = (v1_ll * normal_direction[1] + + v2_ll * normal_direction[2]) + c_ll = sqrt(equations.gamma * equations.kappa * rho_ll^(equations.gamma - 1)) + # right + v_rr = (v1_rr * normal_direction[1] + + v2_rr * normal_direction[2]) + c_rr = sqrt(equations.gamma * equations.kappa * rho_rr^(equations.gamma - 1)) + + return max(abs(v_ll), abs(v_rr)) + max(c_ll, c_rr) * norm(normal_direction) +end + # Convert conservative variables to primitive @inline function cons2prim(u, equations::PolytropicEulerEquations2D) rho, rho_v1, rho_v2 = u diff --git a/test/test_unit.jl b/test/test_unit.jl index 3b8dc3c4331..c1379587cc8 100644 --- a/test/test_unit.jl +++ b/test/test_unit.jl @@ -858,6 +858,30 @@ end end end +@timed_testset "Consistency check for Lax-Friedrich flux: Polytropic CEE" begin + for gamma in [1.4, 1.0, 5 / 3] + kappa = 0.5 # Scaling factor for the pressure. 
+ equations = PolytropicEulerEquations2D(gamma, kappa) + u = SVector(1.1, -0.5, 2.34) + + orientations = [1, 2] + for orientation in orientations + @test flux_lax_friedrichs(u, u, orientation, equations) ≈ + flux(u, orientation, equations) + end + + normal_directions = [SVector(1.0, 0.0), + SVector(0.0, 1.0), + SVector(0.5, -0.5), + SVector(-1.2, 0.3)] + + for normal_direction in normal_directions + @test flux_lax_friedrichs(u, u, normal_direction, equations) ≈ + flux(u, normal_direction, equations) + end + end +end + @timed_testset "Consistency check for HLL flux with Davis wave speed estimates: LEE" begin flux_hll = FluxHLL(min_max_speed_davis) From e98a76b92fa1272b547b2abc997cfb1d885012f4 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Wed, 21 Feb 2024 11:02:15 +0100 Subject: [PATCH 111/166] improve benchmarks --- benchmark/benchmarks.jl | 12 ++++++++---- benchmark/run_benchmarks.jl | 4 ++++ 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/benchmark/benchmarks.jl b/benchmark/benchmarks.jl index 15d1d96c05f..0d6fabcd4a9 100644 --- a/benchmark/benchmarks.jl +++ b/benchmark/benchmarks.jl @@ -2,6 +2,9 @@ # readability #! format: off +using Pkg +Pkg.activate(@__DIR__) + using BenchmarkTools using Trixi @@ -47,13 +50,14 @@ end let SUITE["latency"] = BenchmarkGroup() SUITE["latency"]["default_example"] = @benchmarkable run( - `$(Base.julia_cmd()) -e 'using Trixi; trixi_include(default_example())'`) seconds=60 + `$(Base.julia_cmd()) --project=$(@__DIR__) -e 'using Trixi; trixi_include(default_example())'`) seconds=60 for polydeg in [3, 7] command = "using Trixi; trixi_include(joinpath(examples_dir(), \"tree_2d_dgsem\", \"elixir_advection_extended.jl\"), tspan=(0.0, 1.0e-10), polydeg=$(polydeg), save_restart=TrivialCallback(), save_solution=TrivialCallback())" - SUITE["latency"]["polydeg_$polydeg"] = @benchmarkable run($`$(Base.julia_cmd()) -e $command`) seconds=60 + SUITE["latency"]["polydeg_$polydeg"] = @benchmarkable run( + $`$(Base.julia_cmd()) --project=$(@__DIR__) -e $command`) seconds=60 end SUITE["latency"]["euler_2d"] = @benchmarkable run( - `$(Base.julia_cmd()) -e 'using Trixi; trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", "elixir_euler_kelvin_helmholtz_instability.jl"), tspan=(0.0, 1.0e-10), save_restart=TrivialCallback(), save_solution=TrivialCallback())'`) seconds=60 + `$(Base.julia_cmd()) --project=$(@__DIR__) -e 'using Trixi; trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", "elixir_euler_kelvin_helmholtz_instability.jl"), tspan=(0.0, 1.0e-10), save_solution=TrivialCallback())'`) seconds=60 SUITE["latency"]["mhd_2d"] = @benchmarkable run( - `$(Base.julia_cmd()) -e 'using Trixi; trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", "elixir_mhd_blast_wave.jl"), tspan=(0.0, 1.0e-10), save_solution=TrivialCallback())'`) seconds=60 + `$(Base.julia_cmd()) --project=$(@__DIR__) -e 'using Trixi; trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", "elixir_mhd_blast_wave.jl"), tspan=(0.0, 1.0e-10), save_solution=TrivialCallback())'`) seconds=60 end diff --git a/benchmark/run_benchmarks.jl b/benchmark/run_benchmarks.jl index 3a92a9ba700..e4e15223ea7 100644 --- a/benchmark/run_benchmarks.jl +++ b/benchmark/run_benchmarks.jl @@ -1,3 +1,7 @@ +using Pkg +Pkg.activate(@__DIR__) +Pkg.develop(PackageSpec(path=dirname(@__DIR__))) +Pkg.instantiate() using PkgBenchmark using Trixi From 40b73cfde2a707dfc8d76ea92712a2287fd2e9e2 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Wed, 21 Feb 2024 11:12:54 +0100 Subject: [PATCH 112/166] format benchmarks --- 
benchmark/run_benchmarks.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmark/run_benchmarks.jl b/benchmark/run_benchmarks.jl index e4e15223ea7..7b8c25752f8 100644 --- a/benchmark/run_benchmarks.jl +++ b/benchmark/run_benchmarks.jl @@ -1,6 +1,6 @@ using Pkg Pkg.activate(@__DIR__) -Pkg.develop(PackageSpec(path=dirname(@__DIR__))) +Pkg.develop(PackageSpec(path = dirname(@__DIR__))) Pkg.instantiate() using PkgBenchmark From c3c0986a2eaf52a145cd59ab591ea3af75b40571 Mon Sep 17 00:00:00 2001 From: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> Date: Wed, 21 Feb 2024 11:39:41 +0100 Subject: [PATCH 113/166] Create Downgrade.yml (second try) (#1848) * add downgrade.yml and bump compats * also downgrade test/Project.toml * bump lower compat for ForwardDiff in tests * allow older version of HDF5 * allow newest versions of Makie and CairoMakie * allow newest version of T8code * remove compat 0.11 from CairoMakie --- .github/workflows/Downgrade.yml | 86 +++++++++++++++++++++++++++++++++ Project.toml | 24 ++++----- test/Project.toml | 6 +-- 3 files changed, 101 insertions(+), 15 deletions(-) create mode 100644 .github/workflows/Downgrade.yml diff --git a/.github/workflows/Downgrade.yml b/.github/workflows/Downgrade.yml new file mode 100644 index 00000000000..c84b1026d1b --- /dev/null +++ b/.github/workflows/Downgrade.yml @@ -0,0 +1,86 @@ +name: Downgrade + +on: + pull_request: + paths-ignore: + - 'AUTHORS.md' + - 'CITATION.bib' + - 'CONTRIBUTING.md' + - 'LICENSE.md' + - 'NEWS.md' + - 'README.md' + - '.zenodo.json' + - '.github/workflows/benchmark.yml' + - '.github/workflows/CompatHelper.yml' + - '.github/workflows/TagBot.yml' + - 'benchmark/**' + - 'docs/**' + - 'utils/**' + workflow_dispatch: + +# Cancel redundant CI tests automatically +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + downgrade_test: + if: "!contains(github.event.head_commit.message, 'skip ci')" + # We could also include the Julia version as in + # name: ${{ matrix.trixi_test }} - ${{ matrix.os }} - Julia ${{ matrix.version }} - ${{ matrix.arch }} - ${{ github.event_name }} + # to be more specific. However, that requires us updating the required CI tests whenever we update Julia. 
+ name: Downgrade ${{ matrix.trixi_test }} - ${{ matrix.os }} - ${{ matrix.arch }} - ${{ github.event_name }} + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + version: + - '1.9' + # - '~1.9.0-0' # including development versions + # - 'nightly' + os: + - ubuntu-latest + arch: + - x64 + trixi_test: + # - tree_part1 + # - tree_part2 + # - tree_part3 + # - tree_part4 + # - tree_part5 + # - tree_part6 + # - structured + # - p4est_part1 + # - p4est_part2 + # - t8code_part1 + # - unstructured_dgmulti + # - parabolic + # - paper_self_gravitating_gas_dynamics + # - misc_part1 + # - misc_part2 + # - performance_specializations_part1 + # - performance_specializations_part2 + # - mpi + - threaded + steps: + - uses: actions/checkout@v4 + - uses: julia-actions/setup-julia@v1 + with: + version: ${{ matrix.version }} + arch: ${{ matrix.arch }} + - run: julia -e 'using InteractiveUtils; versioninfo(verbose=true)' + - uses: julia-actions/cache@v1 + - uses: julia-actions/julia-downgrade-compat@v1 + with: + skip: LinearAlgebra,Printf,SparseArrays,DiffEqBase + projects: ., test + - uses: julia-actions/julia-buildpkg@v1 + env: + PYTHON: "" + - name: Run tests without coverage + uses: julia-actions/julia-runtest@v1 + with: + coverage: false + env: + PYTHON: "" + TRIXI_TEST: ${{ matrix.trixi_test }} diff --git a/Project.toml b/Project.toml index af3e6b4d078..9bed045637a 100644 --- a/Project.toml +++ b/Project.toml @@ -62,17 +62,17 @@ DiffEqCallbacks = "2.25" Downloads = "1.6" EllipsisNotation = "1.0" FillArrays = "0.13.2, 1" -ForwardDiff = "0.10.18" -HDF5 = "0.14, 0.15, 0.16, 0.17" +ForwardDiff = "0.10.24" +HDF5 = "0.16.10, 0.17" IfElse = "0.1" LinearAlgebra = "1" LinearMaps = "2.7, 3.0" -LoopVectorization = "0.12.118" +LoopVectorization = "0.12.151" MPI = "0.20" Makie = "0.19, 0.20" MuladdMacro = "0.2.2" -Octavian = "0.3.5" -OffsetArrays = "1.3" +Octavian = "0.3.21" +OffsetArrays = "1.12" P4est = "0.4.9" Polyester = "0.7.5" PrecompileTools = "1.1" @@ -81,19 +81,19 @@ RecipesBase = "1.1" Reexport = "1.0" Requires = "1.1" SciMLBase = "1.90, 2" -Setfield = "0.8, 1" +Setfield = "1" SimpleUnPack = "1.1" SparseArrays = "1" -StartUpDG = "0.17" -Static = "0.3, 0.4, 0.5, 0.6, 0.7, 0.8" +StartUpDG = "0.17.7" +Static = "0.8.7" StaticArrayInterface = "1.4" -StaticArrays = "1" -StrideArrays = "0.1.18" -StructArrays = "0.6" +StaticArrays = "1.5" +StrideArrays = "0.1.26" +StructArrays = "0.6.11" SummationByPartsOperators = "0.5.41" T8code = "0.4.3, 0.5" TimerOutputs = "0.5.7" -Triangulate = "2.0" +Triangulate = "2.2" TriplotBase = "0.1" TriplotRecipes = "0.1" TrixiBase = "0.1.1" diff --git a/test/Project.toml b/test/Project.toml index ecae0ac0900..a376c2805ea 100644 --- a/test/Project.toml +++ b/test/Project.toml @@ -13,13 +13,13 @@ Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" [compat] Aqua = "0.8" -CairoMakie = "0.6, 0.7, 0.8, 0.9, 0.10" +CairoMakie = "0.10" Downloads = "1" -ForwardDiff = "0.10" +ForwardDiff = "0.10.24" LinearAlgebra = "1" MPI = "0.20" OrdinaryDiffEq = "6.49.1" -Plots = "1.16" +Plots = "1.19" Printf = "1" Random = "1" Test = "1" From 1b2abd00e9b74d2cd185b0d06c5cf3182011e6c4 Mon Sep 17 00:00:00 2001 From: ArseniyKholod <119304909+ArseniyKholod@users.noreply.github.com> Date: Wed, 21 Feb 2024 12:48:03 +0200 Subject: [PATCH 114/166] Doc: Core aspects of the basic setup (#1699) * Doc: Core aspects of the basic setup * update pictures * Update innards_of_the_basic_setup.jl * mention ODE-Solvers * Revert "Revert "Merge branch 'main' into semidiscretization-doc"" This reverts commit 
85d6e8b50261885ad198ae4036e8599189912e56. * Revert "Merge branch 'main' into semidiscretization-doc" This reverts commit b8f8b0bc167a476b3dead39f6aadf4fe43601e7d, reversing changes made to bb518f50053cef054220fd35542f52a47214a997. * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/make.jl Co-authored-by: Michael Schlottke-Lakemper * Rename innards_of_the_basic_setup.jl to behind_the_scenes_simulation_setup.jl * add plot scripts * Format_and_last_review_changes * spell+output_directory_figures * spell * N->polydeg * add README for plots * line length <=100 * Update docs/literate/src/files/behind_the_scenes_simulation_setup_plots/README.md * add empty lines * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Andrew Winters * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Andrew Winters * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Andrew Winters * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Andrew Winters * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Andrew Winters * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Andrew Winters * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: 
Andrew Winters * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * mention method of lines * Update behind_the_scenes_simulation_setup.jl * Update behind_the_scenes_simulation_setup.jl * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Daniel Doehring * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Daniel Doehring * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Daniel Doehring * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Daniel Doehring * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Daniel Doehring * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Daniel Doehring * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Daniel Doehring * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Daniel Doehring * simplify rhs description * format * add interpolation to mortars * Update behind_the_scenes_simulation_setup.jl * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Daniel Doehring * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Daniel Doehring * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Daniel Doehring * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Daniel Doehring * add resizability explanation * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Daniel Doehring * format * add introduction as 2nd tutorial * fix * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * add unsafe_wrap explanation --------- Co-authored-by: Michael Schlottke-Lakemper Co-authored-by: Andrew Winters Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> Co-authored-by: Daniel Doehring --- .../behind_the_scenes_simulation_setup.jl | 253 ++++++++++++++++++ .../Project.toml | 2 + .../README.md | 15 ++ ...scretizationHyperbolic_structure_figure.jl | 64 +++++ .../src/generate_boundary_figure.jl | 190 +++++++++++++ .../src/generate_elements_figure.jl | 117 ++++++++ .../src/generate_interfaces_figure.jl | 157 +++++++++++ .../src/generate_mortars_figure.jl | 166 ++++++++++++ .../src/generate_nodes_figure.jl | 6 + .../src/generate_treemesh_figure.jl | 26 ++ .../src/rhs_structure_figure.jl | 43 +++ .../src/semidiscretize_structure_figure.jl | 51 ++++ docs/literate/src/files/index.jl | 45 ++-- docs/make.jl | 3 +- 14 files changed, 1119 insertions(+), 19 deletions(-) create mode 100644 docs/literate/src/files/behind_the_scenes_simulation_setup.jl create mode 100644 docs/literate/src/files/behind_the_scenes_simulation_setup_plots/Project.toml create mode 100644 docs/literate/src/files/behind_the_scenes_simulation_setup_plots/README.md create mode 100644 docs/literate/src/files/behind_the_scenes_simulation_setup_plots/src/SemidiscretizationHyperbolic_structure_figure.jl create mode 100644 
docs/literate/src/files/behind_the_scenes_simulation_setup_plots/src/generate_boundary_figure.jl create mode 100644 docs/literate/src/files/behind_the_scenes_simulation_setup_plots/src/generate_elements_figure.jl create mode 100644 docs/literate/src/files/behind_the_scenes_simulation_setup_plots/src/generate_interfaces_figure.jl create mode 100644 docs/literate/src/files/behind_the_scenes_simulation_setup_plots/src/generate_mortars_figure.jl create mode 100644 docs/literate/src/files/behind_the_scenes_simulation_setup_plots/src/generate_nodes_figure.jl create mode 100644 docs/literate/src/files/behind_the_scenes_simulation_setup_plots/src/generate_treemesh_figure.jl create mode 100644 docs/literate/src/files/behind_the_scenes_simulation_setup_plots/src/rhs_structure_figure.jl create mode 100644 docs/literate/src/files/behind_the_scenes_simulation_setup_plots/src/semidiscretize_structure_figure.jl diff --git a/docs/literate/src/files/behind_the_scenes_simulation_setup.jl b/docs/literate/src/files/behind_the_scenes_simulation_setup.jl new file mode 100644 index 00000000000..c93660e9bc1 --- /dev/null +++ b/docs/literate/src/files/behind_the_scenes_simulation_setup.jl @@ -0,0 +1,253 @@ +#src # Behind the scenes of a simulation setup + +# This tutorial will guide you through a simple Trixi.jl setup ("elixir"), giving an overview of +# what happens in the background during the initialization of a simulation. While the setup +# described herein does not cover all details, it involves relatively stable parts of Trixi.jl that +# are unlikely to undergo significant changes in the near future. The goal is to clarify some of +# the more fundamental, *technical* concepts that are applicable to a variety of +# (also more complex) configurations. + +# Trixi.jl follows the [method of lines](http://www.scholarpedia.org/article/Method_of_lines) concept for solving partial differential equations (PDEs). +# Firstly, the PDEs are reduced to a (potentially huge) system of +# ordinary differential equations (ODEs) by discretizing the spatial derivatives. Subsequently, +# these generated ODEs may be solved with methods available in OrdinaryDiffEq.jl or those specifically +# implemented in Trixi.jl. The following steps elucidate the process of transitioning from PDEs to +# ODEs within the framework of Trixi.jl. + +# ## Basic setup + +# Import essential libraries and specify an equation. + +using Trixi, OrdinaryDiffEq +equations = LinearScalarAdvectionEquation2D((-0.2, 0.7)) + +# Generate a spatial discretization using a [`TreeMesh`](@ref) with a pre-coarsened set of cells. + +coordinates_min = (-2.0, -2.0) +coordinates_max = (2.0, 2.0) + +coarsening_patches = ((type = "box", coordinates_min = [0.0, -2.0], + coordinates_max = [2.0, 0.0]),) + +mesh = TreeMesh(coordinates_min, coordinates_max, initial_refinement_level = 2, + n_cells_max = 30_000, + coarsening_patches = coarsening_patches) + +# The created `TreeMesh` looks like the following: + +# ![TreeMesh_example](https://github.com/trixi-framework/Trixi.jl/assets/119304909/d5ef76ee-8246-4730-a692-b472c06063a3) + +# Instantiate a [`DGSEM`](@ref) solver with a user-specified polynomial degree. The solver +# will define `polydeg + 1` [Gauss-Lobatto nodes](https://en.wikipedia.org/wiki/Gaussian_quadrature#Gauss%E2%80%93Lobatto_rules) and their associated weights within +# the reference interval ``[-1, 1]`` in each spatial direction. These nodes will be subsequently +# used to approximate solutions on each leaf cell of the `TreeMesh`. 
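+
+# As a brief illustration (the `nodes` field accessed below is an internal detail
+# of the basis object rather than public API), the node coordinates used for
+# `polydeg = 3` can be inspected directly:
+
+basis = LobattoLegendreBasis(3)
+basis.nodes # four Gauss-Lobatto nodes, approximately [-1.0, -0.447, 0.447, 1.0]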
+ +solver = DGSEM(polydeg = 3) + +# Gauss-Lobatto nodes with `polydeg = 3`: + +# ![Gauss-Lobatto_nodes_example](https://github.com/trixi-framework/Trixi.jl/assets/119304909/1d894611-801e-4f75-bff0-d77ca1c672e5) + +# ## Overview of the [`SemidiscretizationHyperbolic`](@ref) type + +# At this stage, all necessary components for configuring the spatial discretization are in place. +# The remaining task is to combine these components into a single structure that will be used +# throughout the entire simulation process. This is where [`SemidiscretizationHyperbolic`](@ref) +# comes into play. + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition_convergence_test, + solver) + +# The constructor for the `SemidiscretizationHyperbolic` object calls numerous sub-functions to +# perform the necessary initialization steps. A brief description of the key sub-functions is +# provided below. + + +# - `init_elements(leaf_cell_ids, mesh, equations, dg.basis, RealT, uEltype)` + +# The fundamental elements for approximating the solution are the leaf +# cells. The solution is constructed as a polynomial of the degree specified in the `DGSEM` +# solver in each spatial direction on each leaf cell. This polynomial approximation is evaluated +# at the Gauss-Lobatto nodes mentioned earlier. The `init_elements` function extracts +# these leaf cells from the `TreeMesh`, assigns them the label "elements", records their +# coordinates, and maps the Gauss-Lobatto nodes from the 1D interval ``[-1, 1]`` onto each coordinate axis +# of every element. + + +# ![elements_example](https://github.com/trixi-framework/Trixi.jl/assets/119304909/9f486670-b579-4e42-8697-439540c8bbb4) + +# The visualization of elements with nodes shown here includes spaces between elements, which do +# not exist in reality. This spacing is included only for illustrative purposes to underscore the +# separation of elements and the independent projection of nodes onto each element. + + +# - `init_interfaces(leaf_cell_ids, mesh, elements)` + +# At this point, the elements with nodes have been defined; however, they lack the necessary +# communication functionality. This is crucial because the local solution polynomials on the +# elements are not independent of each other. Furthermore, nodes on the boundary of adjacent +# elements share the same spatial location, which requires a method to combine this into a +# meaningful solution. +# Here [Riemann solvers](https://en.wikipedia.org/wiki/Riemann_solver#Approximate_solvers) +# come into play which can handle the principal ambiguity of a multi-valued solution at the +# same spatial location. + +# As demonstrated earlier, the elements can have varying sizes. Let us initially consider +# neighbors with equal size. For these elements, the `init_interfaces` function generates +# interfaces that store information about adjacent elements, their relative positions, and +# allocate containers for sharing solution data between neighbors during the solution process. + +# In our visualization, these interfaces would conceptually resemble tubes connecting the +# corresponding elements. + +# ![interfaces_example](https://github.com/trixi-framework/Trixi.jl/assets/119304909/bc3b6b02-afbc-4371-aaf7-c7bdc5a6c540) + + +# - `init_mortars(leaf_cell_ids, mesh, elements, dg.mortar)` + +# Returning to the consideration of different sizes among adjacent elements, within the +# `TreeMesh`, adjacent leaf cells can vary in side length by a maximum factor of two. 
This +# implies that a large element has one neighbor of +# equal size with a connection through an interface, or two neighbors at half the size, +# requiring a connection through so called "mortars". In 3D, a large element would have +# four small neighbor elements. + +# Mortars store information about the connected elements, their relative positions, and allocate +# containers for storing the solutions along the boundaries between these elements. + +# Due to the differing sizes of adjacent elements, it is not feasible to directly map boundary +# nodes of adjacent elements. Therefore, the concept of mortars employs a mass-conserving +# interpolation function to map boundary nodes from a larger element to a smaller one. + +# In our visualization, mortars are represented as branched tubes. + +# ![mortars_example](https://github.com/trixi-framework/Trixi.jl/assets/119304909/43a95a60-3a31-4b1f-8724-14049e7a0481) + + +# - `init_boundaries(leaf_cell_ids, mesh, elements)` + +# In order to apply boundary conditions, it is necessary to identify the locations of the +# boundaries. Therefore, we initialize a "boundaries" object, which records the elements that +# contain boundaries, specifies which side of an element is a boundary, stores the coordinates +# of boundary nodes, and allocates containers for managing solutions at these boundaries. + +# In our visualization, boundaries and their corresponding nodes are highlighted with green, +# semi-transparent lines. + +# ![boundaries_example](https://github.com/trixi-framework/Trixi.jl/assets/119304909/21996b20-4a22-4dfb-b16a-e2c22c2f29fe) + +# All the structures mentioned earlier are collected as a cache of type `NamedTuple`. Subsequently, +# an object of type `SemidiscretizationHyperbolic` is initialized using this cache, initial and +# boundary conditions, equations, mesh and solver. + +# In conclusion, the primary purpose of a `SemidiscretizationHyperbolic` is to collect equations, +# the geometric representation of the domain, and approximation instructions, creating specialized +# structures to interconnect these components in a manner that enables their utilization for +# the numerical solution of partial differential equations (PDEs). + +# As evident from the earlier description of `SemidiscretizationHyperbolic`, it comprises numerous +# functions called subsequently. Without delving into details, the structure of the primary calls +# are illustrated as follows: + +# ![SemidiscretizationHyperbolic_structure](https://github.com/trixi-framework/Trixi.jl/assets/119304909/8bf59422-0537-4d7a-9f13-d9b2253c19d7) + +# ## Overview of the [`semidiscretize`](@ref) function + +# At this stage, we have defined the equations and configured the domain's discretization. The +# final step before solving is to select a suitable time span and apply the corresponding initial +# conditions, which are already stored in the initialized `SemidiscretizationHyperbolic` object. + +# The purpose of the [`semidiscretize`](@ref) function is to wrap the semidiscretization as an +# `ODEProblem` within the specified time interval. During this procedure the approximate solution +# is created at the given initial time via the specified `initial_condition` function from the +# `SemidiscretizationHyperbolic` object. This `ODEProblem` can be subsequently passed to the +# `solve` function from the [OrdinaryDiffEq.jl](https://github.com/SciML/OrdinaryDiffEq.jl) package +# or to [`Trixi.solve`](@ref). 
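+
+# In essence, `semidiscretize` evaluates the initial condition at the start time
+# and bundles it with the right-hand side and the semidiscretization into a
+# standard `ODEProblem`. A simplified sketch of this (using the internal helper
+# `Trixi.compute_coefficients`; this is an illustration, not the verbatim
+# implementation) is:
+
+tspan = (0.0, 1.0)
+u0_ode = Trixi.compute_coefficients(first(tspan), semi)
+ode_sketch = ODEProblem(Trixi.rhs!, u0_ode, tspan, semi)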
+ +ode = semidiscretize(semi, (0.0, 1.0)); + +# The `semidiscretize` function involves a deep tree of subsequent calls, with the primary ones +# explained below. + + +# - `allocate_coefficients(mesh, equations, solver, cache)` + +# To apply initial conditions, a data structure ("container") needs to be generated to store the +# initial values of the target variables for each node within each element. + +# Since only one-dimensional `Array`s are `resize!`able in Julia, we use `Vector`s as an internal +# storage for the target variables and `resize!` them whenever needed, e.g. to change the number +# of elements. Then, during the solving process the same memory is reused by `unsafe_wrap`ping +# multi-dimensional `Array`s around the internal storage. + +# - `wrap_array(u_ode, semi)` + +# As previously noted, `u_ode` is constructed as a 1D vector to ensure compatibility with +# OrdinaryDiffEq.jl. However, for internal use within Trixi.jl, identifying which part of the +# vector relates to specific variables, elements, or nodes can be challenging. + +# This is why the `u_ode` vector is wrapped by the `wrap_array` function using `unsafe_wrap` +# to form a multidimensional array `u`. In this array, the first dimension corresponds to +# variables, followed by N dimensions corresponding to nodes for each of N space dimensions. +# The last dimension corresponds to the elements. +# Consequently, navigation within this multidimensional array becomes noticeably easier. + +# "Wrapping" in this context involves the creation of a reference to the same storage location +# but with an alternative structural representation. This approach enables the use of both +# instances `u` and `u_ode` as needed, so that changes are simultaneously reflected in both. +# This is possible because, from a storage perspective, they share the same stored data, while +# access to this data is provided in different ways. + + +# - `compute_coefficients!(u, initial_conditions, t, mesh::DG, equations, solver, cache)` + +# Now the variable `u`, intended to store solutions, has been allocated and wrapped, it is time +# to apply the initial conditions. The `compute_coefficients!` function calculates the initial +# conditions for each variable at every node within each element and properly stores them in the +# `u` array. + +# At this stage, the `semidiscretize` function has all the necessary components to initialize and +# return an `ODEProblem` object, which will be used by the `solve` function to compute the +# solution. + +# In summary, the internal workings of `semidiscretize` with brief descriptions can be presented +# as follows. + +# ![semidiscretize_structure](https://github.com/trixi-framework/Trixi.jl/assets/119304909/491eddc4-aadb-4e29-8c76-a7c821d0674e) + +# ## Functions `solve` and `rhs!` + +# Once the `ODEProblem` object is initialized, the `solve` function and one of the ODE solvers from +# the OrdinaryDiffEq.jl package can be utilized to compute an approximated solution using the +# instructions contained in the `ODEProblem` object. + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), dt = 0.01, + save_everystep = false); + +# Since the `solve` function and the ODE solver have no knowledge +# of a particular spatial discretization, it is necessary to define a +# "right-hand-side function", `rhs!`, within Trixi.jl. + +# Trixi.jl includes a set of `rhs!` functions designed to compute `du`, i.e., +# ``\frac{\partial u}{\partial t}`` according to the structure +# of the setup. 
These `rhs!` functions calculate interface, mortars, and boundary fluxes, in +# addition to surface and volume integrals, in order to construct the `du` vector. This `du` vector +# is then used by the time integration method to obtain the solution at the subsequent time step. +# The `rhs!` function is called by time integration methods in each iteration of the solve loop +# within OrdinaryDiffEq.jl, with arguments `du`, `u`, `semidiscretization`, and the current time. + +# Trixi.jl uses a two-levels approach for `rhs!` functions. The first level is limited to a +# single function for each `semidiscretization` type, and its role is to redirect data to the +# target `rhs!` for specific solver and mesh types. This target `rhs!` function is responsible +# for calculating `du`. + +# Path from the `solve` function call to the appropriate `rhs!` function call: + +# ![rhs_structure](https://github.com/trixi-framework/Trixi.jl/assets/119304909/dbea9a0e-25a4-4afa-855e-01f1ad619982) + +# Computed solution: + +using Plots +plot(sol) +pd = PlotData2D(sol) +plot!(getmesh(pd)) diff --git a/docs/literate/src/files/behind_the_scenes_simulation_setup_plots/Project.toml b/docs/literate/src/files/behind_the_scenes_simulation_setup_plots/Project.toml new file mode 100644 index 00000000000..43aec5b7f54 --- /dev/null +++ b/docs/literate/src/files/behind_the_scenes_simulation_setup_plots/Project.toml @@ -0,0 +1,2 @@ +[deps] +Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" diff --git a/docs/literate/src/files/behind_the_scenes_simulation_setup_plots/README.md b/docs/literate/src/files/behind_the_scenes_simulation_setup_plots/README.md new file mode 100644 index 00000000000..011b5c75860 --- /dev/null +++ b/docs/literate/src/files/behind_the_scenes_simulation_setup_plots/README.md @@ -0,0 +1,15 @@ +# Plots for the tutorial "Behind the scenes of a simulation setup" + +To create all the images for the tutorial, execute the following command from the directory of this `README.md`: +```julia +pkg> activate . +julia> include.(readdir("src"; join=true)) +``` +To create all images from a different directory, substitute `"src"` with the path to the `src` +folder. The resulting images will be generated in your current directory as PNG files. + +To generate a specific image, run the following command while replacing `"path/to/src"` and `"file_name"` with the appropriate values: +```julia +pkg> activate . 
+julia> include(joinpath("path/to/src", "file_name")) +``` \ No newline at end of file diff --git a/docs/literate/src/files/behind_the_scenes_simulation_setup_plots/src/SemidiscretizationHyperbolic_structure_figure.jl b/docs/literate/src/files/behind_the_scenes_simulation_setup_plots/src/SemidiscretizationHyperbolic_structure_figure.jl new file mode 100644 index 00000000000..cae7b19d470 --- /dev/null +++ b/docs/literate/src/files/behind_the_scenes_simulation_setup_plots/src/SemidiscretizationHyperbolic_structure_figure.jl @@ -0,0 +1,64 @@ +using Plots +plot(Shape([(-2.3,4.5), (2.35,4.5), (2.35,2.5), (-2.3,2.5)]), linecolor="black", fillcolor="white", label=false,linewidth=2, size=(800,600), showaxis=false, grid=false, xlim=(-2.4,2.8), ylim=(-25,5.5)) +annotate!(2.3, 3.5, ("SemidiscretizationHyperbolic(mesh, equations, initial_conditions, solver; source_terms, +boundary_conditions, RealT, uEltype, initial_cache) ", 10, :black, :right)) +annotate!(-2.3, 1.5, ("creates and returns SemidiscretizationHyperbolic object, initialized using a mesh, equations, +initial_conditions, boundary_conditions, source_terms, solver and cache", 9, :black, :left)) +plot!([-1.2,-1.2],[0.6,-2],arrow=true,color=:black,linewidth=2,label="") +plot!([-1.2,-1.4],[0.6,-2],arrow=true,color=:black,linewidth=2,label="") +plot!([-1.2,-1.],[0.6,-2],arrow=true,color=:black,linewidth=2,label="") +annotate!(-1, -0.7, ("specialized for mesh +and solver types", 9, :black, :left)) +plot!([1.25,1.25],[0.6,-2],arrow=true,color=:black,linewidth=2,label="") +plot!([1.25,1.05],[0.6,-2],arrow=true,color=:black,linewidth=2,label="") +plot!([1.25,1.45],[0.6,-2],arrow=true,color=:black,linewidth=2,label="") +annotate!(1.48, -0.7, ("specialized for mesh +and boundary_conditions +types", 9, :black, :left)) + +plot!(Shape([(-2.3,-2), (-0.1,-2), (-0.1,-4), (-2.3,-4)]), linecolor="black", fillcolor="white", label=false,linewidth=2) +annotate!(-1.2, -3, ("create_cache(mesh::TreeMesh, equations, + solver::Dg, RealT, uEltype)", 10, :black, :center)) +plot!([-2.22,-2.22],[-4,-22],arrow=false,color=:black,linewidth=2,label="") + +plot!(Shape([(-0.05,-2), (2.6,-2), (2.6,-4), (-0.05,-4)]), linecolor="black", fillcolor="white", label=false,linewidth=2) +annotate!(1.27, -3, ("digest_boundary_conditions(boundary_conditions, + mesh, solver, cache)", 10, :black, :center)) +annotate!(2.6, -5, ("if necessary, converts passed boundary_conditions + into a suitable form for processing by Trixi.jl", 9, :black, :right)) + +plot!(Shape([(-2,-6), (-0.55,-6), (-0.55,-7.1), (-2,-7.1)]), linecolor="black", fillcolor="white", label=false,linewidth=2) +annotate!(-1.95, -6.5, ("local_leaf_cells(mesh.tree)", 10, :black, :left)) +annotate!(-2, -7.5, ("returns cells for which an element needs to be created (i.e. 
all leaf cells)", 9, :black, :left))
+plot!([-2.22,-2],[-6.5,-6.5],arrow=true,color=:black,linewidth=2,label="")
+
+plot!(Shape([(-2,-9), (1.73,-9), (1.73,-10.1), (-2,-10.1)]), linecolor="black", fillcolor="white", label=false,linewidth=2)
+annotate!(-1.95, -9.5, ("init_elements(leaf_cell_ids, mesh, equations, dg.basis, RealT, uEltype)", 10, :black, :left))
+annotate!(-2, -10.5, ("creates and initializes elements, projects Gauss-Lobatto basis onto each of them", 9, :black, :left))
+plot!([-2.22,-2],[-9.5,-9.5],arrow=true,color=:black,linewidth=2,label="")
+
+plot!(Shape([(-2,-12), (0.4,-12), (0.4,-13.1), (-2,-13.1)]), linecolor="black", fillcolor="white", label=false,linewidth=2)
+annotate!(-1.95, -12.5, ("init_interfaces(leaf_cell_ids, mesh, elements)", 10, :black, :left))
+annotate!(-2, -13.5, ("creates and initializes interfaces between each pair of adjacent elements of the same size", 9, :black, :left))
+plot!([-2.22,-2],[-12.5,-12.5],arrow=true,color=:black,linewidth=2,label="")
+
+plot!(Shape([(-2,-15), (0.5,-15), (0.5,-16.1), (-2,-16.1)]), linecolor="black", fillcolor="white", label=false,linewidth=2)
+annotate!(-1.95, -15.5, ("init_boundaries(leaf_cell_ids, mesh, elements)", 10, :black, :left))
+annotate!(-2, -17, ("creates and initializes boundaries, remembers each boundary element, as well as the coordinates of
+each boundary node", 9, :black, :left))
+plot!([-2.22,-2],[-15.5,-15.5],arrow=true,color=:black,linewidth=2,label="")
+
+plot!(Shape([(-1.6,-18), (1.3,-18), (1.3,-19.1), (-1.6,-19.1)]), linecolor="black", fillcolor="white", label=false,linewidth=2)
+annotate!(-1.55, -18.5, ("init_mortars(leaf_cell_ids, mesh, elements, dg.mortar)", 10, :black, :left))
+annotate!(-1.6, -20, ("creates and initializes mortars (type of interfaces) between each triple of adjacent coarsened
+and corresponding small elements", 9, :black, :left))
+plot!([-2.22,-1.6],[-18.5,-18.5],arrow=true,color=:black,linewidth=2,label="")
+annotate!(-2.15, -19, ("2D and 3D", 8, :black, :left))
+
+plot!(Shape([(-2,-21), (1.5,-21), (1.5,-23.1), (-2,-23.1)]), linecolor="black", fillcolor="white", label=false,linewidth=2)
+annotate!(-1.95, -22, ("create_cache(mesh, equations, dg.volume_integral, dg, uEltype)
+for 2D and 3D create_cache(mesh, equations, dg.mortar, uEltype)", 10, :black, :left))
+annotate!(-2, -23.5, ("add specialized parts of the cache required to compute the volume integral, etc.", 9, :black, :left))
+plot!([-2.22,-2],[-22,-22],arrow=true,color=:black,linewidth=2,label="")
+
+savefig("./SemidiscretizationHyperbolic")
\ No newline at end of file
diff --git a/docs/literate/src/files/behind_the_scenes_simulation_setup_plots/src/generate_boundary_figure.jl b/docs/literate/src/files/behind_the_scenes_simulation_setup_plots/src/generate_boundary_figure.jl
new file mode 100644
index 00000000000..14475d21339
--- /dev/null
+++ b/docs/literate/src/files/behind_the_scenes_simulation_setup_plots/src/generate_boundary_figure.jl
@@ -0,0 +1,190 @@
+using Plots
+
+function min(coordinates::Vector{Tuple{Float64, Float64}}, i)
+    min=coordinates[1][i]
+    for j in coordinates
+        if min>j[i]
+            min=j[i]
+        end
+    end
+    return min
+end
+
+function max(coordinates::Vector{Tuple{Float64, Float64}}, i)
+    max=coordinates[1][i]
+    for j in coordinates
+        if max<j[i]
+            max=j[i]
+        end
+    end
+    return max
+end
[...]
         "Create your first setup" => ("first_steps", "create_first_setup.jl"),
         "Changing Trixi.jl itself" => ("first_steps", "changing_trixi.jl"),
     ],
+    "Behind the scenes of a simulation setup" => "behind_the_scenes_simulation_setup.jl",
     # Topic: DG semidiscretizations
     "Introduction to DG methods" => "scalar_linear_advection_1d.jl",
     "DGSEM with flux differencing" => "DGSEM_FluxDiff.jl",
@@ -76,7 +77,7 @@ files = [
     # Topic: other stuff
     "Explicit time stepping" => "time_stepping.jl",
     "Differentiable programming" => "differentiable_programming.jl",
-    "Custom semidiscretizations" => "custom_semidiscretization.jl"
+    "Custom semidiscretizations" => "custom_semidiscretization.jl",
 ]
 
 tutorials = create_tutorials(files)

From 88e4a09b919548ad308e94dfe1b515947b581558 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Thu, 22 Feb 2024 13:49:15 +0100
Subject: [PATCH 115/166] Update benchmarking docs (#1849)

* Update benchmarking docs

* Update performance.md

* Update docs/src/performance.md

Co-authored-by: Daniel Doehring

---------

Co-authored-by: Daniel Doehring
---
 docs/src/performance.md | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/docs/src/performance.md b/docs/src/performance.md
index 82d7f501f63..40970e58c5c 100644
--- a/docs/src/performance.md
+++ b/docs/src/performance.md
@@ -106,7 +106,22 @@ resulting performance improvements of Trixi.jl are given in the following blog p
 We use [PkgBenchmark.jl](https://github.com/JuliaCI/PkgBenchmark.jl) to provide a standard set of benchmarks for Trixi.jl.
 The relevant benchmark script is [benchmark/benchmarks.jl](https://github.com/trixi-framework/Trixi.jl/blob/main/benchmark/benchmarks.jl).
-You can run a standard set of benchmarks via
+To benchmark the changes made in a PR, please proceed as follows:
+
+1. Check out the latest `main` branch of your Trixi.jl development repository.
+2. Check out the latest development branch of your PR.
+3. Change your working directory to the `benchmark` directory of Trixi.jl.
+4. Execute `julia run_benchmarks.jl`.
+
+This will take some hours to complete and requires at least 8 GiB of RAM. When everything is finished, some
+output files will be created in the `benchmark` directory of Trixi.jl.
+
+!!! warning
+    Please note that the benchmark scripts use `--check-bounds=no` at the moment.
+    Thus, they will not work in any useful way for Julia v1.10 (and newer?), see
+    [Julia issue #50985](https://github.com/JuliaLang/julia/issues/50985).
+
+You can also run a standard set of benchmarks manually via
 
 ```julia
 julia> using PkgBenchmark, Trixi

From 9f7eadb3c056ee63af051c1397bfe50fa9ba9a47 Mon Sep 17 00:00:00 2001
From: Hendrik Ranocha
Date: Thu, 22 Feb 2024 14:13:39 +0100
Subject: [PATCH 116/166] set version to v0.6.10

---
 Project.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Project.toml b/Project.toml
index 9bed045637a..221e96643ae 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
 name = "Trixi"
 uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb"
 authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R.
Winters ", "Jesse Chan "] -version = "0.6.10-pre" +version = "0.6.10" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From 5185abd96a0319599d01d57d196ecabdaa083477 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Thu, 22 Feb 2024 14:13:52 +0100 Subject: [PATCH 117/166] set development version to v0.6.11-pre --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 221e96643ae..551e069b934 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "] -version = "0.6.10" +version = "0.6.11-pre" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From 029ddea44cedc076251a5e6c832af7f0bec4ed90 Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Fri, 23 Feb 2024 05:25:00 +0100 Subject: [PATCH 118/166] Own `sqrt` and `log` returning `NaN` for "correct" multi-thread behaviour (#1781) * Introduce NaNMath for unsafe sqrt and log * performance measurements * implement log myself * Try out different log implementation * remove NaNMath, own implementation * remove unrelated * Update src/equations/compressible_euler_2d.jl * NaNSqrt for quasi 1d CEE * fmt * Update src/auxiliary/math.jl * Update src/auxiliary/math.jl * Update src/auxiliary/math.jl * for comparison * Update src/auxiliary/math.jl Co-authored-by: Hendrik Ranocha * Update src/auxiliary/math.jl Co-authored-by: Hendrik Ranocha * llvm version log * Catch ints in sqrt_ * Use sqrt_ log_ everywhere * docu * fmt * replace in comment * try exporting nan funcs * enable SIMD again * Bring back SIMD * doc * Update src/auxiliary/math.jl Co-authored-by: Hendrik Ranocha * Update src/auxiliary/math.jl Co-authored-by: Hendrik Ranocha * docstring fmt * remove redundant docstrings * no own names * fmt * revert unintended * revert * remove unintended * revert * fmt * comments * update test vals * test vals * test vals * Preferences * Update Project.toml * Update src/Trixi.jl * fmt * docstrings * docstrings * docstrings * compat info * Apply suggestions from code review Co-authored-by: Hendrik Ranocha * escape " * fmt * fix benchmarks configuration * skip UUIDs in downgrade CI job * Update src/auxiliary/math.jl Co-authored-by: Hendrik Ranocha * Update Project.toml Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> --------- Co-authored-by: Hendrik Ranocha Co-authored-by: Hendrik Ranocha Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> --- .github/workflows/Downgrade.yml | 2 +- Project.toml | 4 ++ src/Trixi.jl | 6 ++ src/auxiliary/math.jl | 97 +++++++++++++++++++++++++++++++++ test/test_parabolic_1d.jl | 12 ++-- test/test_unstructured_2d.jl | 6 +- 6 files changed, 117 insertions(+), 10 deletions(-) diff --git a/.github/workflows/Downgrade.yml b/.github/workflows/Downgrade.yml index c84b1026d1b..dd5d8ee7e32 100644 --- a/.github/workflows/Downgrade.yml +++ b/.github/workflows/Downgrade.yml @@ -72,7 +72,7 @@ jobs: - uses: julia-actions/cache@v1 - uses: julia-actions/julia-downgrade-compat@v1 with: - skip: LinearAlgebra,Printf,SparseArrays,DiffEqBase + skip: LinearAlgebra,Printf,SparseArrays,UUIDs,DiffEqBase projects: ., test - uses: julia-actions/julia-buildpkg@v1 env: diff --git a/Project.toml b/Project.toml index 551e069b934..6b27e6e9999 100644 --- a/Project.toml +++ b/Project.toml @@ -25,6 +25,7 @@ OffsetArrays = 
"6fe1bfb0-de20-5000-8ca7-80f57d26f881" P4est = "7d669430-f675-4ae7-b43e-fab78ec5a902" Polyester = "f517fe37-dbe3-4b94-8317-1923a5111588" PrecompileTools = "aea7be01-6a6a-4083-8856-8a6e6704d82a" +Preferences = "21216c6a-2e73-6563-6e65-726566657250" Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" RecipesBase = "3cdcf5f2-1ef4-517c-9805-6587b60abb01" Reexport = "189a3867-3050-52da-a836-e630ba90ab69" @@ -46,6 +47,7 @@ Triangulate = "f7e6ffb2-c36d-4f8f-a77e-16e897189344" TriplotBase = "981d1d27-644d-49a2-9326-4793e63143c3" TriplotRecipes = "808ab39a-a642-4abf-81ff-4cb34ebbffa3" TrixiBase = "9a0f1c46-06d5-4909-a5a3-ce25d3fa3284" +UUIDs = "cf7118a7-6976-5b1a-9a39-7adc72f591a4" [weakdeps] Makie = "ee78f7c6-11fb-53f2-987a-cfe4a2b5a57a" @@ -76,6 +78,7 @@ OffsetArrays = "1.12" P4est = "0.4.9" Polyester = "0.7.5" PrecompileTools = "1.1" +Preferences = "1.3" Printf = "1" RecipesBase = "1.1" Reexport = "1.0" @@ -97,6 +100,7 @@ Triangulate = "2.2" TriplotBase = "0.1" TriplotRecipes = "0.1" TrixiBase = "0.1.1" +UUIDs = "1.6" julia = "1.8" [extras] diff --git a/src/Trixi.jl b/src/Trixi.jl index bf0986084af..b7f7767a9d8 100644 --- a/src/Trixi.jl +++ b/src/Trixi.jl @@ -76,6 +76,12 @@ using TrixiBase: TrixiBase using SimpleUnPack: @pack! using DataStructures: BinaryHeap, FasterForward, extract_all! +using UUIDs: UUID +using Preferences: @load_preference, set_preferences! + +const _PREFERENCE_SQRT = @load_preference("sqrt", "sqrt_Trixi_NaN") +const _PREFERENCE_LOG = @load_preference("log", "log_Trixi_NaN") + # finite difference SBP operators using SummationByPartsOperators: AbstractDerivativeOperator, AbstractNonperiodicDerivativeOperator, DerivativeOperator, diff --git a/src/auxiliary/math.jl b/src/auxiliary/math.jl index 38ea0bda8c8..9e3aaa181bf 100644 --- a/src/auxiliary/math.jl +++ b/src/auxiliary/math.jl @@ -5,6 +5,103 @@ @muladd begin #! format: noindent +const TRIXI_UUID = UUID("a7f1ee26-1774-49b1-8366-f1abc58fbfcb") + +""" + Trixi.set_sqrt_type(type; force = true) + +Set the `type` of the square root function to be used in Trixi.jl. +The default is `"sqrt_Trixi_NaN"` which returns `NaN` for negative arguments +instead of throwing an error. +Alternatively, you can set `type` to `"sqrt_Base"` to use the Julia built-in `sqrt` function +which provides a stack-trace of the error which might come in handy when debugging code. +""" +function set_sqrt_type(type; force = true) + @assert type == "sqrt_Trixi_NaN"||type == "sqrt_Base" "Only allowed `sqrt` function types are `\"sqrt_Trixi_NaN\"` and `\"sqrt_Base\"`" + set_preferences!(TRIXI_UUID, "sqrt" => type, force = force) + @info "Please restart Julia and reload Trixi.jl for the `sqrt` computation change to take effect" +end + +@static if _PREFERENCE_SQRT == "sqrt_Trixi_NaN" + """ + Trixi.sqrt(x::Real) + + Custom square root function which returns `NaN` for negative arguments instead of throwing an error. + This is required to ensure [correct results for multithreaded computations](https://github.com/trixi-framework/Trixi.jl/issues/1766) + when using the [`Polyester` package](https://github.com/JuliaSIMD/Polyester.jl), + i.e., using the `@batch` macro instead of the Julia built-in `@threads` macro, see [`@threaded`](@ref). + + We dispatch this function for `Float64, Float32, Float16` to the LLVM intrinsics + `llvm.sqrt.f64`, `llvm.sqrt.f32`, `llvm.sqrt.f16` as for these the LLVM functions can be used out-of the box, + i.e., they return `NaN` for negative arguments. 
+    In principle, one could also use the `sqrt_llvm` call, but for transparency and consistency with [`log`](@ref) we
+    spell out the datatype-dependent functions here.
+    For other types, such as integers or dual numbers required for algorithmic differentiation, we
+    fall back to the Julia built-in `sqrt` function after a check for negative arguments.
+    Since these cases are not performance critical, the check for negativity does not hurt here
+    and can (as of now) even be optimized away by the compiler due to the implementation of `sqrt` in Julia.
+
+    When debugging code, it might be useful to change the implementation of this function to redirect to
+    the Julia built-in `sqrt` function, as this reports the exact place in code where the domain is violated
+    in the stacktrace.
+
+    See also [`Trixi.set_sqrt_type`](@ref).
+    """
+    @inline sqrt(x::Real) = x < zero(x) ? oftype(x, NaN) : Base.sqrt(x)
+
+    # For `sqrt` we could use the `sqrt_llvm` call, ...
+    #@inline sqrt(x::Union{Float64, Float32, Float16}) = Base.sqrt_llvm(x)
+
+    # ... but for transparency and consistency we use the direct LLVM calls here.
+    @inline sqrt(x::Float64) = ccall("llvm.sqrt.f64", llvmcall, Float64, (Float64,), x)
+    @inline sqrt(x::Float32) = ccall("llvm.sqrt.f32", llvmcall, Float32, (Float32,), x)
+    @inline sqrt(x::Float16) = ccall("llvm.sqrt.f16", llvmcall, Float16, (Float16,), x)
+end
+
+"""
+    Trixi.set_log_type(type; force = true)
+
+Set the `type` of the (natural) `log` function to be used in Trixi.jl.
+The default is `"log_Trixi_NaN"` which returns `NaN` for negative arguments
+instead of throwing an error.
+Alternatively, you can set `type` to `"log_Base"` to use the Julia built-in `log` function
+which provides a stack-trace of the error which might come in handy when debugging code.
+"""
+function set_log_type(type; force = true)
+    @assert type == "log_Trixi_NaN"||type == "log_Base" "Only allowed log function types are `\"log_Trixi_NaN\"` and `\"log_Base\"`."
+    set_preferences!(TRIXI_UUID, "log" => type, force = force)
+    @info "Please restart Julia and reload Trixi.jl for the `log` computation change to take effect"
+end
+
+@static if _PREFERENCE_LOG == "log_Trixi_NaN"
+    """
+        Trixi.log(x::Real)
+
+    Custom natural logarithm function which returns `NaN` for negative arguments instead of throwing an error.
+    This is required to ensure [correct results for multithreaded computations](https://github.com/trixi-framework/Trixi.jl/issues/1766)
+    when using the [`Polyester` package](https://github.com/JuliaSIMD/Polyester.jl),
+    i.e., using the `@batch` macro instead of the Julia built-in `@threads` macro, see [`@threaded`](@ref).
+
+    We dispatch this function for `Float64, Float32, Float16` to the respective LLVM intrinsics
+    `llvm.log.f64`, `llvm.log.f32`, `llvm.log.f16` as for this the LLVM functions can be used out-of the box, i.e.,
+    they return `NaN` for negative arguments.
+    For other types, such as integers or dual numbers required for algorithmic differentiation, we
+    fall back to the Julia built-in `log` function after a check for negative arguments.
+    Since these cases are not performance critical, the check for negativity does not hurt here.
+
+    When debugging code, it might be useful to change the implementation of this function to redirect to
+    the Julia built-in `log` function, as this reports the exact place in code where the domain is violated
+    in the stacktrace.
+
+    See also [`Trixi.set_log_type`](@ref).
+    """
+    @inline log(x::Real) = x < zero(x) ?
oftype(x, NaN) : Base.log(x) + + @inline log(x::Float64) = ccall("llvm.log.f64", llvmcall, Float64, (Float64,), x) + @inline log(x::Float32) = ccall("llvm.log.f32", llvmcall, Float32, (Float32,), x) + @inline log(x::Float16) = ccall("llvm.log.f16", llvmcall, Float16, (Float16,), x) +end + """ ln_mean(x, y) diff --git a/test/test_parabolic_1d.jl b/test/test_parabolic_1d.jl index c1cfec052fe..41d375e2e31 100644 --- a/test/test_parabolic_1d.jl +++ b/test/test_parabolic_1d.jl @@ -195,14 +195,14 @@ end Prandtl = prandtl_number(), gradient_variables = GradientVariablesEntropy()), l2=[ - 2.459359632523962e-5, - 2.3928390718460263e-5, - 0.00011252414117082376, + 2.4593501090944024e-5, + 2.3928163240907908e-5, + 0.00011252309905552921, ], linf=[ - 0.0001185052018830568, - 0.00018987717854305393, - 0.0009597503607920999, + 0.0001185048754512863, + 0.0001898766501935486, + 0.0009597450028770993, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) diff --git a/test/test_unstructured_2d.jl b/test/test_unstructured_2d.jl index 139b423ead1..83b8318c926 100644 --- a/test/test_unstructured_2d.jl +++ b/test/test_unstructured_2d.jl @@ -710,9 +710,9 @@ end 1.0066867437607972e-13, 6.889210012578449e-14, 1.568290814572709e-13], - linf=[5.963762816918461e-10, - 5.08869890669672e-11, - 1.1581377523661729e-10, + linf=[2.353373051988683e-10, + 2.801543719233024e-11, + 3.930469838486772e-11, 4.61017890529547e-11], tspan=(0.0, 0.1), atol=1.0e-11) From 62610bb6a858eedfaf8fe61f074aa757441a08d1 Mon Sep 17 00:00:00 2001 From: Patrick Ersing <114223904+patrickersing@users.noreply.github.com> Date: Fri, 23 Feb 2024 05:30:08 +0100 Subject: [PATCH 119/166] Separation of TrixiShallowWater.jl (#1809) * remove wet_dry functionality for SWE-1D * remove wet_dry functionality for SWE-2D * remove twolayer equations * remove limiters from swe_quasi_1d * remove export of min_max_speed_chen_noelle * remove elixirs * update news.md * add unit tests to increase coverage * Change news.md according to code review Co-authored-by: Hendrik Ranocha --------- Co-authored-by: Andrew Winters Co-authored-by: Hendrik Ranocha --- NEWS.md | 13 + .../elixir_shallowwater_conical_island.jl | 114 --- .../elixir_shallowwater_parabolic_bowl.jl | 119 --- ...ixir_shallowwater_well_balanced_wet_dry.jl | 207 ----- .../elixir_shallowwater_beach.jl | 123 --- .../elixir_shallowwater_parabolic_bowl.jl | 119 --- ...lixir_shallowwater_twolayer_convergence.jl | 60 -- .../elixir_shallowwater_twolayer_dam_break.jl | 94 -- ...xir_shallowwater_twolayer_well_balanced.jl | 86 -- ...ixir_shallowwater_well_balanced_wet_dry.jl | 172 ---- .../elixir_shallowwater_conical_island.jl | 117 --- .../elixir_shallowwater_parabolic_bowl.jl | 121 --- ...lixir_shallowwater_twolayer_convergence.jl | 60 -- ...xir_shallowwater_twolayer_well_balanced.jl | 81 -- ...ixir_shallowwater_well_balanced_wet_dry.jl | 206 ----- ...ixir_shallowwater_three_mound_dam_break.jl | 133 --- ...lixir_shallowwater_twolayer_convergence.jl | 63 -- .../elixir_shallowwater_twolayer_dam_break.jl | 147 ---- ...xir_shallowwater_twolayer_well_balanced.jl | 81 -- src/Trixi.jl | 12 +- src/callbacks_stage/callbacks_stage.jl | 2 - .../positivity_shallow_water.jl | 89 -- .../positivity_shallow_water_dg1d.jl | 89 -- .../positivity_shallow_water_dg2d.jl | 90 -- src/equations/equations.jl | 2 - src/equations/numerical_fluxes.jl | 23 - src/equations/shallow_water_1d.jl | 187 +--- src/equations/shallow_water_2d.jl | 266 +----- src/equations/shallow_water_quasi_1d.jl | 37 +- 
src/equations/shallow_water_two_layer_1d.jl | 511 ----------- src/equations/shallow_water_two_layer_2d.jl | 805 ------------------ src/solvers/dgsem_tree/indicators.jl | 76 -- src/solvers/dgsem_tree/indicators_1d.jl | 109 --- src/solvers/dgsem_tree/indicators_2d.jl | 110 --- test/test_structured_2d.jl | 78 -- test/test_tree_1d.jl | 2 - test/test_tree_1d_shallowwater.jl | 75 -- test/test_tree_1d_shallowwater_twolayer.jl | 74 -- test/test_tree_2d_part3.jl | 3 - test/test_tree_2d_shallowwater.jl | 79 -- test/test_tree_2d_shallowwater_twolayer.jl | 88 -- test/test_unit.jl | 34 +- test/test_unstructured_2d.jl | 101 --- 43 files changed, 56 insertions(+), 5002 deletions(-) delete mode 100644 examples/structured_2d_dgsem/elixir_shallowwater_conical_island.jl delete mode 100644 examples/structured_2d_dgsem/elixir_shallowwater_parabolic_bowl.jl delete mode 100644 examples/structured_2d_dgsem/elixir_shallowwater_well_balanced_wet_dry.jl delete mode 100644 examples/tree_1d_dgsem/elixir_shallowwater_beach.jl delete mode 100644 examples/tree_1d_dgsem/elixir_shallowwater_parabolic_bowl.jl delete mode 100644 examples/tree_1d_dgsem/elixir_shallowwater_twolayer_convergence.jl delete mode 100644 examples/tree_1d_dgsem/elixir_shallowwater_twolayer_dam_break.jl delete mode 100644 examples/tree_1d_dgsem/elixir_shallowwater_twolayer_well_balanced.jl delete mode 100644 examples/tree_1d_dgsem/elixir_shallowwater_well_balanced_wet_dry.jl delete mode 100644 examples/tree_2d_dgsem/elixir_shallowwater_conical_island.jl delete mode 100644 examples/tree_2d_dgsem/elixir_shallowwater_parabolic_bowl.jl delete mode 100644 examples/tree_2d_dgsem/elixir_shallowwater_twolayer_convergence.jl delete mode 100644 examples/tree_2d_dgsem/elixir_shallowwater_twolayer_well_balanced.jl delete mode 100644 examples/tree_2d_dgsem/elixir_shallowwater_well_balanced_wet_dry.jl delete mode 100644 examples/unstructured_2d_dgsem/elixir_shallowwater_three_mound_dam_break.jl delete mode 100644 examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_convergence.jl delete mode 100644 examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_dam_break.jl delete mode 100644 examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_well_balanced.jl delete mode 100644 src/callbacks_stage/positivity_shallow_water.jl delete mode 100644 src/callbacks_stage/positivity_shallow_water_dg1d.jl delete mode 100644 src/callbacks_stage/positivity_shallow_water_dg2d.jl delete mode 100644 src/equations/shallow_water_two_layer_1d.jl delete mode 100644 src/equations/shallow_water_two_layer_2d.jl delete mode 100644 test/test_tree_1d_shallowwater_twolayer.jl delete mode 100644 test/test_tree_2d_shallowwater_twolayer.jl diff --git a/NEWS.md b/NEWS.md index feccd1f9852..ecc91581e9a 100644 --- a/NEWS.md +++ b/NEWS.md @@ -4,6 +4,19 @@ Trixi.jl follows the interpretation of [semantic versioning (semver)](https://ju used in the Julia ecosystem. Notable changes will be documented in this file for human readability. +## Changes when updating to v0.7 from v0.6.x + +#### Added + +#### Changed + +#### Deprecated + +#### Removed +- Some specialized shallow water specific features are no longer available directly in + Trixi.jl, but are moved to a dedicated repository: [TrixiShallowWater.jl](https://github.com/trixi-framework/TrixiShallowWater.jl). This includes all features related to wetting and drying, as well as the `ShallowWaterTwoLayerEquations1D` and `ShallowWaterTwoLayerEquations2D`. + However, the basic shallow water equations are still part of Trixi.jl. 
We'll also be updating the TrixiShallowWater.jl documentation with instructions on how to use these relocated features in the future. + ## Changes in the v0.6 lifecycle #### Added diff --git a/examples/structured_2d_dgsem/elixir_shallowwater_conical_island.jl b/examples/structured_2d_dgsem/elixir_shallowwater_conical_island.jl deleted file mode 100644 index e65ed19221e..00000000000 --- a/examples/structured_2d_dgsem/elixir_shallowwater_conical_island.jl +++ /dev/null @@ -1,114 +0,0 @@ - -using OrdinaryDiffEq -using Trixi - -############################################################################### -# Semidiscretization of the shallow water equations -# -# TODO: TrixiShallowWater: wet/dry example elixir - -equations = ShallowWaterEquations2D(gravity_constant = 9.81, H0 = 1.4) - -""" - initial_condition_conical_island(x, t, equations::ShallowWaterEquations2D) - -Initial condition for the [`ShallowWaterEquations2D`](@ref) to test the [`hydrostatic_reconstruction_chen_noelle`](@ref) -and its handling of discontinuous water heights at the start in combination with wetting and -drying. The bottom topography is given by a conical island in the middle of the domain. Around that -island, there is a cylindrical water column at t=0 and the rest of the domain is dry. This -discontinuous water height is smoothed by a logistic function. This simulation uses periodic -boundary conditions. -""" -function initial_condition_conical_island(x, t, equations::ShallowWaterEquations2D) - # Set the background values - - v1 = 0.0 - v2 = 0.0 - - x1, x2 = x - b = max(0.1, 1.0 - 4.0 * sqrt(x1^2 + x2^2)) - - # use a logistic function to transfer water height value smoothly - L = equations.H0 # maximum of function - x0 = 0.3 # center point of function - k = -25.0 # sharpness of transfer - - H = max(b, L / (1.0 + exp(-k * (sqrt(x1^2 + x2^2) - x0)))) - - # It is mandatory to shift the water level at dry areas to make sure the water height h - # stays positive. The system would not be stable for h set to a hard 0 due to division by h in - # the computation of velocity, e.g., (h v1) / h. Therefore, a small dry state threshold - # with a default value of 500*eps() ≈ 1e-13 in double precision, is set in the constructor above - # for the ShallowWaterEquations and added to the initial condition if h = 0. - # This default value can be changed within the constructor call depending on the simulation setup. 
- H = max(H, b + equations.threshold_limiter) - return prim2cons(SVector(H, v1, v2, b), equations) -end - -initial_condition = initial_condition_conical_island - -############################################################################### -# Get the DG approximation space - -volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal) -surface_flux = (FluxHydrostaticReconstruction(flux_hll_chen_noelle, - hydrostatic_reconstruction_chen_noelle), - flux_nonconservative_chen_noelle) - -basis = LobattoLegendreBasis(4) - -indicator_sc = IndicatorHennemannGassnerShallowWater(equations, basis, - alpha_max = 0.5, - alpha_min = 0.001, - alpha_smooth = true, - variable = waterheight_pressure) -volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; - volume_flux_dg = volume_flux, - volume_flux_fv = surface_flux) - -solver = DGSEM(basis, surface_flux, volume_integral) - -############################################################################### -# Get the StructuredMesh and setup a periodic mesh - -coordinates_min = (-1.0, -1.0) -coordinates_max = (1.0, 1.0) - -cells_per_dimension = (16, 16) - -mesh = StructuredMesh(cells_per_dimension, coordinates_min, coordinates_max) - -# Create the semi discretization object -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - -############################################################################### -# ODE solver - -tspan = (0.0, 10.0) -ode = semidiscretize(semi, tspan) - -############################################################################### -# Callbacks - -summary_callback = SummaryCallback() - -analysis_interval = 1000 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 100, - save_initial_solution = true, - save_final_solution = true) - -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution) - -############################################################################### -# run the simulation - -stage_limiter! = PositivityPreservingLimiterShallowWater(variables = (Trixi.waterheight,)) - -sol = solve(ode, SSPRK43(stage_limiter!); - ode_default_options()..., callback = callbacks); - -summary_callback() # print the timer summary diff --git a/examples/structured_2d_dgsem/elixir_shallowwater_parabolic_bowl.jl b/examples/structured_2d_dgsem/elixir_shallowwater_parabolic_bowl.jl deleted file mode 100644 index bc198f18835..00000000000 --- a/examples/structured_2d_dgsem/elixir_shallowwater_parabolic_bowl.jl +++ /dev/null @@ -1,119 +0,0 @@ - -using OrdinaryDiffEq -using Trixi - -############################################################################### -# Semidiscretization of the shallow water equations -# -# TODO: TrixiShallowWater: wet/dry example elixir - -equations = ShallowWaterEquations2D(gravity_constant = 9.81) - -""" - initial_condition_parabolic_bowl(x, t, equations:: ShallowWaterEquations2D) - -Well-known initial condition to test the [`hydrostatic_reconstruction_chen_noelle`](@ref) and its -wet-dry mechanics. This test has an analytical solution. The initial condition is defined by the -analytical solution at time t=0. The bottom topography defines a bowl and the water level is given -by an oscillating lake. - -The original test and its analytical solution were first presented in -- William C. 
Thacker (1981) - Some exact solutions to the nonlinear shallow-water wave equations - [DOI: 10.1017/S0022112081001882](https://doi.org/10.1017/S0022112081001882). - -The particular setup below is taken from Section 6.2 of -- Niklas Wintermeyer, Andrew R. Winters, Gregor J. Gassner and Timothy Warburton (2018) - An entropy stable discontinuous Galerkin method for the shallow water equations on - curvilinear meshes with wet/dry fronts accelerated by GPUs - [DOI: 10.1016/j.jcp.2018.08.038](https://doi.org/10.1016/j.jcp.2018.08.038). -""" -function initial_condition_parabolic_bowl(x, t, equations::ShallowWaterEquations2D) - a = 1.0 - h_0 = 0.1 - sigma = 0.5 - ω = sqrt(2 * equations.gravity * h_0) / a - - v1 = -sigma * ω * sin(ω * t) - v2 = sigma * ω * cos(ω * t) - - b = h_0 * ((x[1])^2 + (x[2])^2) / a^2 - - H = sigma * h_0 / a^2 * (2 * x[1] * cos(ω * t) + 2 * x[2] * sin(ω * t) - sigma) + h_0 - - # It is mandatory to shift the water level at dry areas to make sure the water height h - # stays positive. The system would not be stable for h set to a hard 0 due to division by h in - # the computation of velocity, e.g., (h v1) / h. Therefore, a small dry state threshold - # with a default value of 500*eps() ≈ 1e-13 in double precision, is set in the constructor above - # for the ShallowWaterEquations and added to the initial condition if h = 0. - # This default value can be changed within the constructor call depending on the simulation setup. - H = max(H, b + equations.threshold_limiter) - return prim2cons(SVector(H, v1, v2, b), equations) -end - -initial_condition = initial_condition_parabolic_bowl - -############################################################################### -# Get the DG approximation space - -volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal) -surface_flux = (FluxHydrostaticReconstruction(flux_hll_chen_noelle, - hydrostatic_reconstruction_chen_noelle), - flux_nonconservative_chen_noelle) - -basis = LobattoLegendreBasis(4) - -indicator_sc = IndicatorHennemannGassnerShallowWater(equations, basis, - alpha_max = 0.6, - alpha_min = 0.001, - alpha_smooth = true, - variable = waterheight_pressure) -volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; - volume_flux_dg = volume_flux, - volume_flux_fv = surface_flux) - -solver = DGSEM(basis, surface_flux, volume_integral) - -############################################################################### - -coordinates_min = (-2.0, -2.0) -coordinates_max = (2.0, 2.0) - -cells_per_dimension = (150, 150) - -mesh = StructuredMesh(cells_per_dimension, coordinates_min, coordinates_max) - -# create the semi discretization object -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - -############################################################################### -# ODE solvers, callbacks etc. - -tspan = (0.0, 1.0) -ode = semidiscretize(semi, tspan) - -summary_callback = SummaryCallback() - -analysis_interval = 1000 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval, - save_analysis = false, - extra_analysis_integrals = (energy_kinetic, - energy_internal)) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 100, - save_initial_solution = true, - save_final_solution = true) - -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution) - -stage_limiter! 
= PositivityPreservingLimiterShallowWater(variables = (Trixi.waterheight,)) - -############################################################################### -# run the simulation - -sol = solve(ode, SSPRK43(stage_limiter!); - ode_default_options()..., callback = callbacks); - -summary_callback() # print the timer summary diff --git a/examples/structured_2d_dgsem/elixir_shallowwater_well_balanced_wet_dry.jl b/examples/structured_2d_dgsem/elixir_shallowwater_well_balanced_wet_dry.jl deleted file mode 100644 index 8e492b1ba05..00000000000 --- a/examples/structured_2d_dgsem/elixir_shallowwater_well_balanced_wet_dry.jl +++ /dev/null @@ -1,207 +0,0 @@ - -using OrdinaryDiffEq -using Trixi -using Printf: @printf, @sprintf - -############################################################################### -# Semidiscretization of the shallow water equations -# -# TODO: TrixiShallowWater: wet/dry example elixir - -equations = ShallowWaterEquations2D(gravity_constant = 9.812) - -""" - initial_condition_well_balanced_chen_noelle(x, t, equations:: ShallowWaterEquations2D) - -Initial condition with a complex (discontinuous) bottom topography to test the well-balanced -property for the [`hydrostatic_reconstruction_chen_noelle`](@ref) including dry areas within the -domain. The errors from the analysis callback are not important but the error for this -lake-at-rest test case `∑|H0-(h+b)|` should be around machine roundoff. - -The initial condition is taken from Section 5.2 of the paper: -- Guoxian Chen and Sebastian Noelle (2017) - A new hydrostatic reconstruction scheme based on subcell reconstructions - [DOI:10.1137/15M1053074](https://dx.doi.org/10.1137/15M1053074) -""" -function initial_condition_complex_bottom_well_balanced(x, t, - equations::ShallowWaterEquations2D) - v1 = 0 - v2 = 0 - b = sin(4 * pi * x[1]) + 3 - - if x[1] >= 0.5 - b = sin(4 * pi * x[1]) + 1 - end - - H = max(b, 2.5) - - if x[1] >= 0.5 - H = max(b, 1.5) - end - - # It is mandatory to shift the water level at dry areas to make sure the water height h - # stays positive. The system would not be stable for h set to a hard 0 due to division by h in - # the computation of velocity, e.g., (h v1) / h. Therefore, a small dry state threshold - # with a default value of 500*eps() ≈ 1e-13 in double precision, is set in the constructor above - # for the ShallowWaterEquations and added to the initial condition if h = 0. - # This default value can be changed within the constructor call depending on the simulation setup. 
- H = max(H, b + equations.threshold_limiter) - return prim2cons(SVector(H, v1, v2, b), equations) -end - -initial_condition = initial_condition_complex_bottom_well_balanced - -############################################################################### -# Get the DG approximation space - -volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal) - -surface_flux = (FluxHydrostaticReconstruction(flux_hll_chen_noelle, - hydrostatic_reconstruction_chen_noelle), - flux_nonconservative_chen_noelle) - -basis = LobattoLegendreBasis(3) - -indicator_sc = IndicatorHennemannGassnerShallowWater(equations, basis, - alpha_max = 0.5, - alpha_min = 0.001, - alpha_smooth = true, - variable = waterheight_pressure) -volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; - volume_flux_dg = volume_flux, - volume_flux_fv = surface_flux) - -solver = DGSEM(basis, surface_flux, volume_integral) - -############################################################################### -# Create the StructuredMesh for the domain [0, 1]^2 - -coordinates_min = (0.0, 0.0) -coordinates_max = (1.0, 1.0) - -cells_per_dimension = (16, 16) - -mesh = StructuredMesh(cells_per_dimension, coordinates_min, coordinates_max) - -# create the semi discretization object -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - -############################################################################### -# ODE solvers, callbacks etc. - -tspan = (0.0, 10.0) -ode = semidiscretize(semi, tspan) - -############################################################################### -# Workaround to set a discontinuous water and bottom topography for -# debugging and testing. Essentially, this is a slight augmentation of the -# `compute_coefficients` where the `x` node value passed here is slightly -# perturbed to the left / right in order to set a true discontinuity that avoids -# the doubled value of the LGL nodes at a particular element interface. -# -# Note! The errors from the analysis callback are not important but the error -# for this lake at rest test case `∑|H0-(h+b)|` should be near machine roundoff. - -# point to the data we want to augment -u = Trixi.wrap_array(ode.u0, semi) -# reset the initial condition -for element in eachelement(semi.solver, semi.cache) - for j in eachnode(semi.solver), i in eachnode(semi.solver) - x_node = Trixi.get_node_coords(semi.cache.elements.node_coordinates, equations, - semi.solver, i, j, element) - # We know that the discontinuity is a vertical line. Slightly augment the x value by a factor - # of unit roundoff to avoid the repeted value from the LGL nodes at at interface. 
- if i == 1 - x_node = SVector(nextfloat(x_node[1]), x_node[2]) - elseif i == nnodes(semi.solver) - x_node = SVector(prevfloat(x_node[1]), x_node[2]) - end - u_node = initial_condition_complex_bottom_well_balanced(x_node, first(tspan), - equations) - Trixi.set_node_vars!(u, u_node, equations, semi.solver, i, j, element) - end -end - -############################################################################### -# Callbacks - -summary_callback = SummaryCallback() - -analysis_interval = 1000 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval, - save_analysis = false) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 1000, - save_initial_solution = true, - save_final_solution = true) - -stepsize_callback = StepsizeCallback(cfl = 1.0) - -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution, - stepsize_callback) - -stage_limiter! = PositivityPreservingLimiterShallowWater(variables = (Trixi.waterheight,)) - -############################################################################### -# run the simulation - -sol = solve(ode, SSPRK43(stage_limiter!); dt = 1.0, - ode_default_options()..., callback = callbacks, adaptive = false); - -summary_callback() # print the timer summary - -############################################################################### -# Workaround to compute the well-balancedness error for this particular problem -# that has two reference water heights. One for a lake to the left of the -# discontinuous bottom topography `H0_upper = 2.5` and another for a lake to the -# right of the discontinuous bottom topography `H0_lower = 1.5`. - -# Declare a special version of the function to compute the lake-at-rest error -# OBS! The reference water height values are hardcoded for convenience. -function lake_at_rest_error_two_level(u, x, equations::ShallowWaterEquations2D) - h, _, _, b = u - - # For well-balancedness testing with possible wet/dry regions the reference - # water height `H0` accounts for the possibility that the bottom topography - # can emerge out of the water as well as for the threshold offset to avoid - # division by a "hard" zero water heights as well. - if x[1] < 0.5 - H0_wet_dry = max(2.5, b + equations.threshold_limiter) - else - H0_wet_dry = max(1.5, b + equations.threshold_limiter) - end - - return abs(H0_wet_dry - (h + b)) -end - -# point to the data we want to analyze -u = Trixi.wrap_array(sol[end], semi) -# Perform the actual integration of the well-balancedness error over the domain -l1_well_balance_error = Trixi.integrate_via_indices(u, mesh, equations, semi.solver, - semi.cache; - normalize = true) do u, i, j, element, - equations, solver - x_node = Trixi.get_node_coords(semi.cache.elements.node_coordinates, equations, solver, - i, j, element) - # We know that the discontinuity is a vertical line. Slightly augment the x value by a factor - # of unit roundoff to avoid the repeted value from the LGL nodes at at interface. 
- if i == 1 - x_node = SVector(nextfloat(x_node[1]), x_node[2]) - elseif i == nnodes(semi.solver) - x_node = SVector(prevfloat(x_node[1]), x_node[2]) - end - u_local = Trixi.get_node_vars(u, equations, solver, i, j, element) - return lake_at_rest_error_two_level(u_local, x_node, equations) -end - -# report the well-balancedness lake-at-rest error to the screen -println("─"^100) -println(" Lake-at-rest error for '", Trixi.get_name(equations), "' with ", summary(solver), - " at final time " * @sprintf("%10.8e", tspan[end])) - -@printf(" %-12s:", Trixi.pretty_form_utf(lake_at_rest_error)) -@printf(" % 10.8e", l1_well_balance_error) -println() -println("─"^100) diff --git a/examples/tree_1d_dgsem/elixir_shallowwater_beach.jl b/examples/tree_1d_dgsem/elixir_shallowwater_beach.jl deleted file mode 100644 index 378079ca334..00000000000 --- a/examples/tree_1d_dgsem/elixir_shallowwater_beach.jl +++ /dev/null @@ -1,123 +0,0 @@ - -using OrdinaryDiffEq -using Trixi - -############################################################################### -# Semidiscretization of the shallow water equations -# -# TODO: TrixiShallowWater: wet/dry example elixir - -equations = ShallowWaterEquations1D(gravity_constant = 9.812) - -""" - initial_condition_beach(x, t, equations:: ShallowWaterEquations1D) -Initial condition to simulate a wave running towards a beach and crashing. Difficult test -including both wetting and drying in the domain using slip wall boundary conditions. -The bottom topography is altered to be differentiable on the domain [0,8] and -differs from the reference below. - -The water height and speed functions used here, are adapted from the initial condition -found in section 5.2 of the paper: - - Andreas Bollermann, Sebastian Noelle, Maria Lukáčová-Medvid’ová (2011) - Finite volume evolution Galerkin methods for the shallow water equations with dry beds\n - [DOI: 10.4208/cicp.220210.020710a](https://dx.doi.org/10.4208/cicp.220210.020710a) -""" -function initial_condition_beach(x, t, equations::ShallowWaterEquations1D) - D = 1 - delta = 0.02 - gamma = sqrt((3 * delta) / (4 * D)) - x_a = sqrt((4 * D) / (3 * delta)) * acosh(sqrt(20)) - - f = D + 40 * delta * sech(gamma * (8 * x[1] - x_a))^2 - - # steep curved beach - b = 0.01 + 99 / 409600 * 4^x[1] - - if x[1] >= 6 - H = b - v = 0.0 - else - H = f - v = sqrt(equations.gravity / D) * H - end - - # It is mandatory to shift the water level at dry areas to make sure the water height h - # stays positive. The system would not be stable for h set to a hard 0 due to division by h in - # the computation of velocity, e.g., (h v) / h. Therefore, a small dry state threshold - # with a default value of 500*eps() ≈ 1e-13 in double precision, is set in the constructor above - # for the ShallowWaterEquations and added to the initial condition if h = 0. - # This default value can be changed within the constructor call depending on the simulation setup. 
- H = max(H, b + equations.threshold_limiter) - return prim2cons(SVector(H, v, b), equations) -end - -initial_condition = initial_condition_beach -boundary_condition = boundary_condition_slip_wall - -############################################################################### -# Get the DG approximation space - -volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal) -surface_flux = (FluxHydrostaticReconstruction(flux_hll_chen_noelle, - hydrostatic_reconstruction_chen_noelle), - flux_nonconservative_chen_noelle) - -basis = LobattoLegendreBasis(3) - -indicator_sc = IndicatorHennemannGassnerShallowWater(equations, basis, - alpha_max = 0.5, - alpha_min = 0.001, - alpha_smooth = true, - variable = waterheight_pressure) -volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; - volume_flux_dg = volume_flux, - volume_flux_fv = surface_flux) - -solver = DGSEM(basis, surface_flux, volume_integral) - -############################################################################### -# Create the TreeMesh for the domain [0, 8] - -coordinates_min = 0.0 -coordinates_max = 8.0 - -mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 7, - n_cells_max = 10_000, - periodicity = false) - -# create the semi discretization object -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, - boundary_conditions = boundary_condition) - -############################################################################### -# ODE solvers, callbacks etc. - -tspan = (0.0, 10.0) -ode = semidiscretize(semi, tspan) - -summary_callback = SummaryCallback() - -analysis_interval = 1000 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval, - save_analysis = false, - extra_analysis_integrals = (energy_kinetic, - energy_internal)) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(dt = 0.5, - save_initial_solution = true, - save_final_solution = true) - -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution) - -stage_limiter! = PositivityPreservingLimiterShallowWater(variables = (Trixi.waterheight,)) - -############################################################################### -# run the simulation - -sol = solve(ode, SSPRK43(stage_limiter!); - ode_default_options()..., callback = callbacks); - -summary_callback() # print the timer summary diff --git a/examples/tree_1d_dgsem/elixir_shallowwater_parabolic_bowl.jl b/examples/tree_1d_dgsem/elixir_shallowwater_parabolic_bowl.jl deleted file mode 100644 index a586562af7e..00000000000 --- a/examples/tree_1d_dgsem/elixir_shallowwater_parabolic_bowl.jl +++ /dev/null @@ -1,119 +0,0 @@ - -using OrdinaryDiffEq -using Trixi - -############################################################################### -# Semidiscretization of the shallow water equations -# -# TODO: TrixiShallowWater: wet/dry example elixir - -equations = ShallowWaterEquations1D(gravity_constant = 9.81) - -""" - initial_condition_parabolic_bowl(x, t, equations:: ShallowWaterEquations1D) - -Well-known initial condition to test the [`hydrostatic_reconstruction_chen_noelle`](@ref) and its -wet-dry mechanics. This test has analytical solutions. The initial condition is defined by the -analytical solution at time t=0. The bottom topography defines a bowl and the water level is given -by an oscillating lake. - -The original test and its analytical solution in two dimensions were first presented in -- William C. 
Thacker (1981) - Some exact solutions to the nonlinear shallow-water wave equations - [DOI: 10.1017/S0022112081001882](https://doi.org/10.1017/S0022112081001882). - -The particular setup below is taken from Section 6.2 of -- Niklas Wintermeyer, Andrew R. Winters, Gregor J. Gassner and Timothy Warburton (2018) - An entropy stable discontinuous Galerkin method for the shallow water equations on - curvilinear meshes with wet/dry fronts accelerated by GPUs - [DOI: 10.1016/j.jcp.2018.08.038](https://doi.org/10.1016/j.jcp.2018.08.038). -""" -function initial_condition_parabolic_bowl(x, t, equations::ShallowWaterEquations1D) - a = 1 - h_0 = 0.1 - sigma = 0.5 - ω = sqrt(2 * equations.gravity * h_0) / a - - v = -sigma * ω * sin(ω * t) - - b = h_0 * x[1]^2 / a^2 - - H = sigma * h_0 / a^2 * (2 * x[1] * cos(ω * t) - sigma) + h_0 - - # It is mandatory to shift the water level at dry areas to make sure the water height h - # stays positive. The system would not be stable for h set to a hard 0 due to division by h in - # the computation of velocity, e.g., (h v) / h. Therefore, a small dry state threshold - # with a default value of 500*eps() ≈ 1e-13 in double precision, is set in the constructor above - # for the ShallowWaterEquations and added to the initial condition if h = 0. - # This default value can be changed within the constructor call depending on the simulation setup. - H = max(H, b + equations.threshold_limiter) - return prim2cons(SVector(H, v, b), equations) -end - -initial_condition = initial_condition_parabolic_bowl - -############################################################################### -# Get the DG approximation space - -volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal) -surface_flux = (FluxHydrostaticReconstruction(flux_hll_chen_noelle, - hydrostatic_reconstruction_chen_noelle), - flux_nonconservative_chen_noelle) - -basis = LobattoLegendreBasis(5) - -indicator_sc = IndicatorHennemannGassnerShallowWater(equations, basis, - alpha_max = 0.5, - alpha_min = 0.001, - alpha_smooth = true, - variable = waterheight_pressure) -volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; - volume_flux_dg = volume_flux, - volume_flux_fv = surface_flux) - -solver = DGSEM(basis, surface_flux, volume_integral) - -############################################################################### -# Create the TreeMesh for the domain [-2, 2] - -coordinates_min = -2.0 -coordinates_max = 2.0 - -mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 6, - n_cells_max = 10_000) - -# create the semi discretization object -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - -############################################################################### -# ODE solvers, callbacks etc. - -tspan = (0.0, 10.0) -ode = semidiscretize(semi, tspan) - -summary_callback = SummaryCallback() - -analysis_interval = 1000 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval, - save_analysis = false, - extra_analysis_integrals = (energy_kinetic, - energy_internal)) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 1000, - save_initial_solution = true, - save_final_solution = true) - -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution) - -stage_limiter! 
= PositivityPreservingLimiterShallowWater(variables = (Trixi.waterheight,)) - -############################################################################### -# run the simulation - -sol = solve(ode, SSPRK43(stage_limiter!); - ode_default_options()..., callback = callbacks); - -summary_callback() # print the timer summary diff --git a/examples/tree_1d_dgsem/elixir_shallowwater_twolayer_convergence.jl b/examples/tree_1d_dgsem/elixir_shallowwater_twolayer_convergence.jl deleted file mode 100644 index e6a01849852..00000000000 --- a/examples/tree_1d_dgsem/elixir_shallowwater_twolayer_convergence.jl +++ /dev/null @@ -1,60 +0,0 @@ - -using OrdinaryDiffEq -using Trixi - -############################################################################### -# Semidiscretization of the two-layer shallow water equations - -equations = ShallowWaterTwoLayerEquations1D(gravity_constant = 10.0, rho_upper = 0.9, - rho_lower = 1.0) - -initial_condition = initial_condition_convergence_test - -############################################################################### -# Get the DG approximation space - -volume_flux = (flux_wintermeyer_etal, flux_nonconservative_ersing_etal) -solver = DGSEM(polydeg = 3, - surface_flux = (flux_wintermeyer_etal, flux_nonconservative_ersing_etal), - volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) - -############################################################################### -# Get the TreeMesh and setup a periodic mesh - -coordinates_min = 0.0 -coordinates_max = sqrt(2.0) -mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 3, - n_cells_max = 10_000, - periodicity = true) - -# create the semi discretization object -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, - source_terms = source_terms_convergence_test) - -############################################################################### -# ODE solvers, callbacks etc. 
- -tspan = (0.0, 1.0) -ode = semidiscretize(semi, tspan) - -summary_callback = SummaryCallback() - -analysis_interval = 500 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 500, - save_initial_solution = true, - save_final_solution = true) - -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution) - -############################################################################### -# run the simulation - -# use a Runge-Kutta method with automatic (error based) time step size control -sol = solve(ode, RDPK3SpFSAL49(), abstol = 1.0e-8, reltol = 1.0e-8, - save_everystep = false, callback = callbacks); -summary_callback() # print the timer summary diff --git a/examples/tree_1d_dgsem/elixir_shallowwater_twolayer_dam_break.jl b/examples/tree_1d_dgsem/elixir_shallowwater_twolayer_dam_break.jl deleted file mode 100644 index 03b93754d0f..00000000000 --- a/examples/tree_1d_dgsem/elixir_shallowwater_twolayer_dam_break.jl +++ /dev/null @@ -1,94 +0,0 @@ - -using OrdinaryDiffEq -using Trixi - -############################################################################### -# Semidiscretization of the two-layer shallow water equations for a dam break -# test with a discontinuous bottom topography function to test entropy conservation - -equations = ShallowWaterTwoLayerEquations1D(gravity_constant = 9.81, H0 = 2.0, - rho_upper = 0.9, rho_lower = 1.0) - -# Initial condition of a dam break with a discontinuous water heights and bottom topography. -# Works as intended for TreeMesh1D with `initial_refinement_level=5`. If the mesh -# refinement level is changed the initial condition below may need changed as well to -# ensure that the discontinuities lie on an element interface. 
-function initial_condition_dam_break(x, t, equations::ShallowWaterTwoLayerEquations1D) - v1_upper = 0.0 - v1_lower = 0.0 - - # Set the discontinuity - if x[1] <= 10.0 - H_lower = 2.0 - H_upper = 4.0 - b = 0.0 - else - H_lower = 1.5 - H_upper = 3.0 - b = 0.5 - end - - return prim2cons(SVector(H_upper, v1_upper, H_lower, v1_lower, b), equations) -end - -initial_condition = initial_condition_dam_break - -############################################################################### -# Get the DG approximation space - -volume_flux = (flux_wintermeyer_etal, flux_nonconservative_ersing_etal) -solver = DGSEM(polydeg = 3, - surface_flux = (flux_wintermeyer_etal, flux_nonconservative_ersing_etal), - volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) - -############################################################################### -# Get the TreeMesh and setup a non-periodic mesh - -coordinates_min = 0.0 -coordinates_max = 20.0 -mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 5, - n_cells_max = 10000, - periodicity = false) - -boundary_condition = boundary_condition_slip_wall - -# create the semidiscretization object -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, - boundary_conditions = boundary_condition) - -############################################################################### -# ODE solvers - -tspan = (0.0, 0.4) -ode = semidiscretize(semi, tspan) - -############################################################################### -# Callbacks - -summary_callback = SummaryCallback() - -analysis_interval = 500 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval, - save_analysis = false, - extra_analysis_integrals = (energy_total, - energy_kinetic, - energy_internal)) - -stepsize_callback = StepsizeCallback(cfl = 1.0) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 500, - save_initial_solution = true, - save_final_solution = true) - -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution) - -############################################################################### -# run the simulation - -# use a Runge-Kutta method with automatic (error based) time step size control -sol = solve(ode, RDPK3SpFSAL49(), abstol = 1.0e-8, reltol = 1.0e-8, - save_everystep = false, callback = callbacks); -summary_callback() # print the timer summary diff --git a/examples/tree_1d_dgsem/elixir_shallowwater_twolayer_well_balanced.jl b/examples/tree_1d_dgsem/elixir_shallowwater_twolayer_well_balanced.jl deleted file mode 100644 index 098e3aaf601..00000000000 --- a/examples/tree_1d_dgsem/elixir_shallowwater_twolayer_well_balanced.jl +++ /dev/null @@ -1,86 +0,0 @@ - -using OrdinaryDiffEq -using Trixi - -############################################################################### -# Semidiscretization of the two-layer shallow water equations to test well-balancedness - -equations = ShallowWaterTwoLayerEquations1D(gravity_constant = 1.0, H0 = 0.6, - rho_upper = 0.9, rho_lower = 1.0) - -""" - initial_condition_fjordholm_well_balanced(x, t, equations::ShallowWaterTwoLayerEquations1D) - -Initial condition to test well balanced with a bottom topography from Fjordholm -- Ulrik Skre Fjordholm (2012) - Energy conservative and stable schemes for the two-layer shallow water equations. 
- [DOI: 10.1142/9789814417099_0039](https://doi.org/10.1142/9789814417099_0039) -""" -function initial_condition_fjordholm_well_balanced(x, t, - equations::ShallowWaterTwoLayerEquations1D) - inicenter = 0.5 - x_norm = x[1] - inicenter - r = abs(x_norm) - - H_lower = 0.5 - H_upper = 0.6 - v1_upper = 0.0 - v1_lower = 0.0 - b = r <= 0.1 ? 0.2 * (cos(10 * pi * (x[1] - 0.5)) + 1) : 0.0 - return prim2cons(SVector(H_upper, v1_upper, H_lower, v1_lower, b), equations) -end - -initial_condition = initial_condition_fjordholm_well_balanced - -############################################################################### -# Get the DG approximation space - -volume_flux = (flux_wintermeyer_etal, flux_nonconservative_ersing_etal) -solver = DGSEM(polydeg = 3, - surface_flux = (flux_es_ersing_etal, flux_nonconservative_ersing_etal), - volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) - -############################################################################### -# Get the TreeMesh and setup a periodic mesh - -coordinates_min = 0.0 -coordinates_max = 1.0 -mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 4, - n_cells_max = 10_000, - periodicity = true) - -# create the semi discretization object -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - -############################################################################### -# ODE solvers, callbacks etc. - -tspan = (0.0, 10.0) -ode = semidiscretize(semi, tspan) - -summary_callback = SummaryCallback() - -analysis_interval = 1000 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval, - save_analysis = false, - extra_analysis_integrals = (lake_at_rest_error,)) - -stepsize_callback = StepsizeCallback(cfl = 1.0) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 1000, - save_initial_solution = true, - save_final_solution = true) - -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution, - stepsize_callback) - -############################################################################### -# run the simulation - -sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), - dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback - save_everystep = false, callback = callbacks); -summary_callback() # print the timer summary diff --git a/examples/tree_1d_dgsem/elixir_shallowwater_well_balanced_wet_dry.jl b/examples/tree_1d_dgsem/elixir_shallowwater_well_balanced_wet_dry.jl deleted file mode 100644 index 26a8960ab46..00000000000 --- a/examples/tree_1d_dgsem/elixir_shallowwater_well_balanced_wet_dry.jl +++ /dev/null @@ -1,172 +0,0 @@ - -using OrdinaryDiffEq -using Trixi -using Printf: @printf, @sprintf - -############################################################################### -# Semidiscretization of the shallow water equations -# -# TODO: TrixiShallowWater: wet/dry example elixir - -equations = ShallowWaterEquations1D(gravity_constant = 9.812) - -""" - initial_condition_complex_bottom_well_balanced(x, t, equations:: ShallowWaterEquations1D) - -Initial condition with a complex (discontinuous) bottom topography to test the well-balanced -property for the [`hydrostatic_reconstruction_chen_noelle`](@ref) including dry areas within the -domain. The errors from the analysis callback are not important but the error for this -lake-at-rest test case `∑|H0-(h+b)|` should be around machine roundoff. 
- -The initial condition is taken from Section 5.2 of the paper: -- Guoxian Chen and Sebastian Noelle (2017) - A new hydrostatic reconstruction scheme based on subcell reconstructions - [DOI:10.1137/15M1053074](https://dx.doi.org/10.1137/15M1053074) -""" -function initial_condition_complex_bottom_well_balanced(x, t, - equations::ShallowWaterEquations1D) - v = 0.0 - b = sin(4 * pi * x[1]) + 3 - - if x[1] >= 0.5 - b = sin(4 * pi * x[1]) + 1 - end - - H = max(b, 2.5) - - if x[1] >= 0.5 - H = max(b, 1.5) - end - - # It is mandatory to shift the water level at dry areas to make sure the water height h - # stays positive. The system would not be stable for h set to a hard 0 due to division by h in - # the computation of velocity, e.g., (h v) / h. Therefore, a small dry state threshold - # with a default value of 500*eps() ≈ 1e-13 in double precision, is set in the constructor above - # for the ShallowWaterEquations and added to the initial condition if h = 0. - # This default value can be changed within the constructor call depending on the simulation setup. - H = max(H, b + equations.threshold_limiter) - return prim2cons(SVector(H, v, b), equations) -end - -initial_condition = initial_condition_complex_bottom_well_balanced - -############################################################################### -# Get the DG approximation space - -volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal) -surface_flux = (FluxHydrostaticReconstruction(flux_hll_chen_noelle, - hydrostatic_reconstruction_chen_noelle), - flux_nonconservative_chen_noelle) - -basis = LobattoLegendreBasis(3) - -indicator_sc = IndicatorHennemannGassnerShallowWater(equations, basis, - alpha_max = 0.5, - alpha_min = 0.001, - alpha_smooth = true, - variable = waterheight_pressure) -volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; - volume_flux_dg = volume_flux, - volume_flux_fv = surface_flux) - -solver = DGSEM(basis, surface_flux, volume_integral) - -############################################################################### -# Create the TreeMesh for the domain [0, 1] - -coordinates_min = 0.0 -coordinates_max = 1.0 - -mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 6, - n_cells_max = 10_000) - -# create the semi discretization object -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - -############################################################################### -# ODE solvers, callbacks etc. - -tspan = (0.0, 25.0) -ode = semidiscretize(semi, tspan) - -summary_callback = SummaryCallback() - -analysis_interval = 5000 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval, - save_analysis = false) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 5000, - save_initial_solution = true, - save_final_solution = true) - -stepsize_callback = StepsizeCallback(cfl = 1.5) - -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution, - stepsize_callback) - -stage_limiter! 
= PositivityPreservingLimiterShallowWater(variables = (Trixi.waterheight,)) - -############################################################################### -# run the simulation - -sol = solve(ode, SSPRK43(stage_limiter!); dt = 1.0, - ode_default_options()..., callback = callbacks, adaptive = false); - -summary_callback() # print the timer summary - -############################################################################### -# Workaround to compute the well-balancedness error for this particular problem -# that has two reference water heights. One for a lake to the left of the -# discontinuous bottom topography `H0_upper = 2.5` and another for a lake to the -# right of the discontinuous bottom topography `H0_lower = 1.5`. - -# Declare a special version of the function to compute the lake-at-rest error -# OBS! The reference water height values are hardcoded for convenience. -function lake_at_rest_error_two_level(u, x, equations::ShallowWaterEquations1D) - h, _, b = u - - # For well-balancedness testing with possible wet/dry regions the reference - # water height `H0` accounts for the possibility that the bottom topography - # can emerge out of the water as well as for the threshold offset to avoid - # division by a "hard" zero water heights as well. - if x[1] < 0.5 - H0_wet_dry = max(2.5, b + equations.threshold_limiter) - else - H0_wet_dry = max(1.5, b + equations.threshold_limiter) - end - - return abs(H0_wet_dry - (h + b)) -end - -# point to the data we want to analyze -u = Trixi.wrap_array(sol[end], semi) -# Perform the actual integration of the well-balancedness error over the domain -l1_well_balance_error = Trixi.integrate_via_indices(u, mesh, equations, semi.solver, - semi.cache; - normalize = true) do u, i, element, - equations, solver - x_node = Trixi.get_node_coords(semi.cache.elements.node_coordinates, equations, solver, - i, element) - # We know that the discontinuity is a vertical line. Slightly augment the x value by a factor - # of unit roundoff to avoid the repeted value from the LGL nodes at at interface. 
- if i == 1 - x_node = SVector(nextfloat(x_node[1])) - elseif i == nnodes(semi.solver) - x_node = SVector(prevfloat(x_node[1])) - end - u_local = Trixi.get_node_vars(u, equations, solver, i, element) - return lake_at_rest_error_two_level(u_local, x_node, equations) -end - -# report the well-balancedness lake-at-rest error to the screen -println("─"^100) -println(" Lake-at-rest error for '", Trixi.get_name(equations), "' with ", summary(solver), - " at final time " * @sprintf("%10.8e", tspan[end])) - -@printf(" %-12s:", Trixi.pretty_form_utf(lake_at_rest_error)) -@printf(" % 10.8e", l1_well_balance_error) -println() -println("─"^100) diff --git a/examples/tree_2d_dgsem/elixir_shallowwater_conical_island.jl b/examples/tree_2d_dgsem/elixir_shallowwater_conical_island.jl deleted file mode 100644 index 349b3741869..00000000000 --- a/examples/tree_2d_dgsem/elixir_shallowwater_conical_island.jl +++ /dev/null @@ -1,117 +0,0 @@ - -using OrdinaryDiffEq -using Trixi - -############################################################################### -# semidiscretization of the shallow water equations -# -# TODO: TrixiShallowWater: wet/dry example elixir - -equations = ShallowWaterEquations2D(gravity_constant = 9.81, H0 = 1.4) - -""" - initial_condition_conical_island(x, t, equations::ShallowWaterEquations2D) - -Initial condition for the [`ShallowWaterEquations2D`](@ref) to test the [`hydrostatic_reconstruction_chen_noelle`](@ref) -and its handling of discontinuous water heights at the start in combination with wetting and -drying. The bottom topography is given by a conical island in the middle of the domain. Around that -island, there is a cylindrical water column at t=0 and the rest of the domain is dry. This -discontinuous water height is smoothed by a logistic function. This simulation uses a Dirichlet -boundary condition with the initial values. Due to the dry cells at the boundary, this has the -effect of an outflow which can be seen in the simulation. -""" -function initial_condition_conical_island(x, t, equations::ShallowWaterEquations2D) - # Set the background values - - v1 = 0.0 - v2 = 0.0 - - x1, x2 = x - b = max(0.1, 1.0 - 4.0 * sqrt(x1^2 + x2^2)) - - # use a logistic function to transfer water height value smoothly - L = equations.H0 # maximum of function - x0 = 0.3 # center point of function - k = -25.0 # sharpness of transfer - - H = max(b, L / (1.0 + exp(-k * (sqrt(x1^2 + x2^2) - x0)))) - - # It is mandatory to shift the water level at dry areas to make sure the water height h - # stays positive. The system would not be stable for h set to a hard 0 due to division by h in - # the computation of velocity, e.g., (h v1) / h. Therefore, a small dry state threshold - # with a default value of 500*eps() ≈ 1e-13 in double precision, is set in the constructor above - # for the ShallowWaterEquations and added to the initial condition if h = 0. - # This default value can be changed within the constructor call depending on the simulation setup. 
- H = max(H, b + equations.threshold_limiter) - return prim2cons(SVector(H, v1, v2, b), equations) -end - -initial_condition = initial_condition_conical_island -boundary_conditions = BoundaryConditionDirichlet(initial_condition) - -############################################################################### -# Get the DG approximation space - -volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal) -surface_flux = (FluxHydrostaticReconstruction(flux_hll_chen_noelle, - hydrostatic_reconstruction_chen_noelle), - flux_nonconservative_chen_noelle) - -basis = LobattoLegendreBasis(4) - -indicator_sc = IndicatorHennemannGassnerShallowWater(equations, basis, - alpha_max = 0.5, - alpha_min = 0.001, - alpha_smooth = true, - variable = waterheight_pressure) -volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; - volume_flux_dg = volume_flux, - volume_flux_fv = surface_flux) - -solver = DGSEM(basis, surface_flux, volume_integral) - -############################################################################### -# Get the TreeMesh and setup a mesh - -coordinates_min = (-1.0, -1.0) -coordinates_max = (1.0, 1.0) -mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 4, - n_cells_max = 10_000, - periodicity = false) - -# Create the semi discretization object -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, - boundary_conditions = boundary_conditions) - -############################################################################### -# ODE solver - -tspan = (0.0, 10.0) -ode = semidiscretize(semi, tspan) - -############################################################################### -# Callbacks - -summary_callback = SummaryCallback() - -analysis_interval = 1000 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 100, - save_initial_solution = true, - save_final_solution = true) - -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution) - -############################################################################### -# run the simulation - -stage_limiter! = PositivityPreservingLimiterShallowWater(variables = (Trixi.waterheight,)) - -sol = solve(ode, SSPRK43(stage_limiter!); - ode_default_options()..., callback = callbacks); - -summary_callback() # print the timer summary diff --git a/examples/tree_2d_dgsem/elixir_shallowwater_parabolic_bowl.jl b/examples/tree_2d_dgsem/elixir_shallowwater_parabolic_bowl.jl deleted file mode 100644 index 2008019cc31..00000000000 --- a/examples/tree_2d_dgsem/elixir_shallowwater_parabolic_bowl.jl +++ /dev/null @@ -1,121 +0,0 @@ - -using OrdinaryDiffEq -using Trixi - -############################################################################### -# Semidiscretization of the shallow water equations -# -# TODO: TrixiShallowWater: wet/dry example elixir - -equations = ShallowWaterEquations2D(gravity_constant = 9.81) - -""" - initial_condition_parabolic_bowl(x, t, equations:: ShallowWaterEquations2D) - -Well-known initial condition to test the [`hydrostatic_reconstruction_chen_noelle`](@ref) and its -wet-dry mechanics. This test has an analytical solution. The initial condition is defined by the -analytical solution at time t=0. The bottom topography defines a bowl and the water level is given -by an oscillating lake. - -The original test and its analytical solution were first presented in -- William C. 
Thacker (1981) - Some exact solutions to the nonlinear shallow-water wave equations - [DOI: 10.1017/S0022112081001882](https://doi.org/10.1017/S0022112081001882). - -The particular setup below is taken from Section 6.2 of -- Niklas Wintermeyer, Andrew R. Winters, Gregor J. Gassner and Timothy Warburton (2018) - An entropy stable discontinuous Galerkin method for the shallow water equations on - curvilinear meshes with wet/dry fronts accelerated by GPUs - [DOI: 10.1016/j.jcp.2018.08.038](https://doi.org/10.1016/j.jcp.2018.08.038). -""" -function initial_condition_parabolic_bowl(x, t, equations::ShallowWaterEquations2D) - a = 1.0 - h_0 = 0.1 - sigma = 0.5 - ω = sqrt(2 * equations.gravity * h_0) / a - - v1 = -sigma * ω * sin(ω * t) - v2 = sigma * ω * cos(ω * t) - - b = h_0 * ((x[1])^2 + (x[2])^2) / a^2 - - H = sigma * h_0 / a^2 * (2 * x[1] * cos(ω * t) + 2 * x[2] * sin(ω * t) - sigma) + h_0 - - # It is mandatory to shift the water level at dry areas to make sure the water height h - # stays positive. The system would not be stable for h set to a hard 0 due to division by h in - # the computation of velocity, e.g., (h v1) / h. Therefore, a small dry state threshold - # with a default value of 500*eps() ≈ 1e-13 in double precision, is set in the constructor above - # for the ShallowWaterEquations and added to the initial condition if h = 0. - # This default value can be changed within the constructor call depending on the simulation setup. - H = max(H, b + equations.threshold_limiter) - return prim2cons(SVector(H, v1, v2, b), equations) -end - -initial_condition = initial_condition_parabolic_bowl -############################################################################### -# Get the DG approximation space - -volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal) -surface_flux = (FluxHydrostaticReconstruction(flux_hll_chen_noelle, - hydrostatic_reconstruction_chen_noelle), - flux_nonconservative_chen_noelle) - -basis = LobattoLegendreBasis(7) - -indicator_sc = IndicatorHennemannGassnerShallowWater(equations, basis, - alpha_max = 0.6, - alpha_min = 0.001, - alpha_smooth = true, - variable = waterheight_pressure) -volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; - volume_flux_dg = volume_flux, - volume_flux_fv = surface_flux) - -solver = DGSEM(basis, surface_flux, volume_integral) - -############################################################################### -# Create the TreeMesh for the domain [-2, 2]^2 - -coordinates_min = (-2.0, -2.0) -coordinates_max = (2.0, 2.0) - -mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 5, - n_cells_max = 10_000) - -# create the semi discretization object -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - -############################################################################### -# ODE solvers, callbacks etc. - -tspan = (0.0, 1.0) -ode = semidiscretize(semi, tspan) - -summary_callback = SummaryCallback() - -analysis_interval = 1000 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval, - save_analysis = false, - extra_analysis_integrals = (energy_kinetic, - energy_internal)) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 100, - save_initial_solution = true, - save_final_solution = true) - -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution) - -stage_limiter! 
= PositivityPreservingLimiterShallowWater(variables = (Trixi.waterheight,)) - -############################################################################### -# run the simulation - -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution) - -sol = solve(ode, SSPRK43(stage_limiter!); - ode_default_options()..., callback = callbacks); - -summary_callback() # print the timer summary diff --git a/examples/tree_2d_dgsem/elixir_shallowwater_twolayer_convergence.jl b/examples/tree_2d_dgsem/elixir_shallowwater_twolayer_convergence.jl deleted file mode 100644 index 790916e4467..00000000000 --- a/examples/tree_2d_dgsem/elixir_shallowwater_twolayer_convergence.jl +++ /dev/null @@ -1,60 +0,0 @@ - -using OrdinaryDiffEq -using Trixi - -############################################################################### -# Semidiscretization of the two-layer shallow water equations - -equations = ShallowWaterTwoLayerEquations2D(gravity_constant = 10.0, rho_upper = 0.9, - rho_lower = 1.0) - -initial_condition = initial_condition_convergence_test - -############################################################################### -# Get the DG approximation space - -volume_flux = (flux_wintermeyer_etal, flux_nonconservative_ersing_etal) -solver = DGSEM(polydeg = 3, - surface_flux = (flux_wintermeyer_etal, flux_nonconservative_ersing_etal), - volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) - -############################################################################### -# Get the TreeMesh and setup a periodic mesh - -coordinates_min = (0.0, 0.0) -coordinates_max = (sqrt(2.0), sqrt(2.0)) -mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 3, - n_cells_max = 20_000, - periodicity = true) - -# Create the semi discretization object -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, - source_terms = source_terms_convergence_test) - -############################################################################### -# ODE solvers, callbacks etc. 
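Since the elixir above is a convergence test, a natural follow-up is to compute experimental orders of convergence. The sketch below is not part of the removed file; it uses Trixi's exported `convergence_test`, which re-runs an elixir several times with an increased `initial_refinement_level` and reports the resulting EOCs. The path is hypothetical and assumes a local copy of the elixir together with a package that still provides the two-layer equations (after this patch they are expected to live in TrixiShallowWater.jl rather than in Trixi.jl).

```julia
using Trixi

# Run the elixir four times, refining the TreeMesh once per run, and print the
# L2/Linf errors together with the experimental orders of convergence.
convergence_test(joinpath("examples", "tree_2d_dgsem",
                          "elixir_shallowwater_twolayer_convergence.jl"), 4)
```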
- -tspan = (0.0, 1.0) -ode = semidiscretize(semi, tspan) - -summary_callback = SummaryCallback() - -analysis_interval = 500 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 500, - save_initial_solution = true, - save_final_solution = true) - -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution) - -############################################################################### -# run the simulation - -# use a Runge-Kutta method with automatic (error based) time step size control -sol = solve(ode, RDPK3SpFSAL49(), abstol = 1.0e-8, reltol = 1.0e-8, - save_everystep = false, callback = callbacks); -summary_callback() # print the timer summary diff --git a/examples/tree_2d_dgsem/elixir_shallowwater_twolayer_well_balanced.jl b/examples/tree_2d_dgsem/elixir_shallowwater_twolayer_well_balanced.jl deleted file mode 100644 index 264c26390fe..00000000000 --- a/examples/tree_2d_dgsem/elixir_shallowwater_twolayer_well_balanced.jl +++ /dev/null @@ -1,81 +0,0 @@ - -using OrdinaryDiffEq -using Trixi - -############################################################################### -# Semidiscretization of the two-layer shallow water equations with a bottom topography function -# to test well-balancedness - -equations = ShallowWaterTwoLayerEquations2D(gravity_constant = 9.81, H0 = 0.6, - rho_upper = 0.9, rho_lower = 1.0) - -# An initial condition with constant total water height, zero velocities and a bottom topography to -# test well-balancedness -function initial_condition_well_balanced(x, t, equations::ShallowWaterTwoLayerEquations2D) - H_lower = 0.5 - H_upper = 0.6 - v1_upper = 0.0 - v2_upper = 0.0 - v1_lower = 0.0 - v2_lower = 0.0 - b = (((x[1] - 0.5)^2 + (x[2] - 0.5)^2) < 0.04 ? 
- 0.2 * (cos(4 * pi * sqrt((x[1] - 0.5)^2 + (x[2] + - -0.5)^2)) + 1) : 0.0) - - return prim2cons(SVector(H_upper, v1_upper, v2_upper, H_lower, v1_lower, v2_lower, b), - equations) -end - -initial_condition = initial_condition_well_balanced - -############################################################################### -# Get the DG approximation space - -volume_flux = (flux_wintermeyer_etal, flux_nonconservative_ersing_etal) -surface_flux = (flux_es_ersing_etal, flux_nonconservative_ersing_etal) -solver = DGSEM(polydeg = 3, surface_flux = surface_flux, - volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) - -############################################################################### -# Get the TreeMesh and setup a periodic mesh - -coordinates_min = (0.0, 0.0) -coordinates_max = (1.0, 1.0) -mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 3, - n_cells_max = 10_000, - periodicity = true) - -# Create the semi discretization object -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - -############################################################################### -# ODE solver - -tspan = (0.0, 10.0) -ode = semidiscretize(semi, tspan) - -summary_callback = SummaryCallback() - -analysis_interval = 1000 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval, - extra_analysis_integrals = (lake_at_rest_error,)) - -stepsize_callback = StepsizeCallback(cfl = 1.0) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 1000, - save_initial_solution = true, - save_final_solution = true) - -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution, - stepsize_callback) - -############################################################################### -# run the simulation - -sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), - dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback - save_everystep = false, callback = callbacks); -summary_callback() # print the timer summary diff --git a/examples/tree_2d_dgsem/elixir_shallowwater_well_balanced_wet_dry.jl b/examples/tree_2d_dgsem/elixir_shallowwater_well_balanced_wet_dry.jl deleted file mode 100644 index 034411c2b54..00000000000 --- a/examples/tree_2d_dgsem/elixir_shallowwater_well_balanced_wet_dry.jl +++ /dev/null @@ -1,206 +0,0 @@ - -using OrdinaryDiffEq -using Trixi -using Printf: @printf, @sprintf - -############################################################################### -# Semidiscretization of the shallow water equations -# -# TODO: TrixiShallowWater: wet/dry example elixir - -equations = ShallowWaterEquations2D(gravity_constant = 9.812) - -""" - initial_condition_well_balanced_chen_noelle(x, t, equations:: ShallowWaterEquations2D) - -Initial condition with a complex (discontinuous) bottom topography to test the well-balanced -property for the [`hydrostatic_reconstruction_chen_noelle`](@ref) including dry areas within the -domain. The errors from the analysis callback are not important but the error for this -lake-at-rest test case `∑|H0-(h+b)|` should be around machine roundoff. 
- -The initial condition is taken from Section 5.2 of the paper: -- Guoxian Chen and Sebastian Noelle (2017) - A new hydrostatic reconstruction scheme based on subcell reconstructions - [DOI:10.1137/15M1053074](https://dx.doi.org/10.1137/15M1053074) -""" -function initial_condition_complex_bottom_well_balanced(x, t, - equations::ShallowWaterEquations2D) - v1 = 0 - v2 = 0 - b = sin(4 * pi * x[1]) + 3 - - if x[1] >= 0.5 - b = sin(4 * pi * x[1]) + 1 - end - - H = max(b, 2.5) - if x[1] >= 0.5 - H = max(b, 1.5) - end - - # It is mandatory to shift the water level at dry areas to make sure the water height h - # stays positive. The system would not be stable for h set to a hard 0 due to division by h in - # the computation of velocity, e.g., (h v1) / h. Therefore, a small dry state threshold - # with a default value of 500*eps() ≈ 1e-13 in double precision, is set in the constructor above - # for the ShallowWaterEquations and added to the initial condition if h = 0. - # This default value can be changed within the constructor call depending on the simulation setup. - H = max(H, b + equations.threshold_limiter) - return prim2cons(SVector(H, v1, v2, b), equations) -end - -initial_condition = initial_condition_complex_bottom_well_balanced - -############################################################################### -# Get the DG approximation space - -volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal) -surface_flux = (FluxHydrostaticReconstruction(flux_hll_chen_noelle, - hydrostatic_reconstruction_chen_noelle), - flux_nonconservative_chen_noelle) - -basis = LobattoLegendreBasis(3) - -indicator_sc = IndicatorHennemannGassnerShallowWater(equations, basis, - alpha_max = 0.5, - alpha_min = 0.001, - alpha_smooth = true, - variable = waterheight_pressure) -volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; - volume_flux_dg = volume_flux, - volume_flux_fv = surface_flux) - -solver = DGSEM(basis, surface_flux, volume_integral) - -############################################################################### -# Create the TreeMesh for the domain [0, 1]^2 - -coordinates_min = (0.0, 0.0) -coordinates_max = (1.0, 1.0) - -mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 3, - n_cells_max = 10_000) - -# create the semi discretization object -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - -############################################################################### -# ODE solvers, callbacks etc. - -tspan = (0.0, 50.0) -ode = semidiscretize(semi, tspan) - -############################################################################### -# Workaround to set a discontinuous water and bottom topography for -# debugging and testing. Essentially, this is a slight augmentation of the -# `compute_coefficients` where the `x` node value passed here is slightly -# perturbed to the left / right in order to set a true discontinuity that avoids -# the doubled value of the LGL nodes at a particular element interface. -# -# Note! The errors from the analysis callback are not important but the error -# for this lake at rest test case `∑|H0-(h+b)|` should be near machine roundoff. 
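Before the reset loop below, it may help to see why a single-ulp shift is enough to separate the two coincident LGL interface nodes. This is an illustration only, not part of the elixir; it uses nothing beyond Julia's built-in `prevfloat`/`nextfloat`.

```julia
# The two interface nodes at x = 0.5 end up on opposite sides of the jump,
# while the perturbation itself is far below plotting or solver accuracy.
x_left, x_right = prevfloat(0.5), nextfloat(0.5)
x_left < 0.5 < x_right   # true
x_right - x_left         # ≈ 1.7e-16, on the order of machine precision
```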
- -# point to the data we want to augment -u = Trixi.wrap_array(ode.u0, semi) -# reset the initial condition -for element in eachelement(semi.solver, semi.cache) - for j in eachnode(semi.solver), i in eachnode(semi.solver) - x_node = Trixi.get_node_coords(semi.cache.elements.node_coordinates, equations, - semi.solver, i, j, element) - # We know that the discontinuity is a vertical line. Slightly augment the x value by a factor - # of unit roundoff to avoid the repeted value from the LGL nodes at at interface. - if i == 1 - x_node = SVector(nextfloat(x_node[1]), x_node[2]) - elseif i == nnodes(semi.solver) - x_node = SVector(prevfloat(x_node[1]), x_node[2]) - end - u_node = initial_condition_complex_bottom_well_balanced(x_node, first(tspan), - equations) - Trixi.set_node_vars!(u, u_node, equations, semi.solver, i, j, element) - end -end - -############################################################################### -# Callbacks - -summary_callback = SummaryCallback() - -analysis_interval = 1000 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval, - save_analysis = false) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 1000, - save_initial_solution = true, - save_final_solution = true) - -stepsize_callback = StepsizeCallback(cfl = 2.0) - -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution, - stepsize_callback) - -stage_limiter! = PositivityPreservingLimiterShallowWater(variables = (Trixi.waterheight,)) - -############################################################################### -# run the simulation - -sol = solve(ode, SSPRK43(stage_limiter!); dt = 1.0, - ode_default_options()..., callback = callbacks, adaptive = false); - -summary_callback() # print the timer summary - -############################################################################### -# Workaround to compute the well-balancedness error for this particular problem -# that has two reference water heights. One for a lake to the left of the -# discontinuous bottom topography `H0_upper = 2.5` and another for a lake to the -# right of the discontinuous bottom topography `H0_lower = 1.5`. - -# Declare a special version of the function to compute the lake-at-rest error -# OBS! The reference water height values are hardcoded for convenience. -function lake_at_rest_error_two_level(u, x, equations::ShallowWaterEquations2D) - h, _, _, b = u - - # For well-balancedness testing with possible wet/dry regions the reference - # water height `H0` accounts for the possibility that the bottom topography - # can emerge out of the water as well as for the threshold offset to avoid - # division by a "hard" zero water heights as well. - - if x[1] < 0.5 - H0_wet_dry = max(2.5, b + equations.threshold_limiter) - else - H0_wet_dry = max(1.5, b + equations.threshold_limiter) - end - - return abs(H0_wet_dry - (h + b)) -end - -# point to the data we want to analyze -u = Trixi.wrap_array(sol[end], semi) -# Perform the actual integration of the well-balancedness error over the domain -l1_well_balance_error = Trixi.integrate_via_indices(u, mesh, equations, semi.solver, - semi.cache; - normalize = true) do u, i, j, element, - equations, solver - x_node = Trixi.get_node_coords(semi.cache.elements.node_coordinates, equations, solver, - i, j, element) - # We know that the discontinuity is a vertical line. 
Slightly augment the x value by a factor - # of unit roundoff to avoid the repeted value from the LGL nodes at at interface. - if i == 1 - x_node = SVector(nextfloat(x_node[1]), x_node[2]) - elseif i == nnodes(semi.solver) - x_node = SVector(prevfloat(x_node[1]), x_node[2]) - end - u_local = Trixi.get_node_vars(u, equations, solver, i, j, element) - return lake_at_rest_error_two_level(u_local, x_node, equations) -end - -# report the well-balancedness lake-at-rest error to the screen -println("─"^100) -println(" Lake-at-rest error for '", Trixi.get_name(equations), "' with ", summary(solver), - " at final time " * @sprintf("%10.8e", tspan[end])) - -@printf(" %-12s:", Trixi.pretty_form_utf(lake_at_rest_error)) -@printf(" % 10.8e", l1_well_balance_error) -println() -println("─"^100) diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_three_mound_dam_break.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_three_mound_dam_break.jl deleted file mode 100644 index df321aad267..00000000000 --- a/examples/unstructured_2d_dgsem/elixir_shallowwater_three_mound_dam_break.jl +++ /dev/null @@ -1,133 +0,0 @@ - -using OrdinaryDiffEq -using Trixi - -############################################################################### -# semidiscretization of the shallow water equations -# -# TODO: TrixiShallowWater: wet/dry example elixir - -equations = ShallowWaterEquations2D(gravity_constant = 9.81, H0 = 1.875, - threshold_limiter = 1e-12, threshold_wet = 1e-14) - -""" - initial_condition_three_mounds(x, t, equations::ShallowWaterEquations2D) - -Initial condition simulating a dam break. The bottom topography is given by one large and two smaller -mounds. The mounds are flooded by the water for t > 0. To smooth the discontinuity, a logistic function -is applied. - -The initial conditions is taken from Section 6.3 of the paper: -- Niklas Wintermeyer, Andrew R. Winters, Gregor J. Gassner and Timothy Warburton (2018) - An entropy stable discontinuous Galerkin method for the shallow water equations on - curvilinear meshes with wet/dry fronts accelerated by GPUs\n - [DOI: 10.1016/j.jcp.2018.08.038](https://doi.org/10.1016/j.jcp.2018.08.038) -""" -function initial_condition_three_mounds(x, t, equations::ShallowWaterEquations2D) - - # Set the background values - v1 = 0.0 - v2 = 0.0 - - x1, x2 = x - M_1 = 1 - 0.1 * sqrt((x1 - 30.0)^2 + (x2 - 22.5)^2) - M_2 = 1 - 0.1 * sqrt((x1 - 30.0)^2 + (x2 - 7.5)^2) - M_3 = 2.8 - 0.28 * sqrt((x1 - 47.5)^2 + (x2 - 15.0)^2) - - b = max(0.0, M_1, M_2, M_3) - - # use a logistic function to transfer water height value smoothly - L = equations.H0 # maximum of function - x0 = 8 # center point of function - k = -75.0 # sharpness of transfer - - H = max(b, L / (1.0 + exp(-k * (x1 - x0)))) - - # Avoid division by zero by adjusting the initial condition with a small dry state threshold - # that defaults to 500*eps() ≈ 1e-13 in double precision and is set in the constructor above - # for the ShallowWaterEquations struct. 
- H = max(H, b + equations.threshold_limiter) - return prim2cons(SVector(H, v1, v2, b), equations) -end - -initial_condition = initial_condition_three_mounds - -function boundary_condition_outflow(u_inner, normal_direction::AbstractVector, x, t, - surface_flux_function, - equations::ShallowWaterEquations2D) - # Impulse and bottom from inside, height from external state - u_outer = SVector(equations.threshold_wet, u_inner[2], u_inner[3], u_inner[4]) - - # calculate the boundary flux - flux = surface_flux_function(u_inner, u_outer, normal_direction, equations) - - return flux -end - -boundary_conditions = Dict(:Bottom => boundary_condition_slip_wall, - :Top => boundary_condition_slip_wall, - :Right => boundary_condition_outflow, - :Left => boundary_condition_slip_wall) - -############################################################################### -# Get the DG approximation space - -volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal) -surface_flux = (FluxHydrostaticReconstruction(flux_hll_chen_noelle, - hydrostatic_reconstruction_chen_noelle), - flux_nonconservative_chen_noelle) - -basis = LobattoLegendreBasis(4) - -indicator_sc = IndicatorHennemannGassnerShallowWater(equations, basis, - alpha_max = 0.5, - alpha_min = 0.001, - alpha_smooth = true, - variable = waterheight_pressure) -volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; - volume_flux_dg = volume_flux, - volume_flux_fv = surface_flux) - -solver = DGSEM(basis, surface_flux, volume_integral) - -############################################################################### -# Get the unstructured quad mesh from a file (downloads the file if not available locally) -mesh_file = Trixi.download("https://gist.githubusercontent.com/svengoldberg/c3c87fecb3fc6e46be7f0d1c7cb35f83/raw/e817ecd9e6c4686581d63c46128f9b6468d396d3/mesh_three_mound.mesh", - joinpath(@__DIR__, "mesh_three_mound.mesh")) - -mesh = UnstructuredMesh2D(mesh_file) - -# Create the semi discretization object -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver; - boundary_conditions = boundary_conditions) - -############################################################################### -# ODE solver - -tspan = (0.0, 20.0) -ode = semidiscretize(semi, tspan) - -############################################################################### -# Callbacks - -summary_callback = SummaryCallback() - -analysis_interval = 1000 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 100, - save_initial_solution = true, - save_final_solution = true) - -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution) - -############################################################################### -# run the simulation - -stage_limiter! 
= PositivityPreservingLimiterShallowWater(variables = (Trixi.waterheight,)) - -sol = solve(ode, SSPRK43(stage_limiter!); - ode_default_options()..., callback = callbacks); -summary_callback() # print the timer summary diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_convergence.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_convergence.jl deleted file mode 100644 index fcc08b6f991..00000000000 --- a/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_convergence.jl +++ /dev/null @@ -1,63 +0,0 @@ - -using OrdinaryDiffEq -using Trixi - -############################################################################### -# Semidiscretization of the two-layer shallow water equations with a periodic -# bottom topography function (set in the initial conditions) - -equations = ShallowWaterTwoLayerEquations2D(gravity_constant = 10.0, rho_upper = 0.9, - rho_lower = 1.0) - -initial_condition = initial_condition_convergence_test - -############################################################################### -# Get the DG approximation space - -volume_flux = (flux_wintermeyer_etal, flux_nonconservative_ersing_etal) -surface_flux = (flux_wintermeyer_etal, flux_nonconservative_ersing_etal) -solver = DGSEM(polydeg = 6, surface_flux = surface_flux, - volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) - -############################################################################### -# This setup is for the curved, split form convergence test on a periodic domain - -# Get the unstructured quad mesh from a file (downloads the file if not available locally) -mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", - joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh")) - -mesh = UnstructuredMesh2D(mesh_file, periodicity = true) - -# Create the semidiscretization object -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, - source_terms = source_terms_convergence_test) - -############################################################################### -# ODE solvers, callbacks etc. 
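Like the other removed setups, this elixir can still be run with modified parameters through `trixi_include`, which overrides top-level assignments (such as `tspan` below) before evaluating the file. The following is a sketch with a hypothetical local path; after this patch the elixir and the two-layer equations it needs are expected to be maintained in TrixiShallowWater.jl.

```julia
using Trixi

# Rerun the convergence setup with a shorter final time; `tspan` is a top-level
# assignment in the elixir and can therefore be overridden as a keyword.
trixi_include(joinpath("examples", "unstructured_2d_dgsem",
                       "elixir_shallowwater_twolayer_convergence.jl"),
              tspan = (0.0, 0.25))
```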
- -tspan = (0.0, 1.0) -ode = semidiscretize(semi, tspan) - -summary_callback = SummaryCallback() - -analysis_interval = 500 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 500, - save_initial_solution = true, - save_final_solution = true) - -stepsize_callback = StepsizeCallback(cfl = 1.0) - -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution, - stepsize_callback) - -############################################################################### -# run the simulation - -sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), - dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback - save_everystep = false, callback = callbacks); -summary_callback() # print the timer summary diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_dam_break.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_dam_break.jl deleted file mode 100644 index 821f31c52ac..00000000000 --- a/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_dam_break.jl +++ /dev/null @@ -1,147 +0,0 @@ - -using OrdinaryDiffEq -using Trixi - -############################################################################### -# Semidiscretization of the two-layer shallow water equations for a dam break test with a -# discontinuous bottom topography function to test energy conservation - -equations = ShallowWaterTwoLayerEquations2D(gravity_constant = 1.0, rho_upper = 0.9, - rho_lower = 1.0) - -# This test case uses a special work around to setup a truly discontinuous bottom topography -# function and initial condition for this academic testcase of entropy conservation. First, a -# dummy initial_condition_dam_break is introduced to create the semidiscretization. Then the initial -# condition is reset with the true discontinuous values from initial_condition_discontinuous_dam_break. 
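For orientation when reading the dam-break initial condition defined next: `prim2cons` converts the total water heights `H_upper`, `H_lower` into layer heights, so the jump in the conservative state sits in the heights and the bottom topography. The following check is a sketch, not part of the elixir; it assumes the conservative ordering `(h_upper, h_v1_upper, h_v2_upper, h_lower, h_v1_lower, h_v2_lower, b)` with `h_upper = H_upper - H_lower` and `h_lower = H_lower - b`, and it requires a package that still provides `ShallowWaterTwoLayerEquations2D` (removed from Trixi.jl by this patch).

```julia
using Trixi

eq = ShallowWaterTwoLayerEquations2D(gravity_constant = 1.0, rho_upper = 0.9,
                                     rho_lower = 1.0)
# States on the two sides of the dam-break discontinuity (velocities are zero)
u_left  = prim2cons(SVector(1.0, 0.0, 0.0, 0.6, 0.0, 0.0, 0.1), eq)
u_right = prim2cons(SVector(0.9, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0), eq)
u_left[1] ≈ u_right[1] ≈ 0.4   # both layer heights are continuous across the jump;
u_left[4] ≈ u_right[4] ≈ 0.5   # the discontinuity lives in b and in H_upper, H_lower
```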
- -function initial_condition_dam_break(x, t, equations::ShallowWaterTwoLayerEquations2D) - if x[1] < sqrt(2) / 2 - H_upper = 1.0 - H_lower = 0.6 - b = 0.1 - else - H_upper = 0.9 - H_lower = 0.5 - b = 0.0 - end - - v1_upper = 0.0 - v2_upper = 0.0 - v1_lower = 0.0 - v2_lower = 0.0 - return prim2cons(SVector(H_upper, v1_upper, v2_upper, H_lower, v1_lower, v2_lower, b), - equations) -end - -initial_condition = initial_condition_dam_break - -boundary_condition_constant = BoundaryConditionDirichlet(initial_condition_dam_break) - -############################################################################### -# Get the DG approximation space - -volume_flux = (flux_wintermeyer_etal, flux_nonconservative_ersing_etal) -surface_flux = (flux_wintermeyer_etal, flux_nonconservative_ersing_etal) -solver = DGSEM(polydeg = 6, surface_flux = surface_flux, - volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) - -############################################################################### -# Get the unstructured quad mesh from a file (downloads the file if not available locally) -mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", - joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh")) - -mesh = UnstructuredMesh2D(mesh_file, periodicity = false) - -# Boundary conditions -boundary_condition = Dict(:Top => boundary_condition_slip_wall, - :Left => boundary_condition_slip_wall, - :Right => boundary_condition_slip_wall, - :Bottom => boundary_condition_slip_wall) - -# Create the semi discretization object -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, - solver, boundary_conditions = boundary_condition) - -############################################################################### -# ODE solver - -tspan = (0.0, 0.5) -ode = semidiscretize(semi, tspan) - -############################################################################### -# Workaround to set a discontinuous bottom topography and initial condition for debugging and testing. - -# alternative version of the initial conditinon used to setup a truly discontinuous -# test case and initial condition. -# In contrast to the usual signature of initial conditions, this one get passed the -# `element_id` explicitly. In particular, this initial conditions works as intended -# only for the specific mesh loaded above! 
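The hardcoded element IDs used in `initial_condition_discontinuous_dam_break` below are tied to the numbering of the downloaded mesh. A hypothetical alternative (not part of the elixir) is to classify elements by the average x-coordinate of their nodes; this assumes `semi` and `equations` from above are in scope and reuses the node-coordinate access pattern of the reset loop further down. For curved elements the node average is only a rough centroid, so this is a sketch rather than a drop-in replacement.

```julia
# Collect the IDs of all elements whose node-averaged x-coordinate lies to the
# left of the jump at x = sqrt(2)/2.
left_element_ids = Int[]
for element in eachelement(semi.solver, semi.cache)
    x_sum = 0.0
    n_nodes = 0
    for j in eachnode(semi.solver), i in eachnode(semi.solver)
        x_node = Trixi.get_node_coords(semi.cache.elements.node_coordinates, equations,
                                       semi.solver, i, j, element)
        x_sum += x_node[1]
        n_nodes += 1
    end
    if x_sum / n_nodes < sqrt(2) / 2
        push!(left_element_ids, element)
    end
end
```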
- -function initial_condition_discontinuous_dam_break(x, t, element_id, - equations::ShallowWaterTwoLayerEquations2D) - # Constant values - v1_upper = 0.0 - v2_upper = 0.0 - v1_lower = 0.0 - v2_lower = 0.0 - - # Left side of discontinuity - IDs = [1, 2, 5, 6, 9, 10, 13, 14] - if element_id in IDs - H_upper = 1.0 - H_lower = 0.6 - b = 0.0 - # Right side of discontinuity - else - H_upper = 0.9 - H_lower = 0.5 - b = 0.1 - end - - return prim2cons(SVector(H_upper, v1_upper, v2_upper, H_lower, v1_lower, v2_lower, b), - equations) -end - -# point to the data we want to augment -u = Trixi.wrap_array(ode.u0, semi) -# reset the initial condition -for element in eachelement(semi.solver, semi.cache) - for j in eachnode(semi.solver), i in eachnode(semi.solver) - x_node = Trixi.get_node_coords(semi.cache.elements.node_coordinates, equations, - semi.solver, i, j, element) - u_node = initial_condition_discontinuous_dam_break(x_node, first(tspan), element, - equations) - Trixi.set_node_vars!(u, u_node, equations, semi.solver, i, j, element) - end -end - -############################################################################### -# Callbacks - -summary_callback = SummaryCallback() - -analysis_interval = 500 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval, - save_analysis = false, - extra_analysis_integrals = (energy_total, - energy_kinetic, - energy_internal)) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 500, - save_initial_solution = true, - save_final_solution = true) - -stepsize_callback = StepsizeCallback(cfl = 1.0) - -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution, - stepsize_callback) - -############################################################################### -# run the simulation - -sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), - dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback - save_everystep = false, callback = callbacks); -summary_callback() # print the timer summary diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_well_balanced.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_well_balanced.jl deleted file mode 100644 index ca1f54595bb..00000000000 --- a/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_well_balanced.jl +++ /dev/null @@ -1,81 +0,0 @@ - -using OrdinaryDiffEq -using Trixi - -############################################################################### -# Semidiscretization of the two-layer shallow water equations with a discontinuous bottom -# topography to test well-balancedness - -equations = ShallowWaterTwoLayerEquations2D(gravity_constant = 1.0, H0 = 0.6, - rho_upper = 0.9, rho_lower = 1.0) - -# An initial condition with constant total water height, zero velocities and a bottom topography to -# test well-balancedness -function initial_condition_well_balanced(x, t, equations::ShallowWaterTwoLayerEquations2D) - H_lower = 0.5 - H_upper = 0.6 - v1_upper = 0.0 - v2_upper = 0.0 - v1_lower = 0.0 - v2_lower = 0.0 - - # Bottom Topography - b = (((x[1] - 0.5)^2 + (x[2] - 0.5)^2) < 0.04 ? 
- 0.2 * (cos(4 * pi * sqrt((x[1] - 0.5)^2 + (x[2] + - -0.5)^2)) + 1) : 0.0) - return prim2cons(SVector(H_upper, v1_upper, v2_upper, H_lower, v1_lower, v2_lower, b), - equations) -end - -initial_condition = initial_condition_well_balanced - -############################################################################### -# Get the DG approximation space - -volume_flux = (flux_wintermeyer_etal, flux_nonconservative_ersing_etal) -surface_flux = (flux_es_ersing_etal, flux_nonconservative_ersing_etal) -solver = DGSEM(polydeg = 6, surface_flux = surface_flux, - volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) - -############################################################################### -# This setup is for the curved, split form well-balancedness testing - -# Get the unstructured quad mesh from a file (downloads the file if not available locally) -mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", - joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh")) - -mesh = UnstructuredMesh2D(mesh_file, periodicity = true) - -# Create the semi discretization object -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - -############################################################################### -# ODE solver - -tspan = (0.0, 10.0) -ode = semidiscretize(semi, tspan) - -summary_callback = SummaryCallback() - -analysis_interval = 1000 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval, - extra_analysis_integrals = (lake_at_rest_error,)) - -stepsize_callback = StepsizeCallback(cfl = 1.0) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 1000, - save_initial_solution = true, - save_final_solution = true) - -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution, - stepsize_callback) - -############################################################################### -# run the simulation - -sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), - dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback - save_everystep = false, callback = callbacks); -summary_callback() # print the timer summary diff --git a/src/Trixi.jl b/src/Trixi.jl index b7f7767a9d8..5f8cd9cae8e 100644 --- a/src/Trixi.jl +++ b/src/Trixi.jl @@ -160,7 +160,6 @@ export AcousticPerturbationEquations2D, InviscidBurgersEquation1D, LatticeBoltzmannEquations2D, LatticeBoltzmannEquations3D, ShallowWaterEquations1D, ShallowWaterEquations2D, - ShallowWaterTwoLayerEquations1D, ShallowWaterTwoLayerEquations2D, ShallowWaterEquationsQuasi1D, LinearizedEulerEquations2D, PolytropicEulerEquations2D, @@ -179,16 +178,12 @@ export flux, flux_central, flux_lax_friedrichs, flux_hll, flux_hllc, flux_hlle, flux_kennedy_gruber, flux_shima_etal, flux_ec, flux_fjordholm_etal, flux_nonconservative_fjordholm_etal, flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal, - flux_es_ersing_etal, flux_nonconservative_ersing_etal, + flux_nonconservative_ersing_etal, flux_chan_etal, flux_nonconservative_chan_etal, flux_winters_etal, hydrostatic_reconstruction_audusse_etal, flux_nonconservative_audusse_etal, -# TODO: TrixiShallowWater: move anything with "chen_noelle" to new file - hydrostatic_reconstruction_chen_noelle, flux_nonconservative_chen_noelle, - flux_hll_chen_noelle, 
FluxPlusDissipation, DissipationGlobalLaxFriedrichs, DissipationLocalLaxFriedrichs, FluxLaxFriedrichs, max_abs_speed_naive, FluxHLL, min_max_speed_naive, min_max_speed_davis, min_max_speed_einfeldt, - min_max_speed_chen_noelle, FluxLMARS, FluxRotated, flux_shima_etal_turbo, flux_ranocha_turbo, @@ -239,8 +234,6 @@ export DG, VolumeIntegralFluxDifferencing, VolumeIntegralPureLGLFiniteVolume, VolumeIntegralShockCapturingHG, IndicatorHennemannGassner, -# TODO: TrixiShallowWater: move new indicator - IndicatorHennemannGassnerShallowWater, VolumeIntegralUpwind, SurfaceIntegralWeakForm, SurfaceIntegralStrongForm, SurfaceIntegralUpwind, @@ -276,8 +269,7 @@ export load_mesh, load_time, load_timestep, load_timestep!, load_dt, export ControllerThreeLevel, ControllerThreeLevelCombined, IndicatorLöhner, IndicatorLoehner, IndicatorMax -# TODO: TrixiShallowWater: move new limiter -export PositivityPreservingLimiterZhangShu, PositivityPreservingLimiterShallowWater +export PositivityPreservingLimiterZhangShu export trixi_include, examples_dir, get_examples, default_example, default_example_unstructured, ode_default_options diff --git a/src/callbacks_stage/callbacks_stage.jl b/src/callbacks_stage/callbacks_stage.jl index 70d60de7914..d5abc1d227d 100644 --- a/src/callbacks_stage/callbacks_stage.jl +++ b/src/callbacks_stage/callbacks_stage.jl @@ -8,6 +8,4 @@ include("positivity_zhang_shu.jl") include("subcell_limiter_idp_correction.jl") include("subcell_bounds_check.jl") -# TODO: TrixiShallowWater: move specific limiter file -include("positivity_shallow_water.jl") end # @muladd diff --git a/src/callbacks_stage/positivity_shallow_water.jl b/src/callbacks_stage/positivity_shallow_water.jl deleted file mode 100644 index 36276026fe9..00000000000 --- a/src/callbacks_stage/positivity_shallow_water.jl +++ /dev/null @@ -1,89 +0,0 @@ -# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). -# Since these FMAs can increase the performance of many numerical algorithms, -# we need to opt-in explicitly. -# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. -@muladd begin -#! format: noindent - -# TODO: TrixiShallowWater: generic wet/dry limiter - -""" - PositivityPreservingLimiterShallowWater(; variables) - -The limiter is specifically designed for the shallow water equations. -It is applied to all scalar `variables` in their given order -using the defined `threshold_limiter` from the [`ShallowWaterEquations1D`](@ref) struct -or the [`ShallowWaterEquations2D`](@ref) struct to determine the minimal acceptable values. -The order of the `variables` is important and might have a strong influence -on the robustness. - -As opposed to the standard version of the [`PositivityPreservingLimiterZhangShu`](@ref), -nodes with a water height below the `threshold_limiter` are treated in a special way. -To avoid numerical problems caused by velocities close to zero, -the velocity is cut off, such that the node can be identified as "dry". The special feature of the -`ShallowWaterEquations` used here is that the bottom topography is stored as an additional -quantity in the solution vector `u`. However, the value of the bottom topography -should not be changed. That is why, it is not limited. - -After the limiting process is applied to all degrees of freedom, for safety reasons, -the `threshold_limiter` is applied again on all the DG nodes in order to avoid water height below. 
-In the case where the cell mean value is below the threshold before applying the limiter, -there could still be dry nodes afterwards due to the logic of the limiter. - -This fully-discrete positivity-preserving limiter is based on the work of -- Zhang, Shu (2011) - Maximum-principle-satisfying and positivity-preserving high-order schemes - for conservation laws: survey and new developments - [doi: 10.1098/rspa.2011.0153](https://doi.org/10.1098/rspa.2011.0153) -""" -struct PositivityPreservingLimiterShallowWater{N, Variables <: NTuple{N, Any}} - variables::Variables -end - -function PositivityPreservingLimiterShallowWater(; variables) - PositivityPreservingLimiterShallowWater(variables) -end - -function (limiter!::PositivityPreservingLimiterShallowWater)(u_ode, integrator, - semi::AbstractSemidiscretization, - t) - u = wrap_array(u_ode, semi) - @trixi_timeit timer() "positivity-preserving limiter" limiter_shallow_water!(u, - limiter!.variables, - mesh_equations_solver_cache(semi)...) -end - -# Iterate over tuples in a type-stable way using "lispy tuple programming", -# similar to https://stackoverflow.com/a/55849398: -# Iterating over tuples of different functions isn't type-stable in general -# but accessing the first element of a tuple is type-stable. Hence, it's good -# to process one element at a time and replace iteration by recursion here. -# Note that you shouldn't use this with too many elements per tuple since the -# compile times can increase otherwise - but a handful of elements per tuple -# is definitely fine. -function limiter_shallow_water!(u, variables::NTuple{N, Any}, - mesh, - equations::Union{ShallowWaterEquations1D, - ShallowWaterEquations2D}, - solver, cache) where {N} - variable = first(variables) - remaining_variables = Base.tail(variables) - - limiter_shallow_water!(u, equations.threshold_limiter, variable, mesh, equations, - solver, cache) - limiter_shallow_water!(u, remaining_variables, mesh, equations, solver, cache) - return nothing -end - -# terminate the type-stable iteration over tuples -function limiter_shallow_water!(u, variables::Tuple{}, - mesh, - equations::Union{ShallowWaterEquations1D, - ShallowWaterEquations2D}, - solver, cache) - nothing -end - -include("positivity_shallow_water_dg1d.jl") -include("positivity_shallow_water_dg2d.jl") -end # @muladd diff --git a/src/callbacks_stage/positivity_shallow_water_dg1d.jl b/src/callbacks_stage/positivity_shallow_water_dg1d.jl deleted file mode 100644 index 13c6866e895..00000000000 --- a/src/callbacks_stage/positivity_shallow_water_dg1d.jl +++ /dev/null @@ -1,89 +0,0 @@ -# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). -# Since these FMAs can increase the performance of many numerical algorithms, -# we need to opt-in explicitly. -# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. -@muladd begin -#! 
format: noindent - -# TODO: TrixiShallowWater: 1D wet/dry limiter should move - -function limiter_shallow_water!(u, threshold::Real, variable, - mesh::AbstractMesh{1}, - equations::ShallowWaterEquations1D, - dg::DGSEM, cache) - @unpack weights = dg.basis - - @threaded for element in eachelement(dg, cache) - # determine minimum value - value_min = typemax(eltype(u)) - for i in eachnode(dg) - u_node = get_node_vars(u, equations, dg, i, element) - value_min = min(value_min, variable(u_node, equations)) - end - - # detect if limiting is necessary - value_min < threshold || continue - - # compute mean value - u_mean = zero(get_node_vars(u, equations, dg, 1, element)) - for i in eachnode(dg) - u_node = get_node_vars(u, equations, dg, i, element) - u_mean += u_node * weights[i] - end - # note that the reference element is [-1,1]^ndims(dg), thus the weights sum to 2 - u_mean = u_mean / 2^ndims(mesh) - - # We compute the value directly with the mean values, as we assume that - # Jensen's inequality holds (e.g. pressure for compressible Euler equations). - value_mean = variable(u_mean, equations) - theta = (value_mean - threshold) / (value_mean - value_min) - for i in eachnode(dg) - u_node = get_node_vars(u, equations, dg, i, element) - - # Cut off velocity in case that the waterheight is smaller than the threshold - - h_node, h_v_node, b_node = u_node - h_mean, h_v_mean, _ = u_mean # b_mean is not used as b_node must not be overwritten - - # Set them both to zero to apply linear combination correctly - if h_node <= threshold - h_v_node = zero(eltype(u)) - h_v_mean = zero(eltype(u)) - end - - u_node = SVector(h_node, h_v_node, b_node) - u_mean = SVector(h_mean, h_v_mean, b_node) - - # When velocity is cut off, the only averaged value is the waterheight, - # because the velocity is set to zero and this value is passed. - # Otherwise, the velocity is averaged, as well. - # Note that the auxiliary bottom topography variable `b` is never limited. - set_node_vars!(u, theta * u_node + (1 - theta) * u_mean, - equations, dg, i, element) - end - end - - # "Safety" application of the wet/dry thresholds over all the DG nodes - # on the current `element` after the limiting above in order to avoid dry nodes. - # If the value_mean < threshold before applying limiter, there - # could still be dry nodes afterwards due to logic of the limiting - @threaded for element in eachelement(dg, cache) - for i in eachnode(dg) - u_node = get_node_vars(u, equations, dg, i, element) - - h, hv, b = u_node - - if h <= threshold - h = threshold - hv = zero(eltype(u)) - end - - u_node = SVector(h, hv, b) - - set_node_vars!(u, u_node, equations, dg, i, element) - end - end - - return nothing -end -end # @muladd diff --git a/src/callbacks_stage/positivity_shallow_water_dg2d.jl b/src/callbacks_stage/positivity_shallow_water_dg2d.jl deleted file mode 100644 index da3a25fdcf4..00000000000 --- a/src/callbacks_stage/positivity_shallow_water_dg2d.jl +++ /dev/null @@ -1,90 +0,0 @@ -# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). -# Since these FMAs can increase the performance of many numerical algorithms, -# we need to opt-in explicitly. -# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. -@muladd begin -#! 
format: noindent - -# TODO: TrixiShallowWater: 2D wet/dry limiter should move - -function limiter_shallow_water!(u, threshold::Real, variable, - mesh::AbstractMesh{2}, - equations::ShallowWaterEquations2D, dg::DGSEM, cache) - @unpack weights = dg.basis - - @threaded for element in eachelement(dg, cache) - # determine minimum value - value_min = typemax(eltype(u)) - for j in eachnode(dg), i in eachnode(dg) - u_node = get_node_vars(u, equations, dg, i, j, element) - value_min = min(value_min, variable(u_node, equations)) - end - - # detect if limiting is necessary - value_min < threshold || continue - - # compute mean value - u_mean = zero(get_node_vars(u, equations, dg, 1, 1, element)) - for j in eachnode(dg), i in eachnode(dg) - u_node = get_node_vars(u, equations, dg, i, j, element) - u_mean += u_node * weights[i] * weights[j] - end - # note that the reference element is [-1,1]^ndims(dg), thus the weights sum to 2 - u_mean = u_mean / 2^ndims(mesh) - - # We compute the value directly with the mean values, as we assume that - # Jensen's inequality holds (e.g. pressure for compressible Euler equations). - value_mean = variable(u_mean, equations) - theta = (value_mean - threshold) / (value_mean - value_min) - for j in eachnode(dg), i in eachnode(dg) - u_node = get_node_vars(u, equations, dg, i, j, element) - - # Cut off velocity in case that the water height is smaller than the threshold - - h_node, h_v1_node, h_v2_node, b_node = u_node - h_mean, h_v1_mean, h_v2_mean, _ = u_mean # b_mean is not used as it must not be overwritten - - if h_node <= threshold - h_v1_node = zero(eltype(u)) - h_v2_node = zero(eltype(u)) - h_v1_mean = zero(eltype(u)) - h_v2_mean = zero(eltype(u)) - end - - u_node = SVector(h_node, h_v1_node, h_v2_node, b_node) - u_mean = SVector(h_mean, h_v1_mean, h_v2_mean, b_node) - - # When velocities are cut off, the only averaged value is the water height, - # because the velocities are set to zero and this value is passed. - # Otherwise, the velocities are averaged, as well. - # Note that the auxiliary bottom topography variable `b` is never limited. - set_node_vars!(u, theta * u_node + (1 - theta) * u_mean, - equations, dg, i, j, element) - end - end - - # "Safety" application of the wet/dry thresholds over all the DG nodes - # on the current `element` after the limiting above in order to avoid dry nodes. 
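To make the blending step above concrete: the deleted limiter scales every node towards the element mean with `theta = (value_mean - threshold) / (value_mean - value_min)`. A minimal Julia sketch with purely illustrative numbers (not part of this patch) shows that the corrected nodal minimum then sits exactly on the threshold while the element mean is preserved:

```julia
# Illustrative numbers only; `threshold` stands in for the positivity threshold
# used by the limiter deleted above.
value_min  = 0.0       # smallest nodal water height in the element (dry node)
value_mean = 1.0e-2    # element mean of the water height
threshold  = 5.0e-13   # small positivity threshold

theta = (value_mean - threshold) / (value_mean - value_min)
blended_min = theta * value_min + (1 - theta) * value_mean

# The convex combination moves the nodal minimum onto the threshold and, since
# the same `theta` is applied at every node, leaves the element mean unchanged.
@assert blended_min ≈ threshold
```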
- # If the value_mean < threshold before applying limiter, there - # could still be dry nodes afterwards due to logic of the limiting - @threaded for element in eachelement(dg, cache) - for j in eachnode(dg), i in eachnode(dg) - u_node = get_node_vars(u, equations, dg, i, j, element) - - h, h_v1, h_v2, b = u_node - - if h <= threshold - h = threshold - h_v1 = zero(eltype(u)) - h_v2 = zero(eltype(u)) - end - - u_node = SVector(h, h_v1, h_v2, b) - - set_node_vars!(u, u_node, equations, dg, i, j, element) - end - end - - return nothing -end -end # @muladd diff --git a/src/equations/equations.jl b/src/equations/equations.jl index 65875a2a7e5..8f476cf6f16 100644 --- a/src/equations/equations.jl +++ b/src/equations/equations.jl @@ -405,8 +405,6 @@ abstract type AbstractShallowWaterEquations{NDIMS, NVARS} <: AbstractEquations{NDIMS, NVARS} end include("shallow_water_1d.jl") include("shallow_water_2d.jl") -include("shallow_water_two_layer_1d.jl") -include("shallow_water_two_layer_2d.jl") include("shallow_water_quasi_1d.jl") # CompressibleEulerEquations diff --git a/src/equations/numerical_fluxes.jl b/src/equations/numerical_fluxes.jl index 44d523b6e89..87fcb412244 100644 --- a/src/equations/numerical_fluxes.jl +++ b/src/equations/numerical_fluxes.jl @@ -326,29 +326,6 @@ This is a [`FluxHLL`](@ref)-type two-wave solver with special estimates of the w """ const flux_hlle = FluxHLL(min_max_speed_einfeldt) -# TODO: TrixiShallowWater: move the chen_noelle flux structure to the new package - -# An empty version of the `min_max_speed_chen_noelle` function is declared here -# in order to create a dimension agnostic version of `flux_hll_chen_noelle`. -# The full description of this wave speed estimate can be found in the docstrings -# for `min_max_speed_chen_noelle` in `shallow_water_1d.jl` or `shallow_water_2d.jl`. -function min_max_speed_chen_noelle end - -""" - flux_hll_chen_noelle = FluxHLL(min_max_speed_chen_noelle) - -An instance of [`FluxHLL`](@ref) specific to the shallow water equations that -uses the wave speed estimates from [`min_max_speed_chen_noelle`](@ref). -This HLL flux is guaranteed to have zero numerical mass flux out of a "dry" element, -maintain positivity of the water height, and satisfy an entropy inequality. - -For complete details see Section 2.4 of the following reference -- Guoxian Chen and Sebastian Noelle (2017) - A new hydrostatic reconstruction scheme based on subcell reconstructions - [DOI: 10.1137/15M1053074](https://doi.org/10.1137/15M1053074) -""" -const flux_hll_chen_noelle = FluxHLL(min_max_speed_chen_noelle) - """ flux_shima_etal_turbo(u_ll, u_rr, orientation_or_normal_direction, equations) diff --git a/src/equations/shallow_water_1d.jl b/src/equations/shallow_water_1d.jl index 25ce0fa79fe..e348ef946b7 100644 --- a/src/equations/shallow_water_1d.jl +++ b/src/equations/shallow_water_1d.jl @@ -6,7 +6,7 @@ #! format: noindent @doc raw""" - ShallowWaterEquations1D(; gravity, H0 = 0, threshold_limiter = nothing threshold_wet = nothing) + ShallowWaterEquations1D(; gravity, H0 = 0) Shallow water equations (SWE) in one space dimension. The equations are given by ```math @@ -24,12 +24,6 @@ also defines the total water height as ``H = h + b``. The additional quantity ``H_0`` is also available to store a reference value for the total water height that is useful to set initial conditions or test the "lake-at-rest" well-balancedness. -Also, there are two thresholds which prevent numerical problems as well as instabilities. 
Both of them do not -have to be passed, as default values are defined within the struct. The first one, `threshold_limiter`, is -used in [`PositivityPreservingLimiterShallowWater`](@ref) on the water height, as a (small) shift on the initial -condition and cutoff before the next time step. The second one, `threshold_wet`, is applied on the water height to -define when the flow is "wet" before calculating the numerical flux. - The bottom topography function ``b(x)`` is set inside the initial condition routine for a particular problem setup. To test the conservative form of the SWE one can set the bottom topography variable `b` to zero. @@ -51,35 +45,16 @@ References for the SWE are many but a good introduction is available in Chapter [DOI: 10.1017/CBO9780511791253](https://doi.org/10.1017/CBO9780511791253) """ struct ShallowWaterEquations1D{RealT <: Real} <: AbstractShallowWaterEquations{1, 3} - # TODO: TrixiShallowWater: where should the `threshold_limiter` and `threshold_wet` live? - # how to "properly" export these constants across the two packages? gravity::RealT # gravitational constant H0::RealT # constant "lake-at-rest" total water height - # `threshold_limiter` used in `PositivityPreservingLimiterShallowWater` on water height, - # as a (small) shift on the initial condition and cutoff before the next time step. - # Default is 500*eps() which in double precision is ≈1e-13. - threshold_limiter::RealT - # `threshold_wet` applied on water height to define when the flow is "wet" - # before calculating the numerical flux. - # Default is 5*eps() which in double precision is ≈1e-15. - threshold_wet::RealT end # Allow for flexibility to set the gravitational constant within an elixir depending on the # application where `gravity_constant=1.0` or `gravity_constant=9.81` are common values. # The reference total water height H0 defaults to 0.0 but is used for the "lake-at-rest" # well-balancedness test cases. -# Strict default values for thresholds that performed well in many numerical experiments -function ShallowWaterEquations1D(; gravity_constant, H0 = zero(gravity_constant), - threshold_limiter = nothing, threshold_wet = nothing) - T = promote_type(typeof(gravity_constant), typeof(H0)) - if threshold_limiter === nothing - threshold_limiter = 500 * eps(T) - end - if threshold_wet === nothing - threshold_wet = 5 * eps(T) - end - ShallowWaterEquations1D(gravity_constant, H0, threshold_limiter, threshold_wet) +function ShallowWaterEquations1D(; gravity_constant, H0 = zero(gravity_constant)) + ShallowWaterEquations1D(gravity_constant, H0) end have_nonconservative_terms(::ShallowWaterEquations1D) = True() @@ -332,54 +307,6 @@ Further details on the hydrostatic reconstruction and its motivation can be foun z) end -# TODO: TrixiShallowWater: move wet/dry specific routine -""" - flux_nonconservative_chen_noelle(u_ll, u_rr, - orientation::Integer, - equations::ShallowWaterEquations1D) - -Non-symmetric two-point surface flux that discretizes the nonconservative (source) term. -The discretization uses the `hydrostatic_reconstruction_chen_noelle` on the conservative -variables. - -Should be used together with [`FluxHydrostaticReconstruction`](@ref) and -[`hydrostatic_reconstruction_chen_noelle`](@ref) in the surface flux to ensure consistency. 
- -Further details on the hydrostatic reconstruction and its motivation can be found in -- Guoxian Chen and Sebastian Noelle (2017) - A new hydrostatic reconstruction scheme based on subcell reconstructions - [DOI:10.1137/15M1053074](https://dx.doi.org/10.1137/15M1053074) -""" -@inline function flux_nonconservative_chen_noelle(u_ll, u_rr, - orientation::Integer, - equations::ShallowWaterEquations1D) - - # Pull the water height and bottom topography on the left - h_ll, _, b_ll = u_ll - h_rr, _, b_rr = u_rr - - H_ll = h_ll + b_ll - H_rr = h_rr + b_rr - - b_star = min(max(b_ll, b_rr), min(H_ll, H_rr)) - - # Create the hydrostatic reconstruction for the left solution state - u_ll_star, _ = hydrostatic_reconstruction_chen_noelle(u_ll, u_rr, equations) - - # Copy the reconstructed water height for easier to read code - h_ll_star = u_ll_star[1] - - z = zero(eltype(u_ll)) - # Includes two parts: - # (i) Diagonal (consistent) term from the volume flux that uses `b_ll` to avoid - # cross-averaging across a discontinuous bottom topography - # (ii) True surface part that uses `h_ll` and `h_ll_star` to handle discontinuous bathymetry - return SVector(z, - equations.gravity * h_ll * b_ll - - equations.gravity * (h_ll_star + h_ll) * (b_ll - b_star), - z) -end - """ flux_nonconservative_ersing_etal(u_ll, u_rr, orientation::Integer, equations::ShallowWaterEquations1D) @@ -521,67 +448,6 @@ Further details on this hydrostatic reconstruction and its motivation can be fou return u_ll_star, u_rr_star end -# TODO: TrixiShallowWater: move wet/dry specific routine -""" - hydrostatic_reconstruction_chen_noelle(u_ll, u_rr, orientation::Integer, - equations::ShallowWaterEquations1D) - -A particular type of hydrostatic reconstruction of the water height to guarantee well-balancedness -for a general bottom topography of the [`ShallowWaterEquations1D`](@ref). The reconstructed solution states -`u_ll_star` and `u_rr_star` variables are used to evaluate the surface numerical flux at the interface. -The key idea is a linear reconstruction of the bottom and water height at the interfaces using subcells. -Use in combination with the generic numerical flux routine [`FluxHydrostaticReconstruction`](@ref). - -Further details on this hydrostatic reconstruction and its motivation can be found in -- Guoxian Chen and Sebastian Noelle (2017) - A new hydrostatic reconstruction scheme based on subcell reconstructions - [DOI:10.1137/15M1053074](https://dx.doi.org/10.1137/15M1053074) -""" -@inline function hydrostatic_reconstruction_chen_noelle(u_ll, u_rr, - equations::ShallowWaterEquations1D) - # Unpack left and right water heights and bottom topographies - h_ll, _, b_ll = u_ll - h_rr, _, b_rr = u_rr - - # Get the velocities on either side - v_ll = velocity(u_ll, equations) - v_rr = velocity(u_rr, equations) - - H_ll = b_ll + h_ll - H_rr = b_rr + h_rr - - b_star = min(max(b_ll, b_rr), min(H_ll, H_rr)) - - # Compute the reconstructed water heights - h_ll_star = min(H_ll - b_star, h_ll) - h_rr_star = min(H_rr - b_star, h_rr) - - # Set the water height to be at least the value stored in the variable threshold after - # the hydrostatic reconstruction is applied and before the numerical flux is calculated - # to avoid numerical problem with arbitrary small values. Interfaces with a water height - # lower or equal to the threshold can be declared as dry. - # The default value for `threshold_wet` is ≈ 5*eps(), or 1e-15 in double precision, is set - # in the `ShallowWaterEquations1D` struct. 
This threshold value can be changed in the constructor - # call of this equation struct in an elixir. - threshold = equations.threshold_wet - - if (h_ll_star <= threshold) - h_ll_star = threshold - v_ll = zero(v_ll) - end - - if (h_rr_star <= threshold) - h_rr_star = threshold - v_rr = zero(v_rr) - end - - # Create the conservative variables using the reconstruted water heights - u_ll_star = SVector(h_ll_star, h_ll_star * v_ll, b_ll) - u_rr_star = SVector(h_rr_star, h_rr_star * v_rr, b_rr) - - return u_ll_star, u_rr_star -end - # Calculate maximum wave speed for local Lax-Friedrichs-type dissipation as the # maximum velocity magnitude plus the maximum speed of sound @inline function max_abs_speed_naive(u_ll, u_rr, orientation::Integer, @@ -646,39 +512,6 @@ end return λ_min, λ_max end -# TODO: TrixiShallowWater: move wet/dry specific routine -""" - min_max_speed_chen_noelle(u_ll, u_rr, orientation::Integer, - equations::ShallowWaterEquations1D) - -The approximated speeds for the HLL type numerical flux used by Chen and Noelle for their -hydrostatic reconstruction. As they state in the paper, these speeds are chosen for the numerical -flux to ensure positivity and to satisfy an entropy inequality. - -Further details on this hydrostatic reconstruction and its motivation can be found in -- Guoxian Chen and Sebastian Noelle (2017) - A new hydrostatic reconstruction scheme based on subcell reconstructions - [DOI:10.1137/15M1053074](https://dx.doi.org/10.1137/15M1053074) -""" -@inline function min_max_speed_chen_noelle(u_ll, u_rr, orientation::Integer, - equations::ShallowWaterEquations1D) - # Get the velocity quantities - v_ll = velocity(u_ll, equations) - v_rr = velocity(u_rr, equations) - - # Calculate the wave celerity on the left and right - h_ll = waterheight(u_ll, equations) - h_rr = waterheight(u_rr, equations) - - a_ll = sqrt(equations.gravity * h_ll) - a_rr = sqrt(equations.gravity * h_rr) - - λ_min = min(v_ll - a_ll, v_rr - a_rr, zero(eltype(u_ll))) - λ_max = max(v_ll + a_ll, v_rr + a_rr, zero(eltype(u_ll))) - - return λ_min, λ_max -end - # More refined estimates for minimum and maximum wave speeds for HLL-type fluxes @inline function min_max_speed_davis(u_ll, u_rr, orientation::Integer, equations::ShallowWaterEquations1D) @@ -841,20 +674,10 @@ end end # Calculate the error for the "lake-at-rest" test case where H = h+b should -# be a constant value over time. Note, assumes there is a single reference -# water height `H0` with which to compare. -# -# TODO: TrixiShallowWater: where should `threshold_limiter` live? May need -# to modify or have different versions of the `lake_at_rest_error` function +# be a constant value over time. @inline function lake_at_rest_error(u, equations::ShallowWaterEquations1D) h, _, b = u - # For well-balancedness testing with possible wet/dry regions the reference - # water height `H0` accounts for the possibility that the bottom topography - # can emerge out of the water as well as for the threshold offset to avoid - # division by a "hard" zero water heights as well. - H0_wet_dry = max(equations.H0, b + equations.threshold_limiter) - - return abs(H0_wet_dry - (h + b)) + return abs(equations.H0 - (h + b)) end end # @muladd diff --git a/src/equations/shallow_water_2d.jl b/src/equations/shallow_water_2d.jl index 6728d7d5553..74a299a51e6 100644 --- a/src/equations/shallow_water_2d.jl +++ b/src/equations/shallow_water_2d.jl @@ -6,7 +6,7 @@ #! 
format: noindent @doc raw""" - ShallowWaterEquations2D(; gravity, H0 = 0, threshold_limiter = nothing, threshold_wet = nothing) + ShallowWaterEquations2D(; gravity, H0 = 0) Shallow water equations (SWE) in two space dimensions. The equations are given by ```math @@ -27,12 +27,6 @@ also defines the total water height as ``H = h + b``. The additional quantity ``H_0`` is also available to store a reference value for the total water height that is useful to set initial conditions or test the "lake-at-rest" well-balancedness. -Also, there are two thresholds which prevent numerical problems as well as instabilities. Both of them do not -have to be passed, as default values are defined within the struct. The first one, `threshold_limiter`, is -used in [`PositivityPreservingLimiterShallowWater`](@ref) on the water height, as a (small) shift on the initial -condition and cutoff before the next time step. The second one, `threshold_wet`, is applied on the water height to -define when the flow is "wet" before calculating the numerical flux. - The bottom topography function ``b(x,y)`` is set inside the initial condition routine for a particular problem setup. To test the conservative form of the SWE one can set the bottom topography variable `b` to zero. @@ -54,18 +48,8 @@ References for the SWE are many but a good introduction is available in Chapter [DOI: 10.1017/CBO9780511791253](https://doi.org/10.1017/CBO9780511791253) """ struct ShallowWaterEquations2D{RealT <: Real} <: AbstractShallowWaterEquations{2, 4} - # TODO: TrixiShallowWater: where should the `threshold_limiter` and `threshold_wet` live? - # how to "properly" export these constants across the two packages? gravity::RealT # gravitational constant H0::RealT # constant "lake-at-rest" total water height - # `threshold_limiter` used in `PositivityPreservingLimiterShallowWater` on water height, - # as a (small) shift on the initial condition and cutoff before the next time step. - # Default is 500*eps() which in double precision is ≈1e-13. - threshold_limiter::RealT - # `threshold_wet` applied on water height to define when the flow is "wet" - # before calculating the numerical flux. - # Default is 5*eps() which in double precision is ≈1e-15. - threshold_wet::RealT end # Allow for flexibility to set the gravitational constant within an elixir depending on the @@ -73,16 +57,8 @@ end # The reference total water height H0 defaults to 0.0 but is used for the "lake-at-rest" # well-balancedness test cases. 
# Strict default values for thresholds that performed well in many numerical experiments -function ShallowWaterEquations2D(; gravity_constant, H0 = zero(gravity_constant), - threshold_limiter = nothing, threshold_wet = nothing) - T = promote_type(typeof(gravity_constant), typeof(H0)) - if threshold_limiter === nothing - threshold_limiter = 500 * eps(T) - end - if threshold_wet === nothing - threshold_wet = 5 * eps(T) - end - ShallowWaterEquations2D(gravity_constant, H0, threshold_limiter, threshold_wet) +function ShallowWaterEquations2D(; gravity_constant, H0 = zero(gravity_constant)) + ShallowWaterEquations2D(gravity_constant, H0) end have_nonconservative_terms(::ShallowWaterEquations2D) = True() @@ -460,69 +436,6 @@ Further details for the hydrostatic reconstruction and its motivation can be fou return u_ll_star, u_rr_star end -# TODO: TrixiShallowWater: move wet/dry specific routine -""" - hydrostatic_reconstruction_chen_noelle(u_ll, u_rr, orientation::Integer, - equations::ShallowWaterEquations2D) - -A particular type of hydrostatic reconstruction of the water height to guarantee well-balancedness -for a general bottom topography of the [`ShallowWaterEquations2D`](@ref). The reconstructed solution states -`u_ll_star` and `u_rr_star` variables are then used to evaluate the surface numerical flux at the interface. -The key idea is a linear reconstruction of the bottom and water height at the interfaces using subcells. -Use in combination with the generic numerical flux routine [`FluxHydrostaticReconstruction`](@ref). - -Further details on this hydrostatic reconstruction and its motivation can be found in -- Guoxian Chen and Sebastian Noelle (2017) - A new hydrostatic reconstruction scheme based on subcell reconstructions - [DOI:10.1137/15M1053074](https://dx.doi.org/10.1137/15M1053074) -""" -@inline function hydrostatic_reconstruction_chen_noelle(u_ll, u_rr, - equations::ShallowWaterEquations2D) - # Unpack left and right water heights and bottom topographies - h_ll, _, _, b_ll = u_ll - h_rr, _, _, b_rr = u_rr - - # Get the velocities on either side - v1_ll, v2_ll = velocity(u_ll, equations) - v1_rr, v2_rr = velocity(u_rr, equations) - - H_ll = b_ll + h_ll - H_rr = b_rr + h_rr - - b_star = min(max(b_ll, b_rr), min(H_ll, H_rr)) - - # Compute the reconstructed water heights - h_ll_star = min(H_ll - b_star, h_ll) - h_rr_star = min(H_rr - b_star, h_rr) - - # Set the water height to be at least the value stored in the variable threshold after - # the hydrostatic reconstruction is applied and before the numerical flux is calculated - # to avoid numerical problem with arbitrary small values. Interfaces with a water height - # lower or equal to the threshold can be declared as dry. - # The default value for `threshold_wet` is ≈5*eps(), or 1e-15 in double precision, is set - # in the `ShallowWaterEquations2D` struct. This threshold value can be changed in the constructor - # call of this equation struct in an elixir. 
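With the wet/dry thresholds removed by this series, an elixir only passes the gravity constant and, optionally, `H0` to these constructors. A hypothetical fragment under that assumption (the `H0` value is illustrative only):

```julia
# Hypothetical elixir fragment assuming this patch is applied; the keyword
# arguments mirror the simplified constructors introduced above.
using Trixi

equations_1d = ShallowWaterEquations1D(gravity_constant = 9.81, H0 = 3.25)
equations_2d = ShallowWaterEquations2D(gravity_constant = 9.81)
```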
- threshold = equations.threshold_wet - - if (h_ll_star <= threshold) - h_ll_star = threshold - v1_ll = zero(v1_ll) - v2_ll = zero(v2_ll) - end - - if (h_rr_star <= threshold) - h_rr_star = threshold - v1_rr = zero(v1_rr) - v2_rr = zero(v2_rr) - end - - # Create the conservative variables using the reconstruted water heights - u_ll_star = SVector(h_ll_star, h_ll_star * v1_ll, h_ll_star * v2_ll, b_ll) - u_rr_star = SVector(h_rr_star, h_rr_star * v1_rr, h_rr_star * v2_rr, b_rr) - - return u_ll_star, u_rr_star -end - """ flux_nonconservative_audusse_etal(u_ll, u_rr, orientation::Integer, equations::ShallowWaterEquations2D) @@ -608,104 +521,6 @@ end return SVector(f1, f2, f3, f4) end -# TODO: TrixiShallowWater: move wet/dry specific routine -""" - flux_nonconservative_chen_noelle(u_ll, u_rr, - orientation::Integer, - equations::ShallowWaterEquations2D) - flux_nonconservative_chen_noelle(u_ll, u_rr, - normal_direction_ll ::AbstractVector, - normal_direction_average ::AbstractVector, - equations::ShallowWaterEquations2D) - -Non-symmetric two-point surface flux that discretizes the nonconservative (source) term. -The discretization uses the [`hydrostatic_reconstruction_chen_noelle`](@ref) on the conservative -variables. - -Should be used together with [`FluxHydrostaticReconstruction`](@ref) and -[`hydrostatic_reconstruction_chen_noelle`](@ref) in the surface flux to ensure consistency. - -Further details on the hydrostatic reconstruction and its motivation can be found in -- Guoxian Chen and Sebastian Noelle (2017) - A new hydrostatic reconstruction scheme based on subcell reconstructions - [DOI:10.1137/15M1053074](https://dx.doi.org/10.1137/15M1053074) -""" -@inline function flux_nonconservative_chen_noelle(u_ll, u_rr, orientation::Integer, - equations::ShallowWaterEquations2D) - # Pull the water height and bottom topography on the left - h_ll, _, _, b_ll = u_ll - h_rr, _, _, b_rr = u_rr - - H_ll = h_ll + b_ll - H_rr = h_rr + b_rr - - b_star = min(max(b_ll, b_rr), min(H_ll, H_rr)) - - # Create the hydrostatic reconstruction for the left solution state - u_ll_star, _ = hydrostatic_reconstruction_chen_noelle(u_ll, u_rr, equations) - - # Copy the reconstructed water height for easier to read code - h_ll_star = u_ll_star[1] - - z = zero(eltype(u_ll)) - # Includes two parts: - # (i) Diagonal (consistent) term from the volume flux that uses `b_ll` to avoid - # cross-averaging across a discontinuous bottom topography - # (ii) True surface part that uses `h_ll` and `h_ll_star` to handle discontinuous bathymetry - g = equations.gravity - if orientation == 1 - f = SVector(z, - g * h_ll * b_ll - g * (h_ll_star + h_ll) * (b_ll - b_star), - z, z) - else # orientation == 2 - f = SVector(z, z, - g * h_ll * b_ll - g * (h_ll_star + h_ll) * (b_ll - b_star), - z) - end - - return f -end - -@inline function flux_nonconservative_chen_noelle(u_ll, u_rr, - normal_direction_ll::AbstractVector, - normal_direction_average::AbstractVector, - equations::ShallowWaterEquations2D) - # Pull the water height and bottom topography on the left - h_ll, _, _, b_ll = u_ll - h_rr, _, _, b_rr = u_rr - - H_ll = h_ll + b_ll - H_rr = h_rr + b_rr - - b_star = min(max(b_ll, b_rr), min(H_ll, H_rr)) - - # Create the hydrostatic reconstruction for the left solution state - u_ll_star, _ = hydrostatic_reconstruction_chen_noelle(u_ll, u_rr, equations) - - # Copy the reconstructed water height for easier to read code - h_ll_star = u_ll_star[1] - - # Comes in two parts: - # (i) Diagonal (consistent) term from the volume flux that uses 
`normal_direction_average` - # but we use `b_ll` to avoid cross-averaging across a discontinuous bottom topography - - f2 = normal_direction_average[1] * equations.gravity * h_ll * b_ll - f3 = normal_direction_average[2] * equations.gravity * h_ll * b_ll - - # (ii) True surface part that uses `normal_direction_ll`, `h_ll` and `h_ll_star` - # to handle discontinuous bathymetry - - f2 -= normal_direction_ll[1] * equations.gravity * (h_ll_star + h_ll) * - (b_ll - b_star) - f3 -= normal_direction_ll[2] * equations.gravity * (h_ll_star + h_ll) * - (b_ll - b_star) - - # First and last equations do not have a nonconservative flux - f1 = f4 = zero(eltype(u_ll)) - - return SVector(f1, f2, f3, f4) -end - """ flux_nonconservative_ersing_etal(u_ll, u_rr, orientation::Integer, equations::ShallowWaterEquations2D) @@ -1020,67 +835,6 @@ end return λ_min, λ_max end -# TODO: TrixiShallowWater: move wet/dry specific routine -""" - min_max_speed_chen_noelle(u_ll, u_rr, orientation::Integer, - equations::ShallowWaterEquations2D) - min_max_speed_chen_noelle(u_ll, u_rr, normal_direction::AbstractVector, - equations::ShallowWaterEquations2D) - -Special estimate of the minimal and maximal wave speed of the shallow water equations for -the left and right states `u_ll, u_rr`. These approximate speeds are used for the HLL-type -numerical flux [`flux_hll_chen_noelle`](@ref). These wave speed estimates -together with a particular hydrostatic reconstruction technique guarantee -that the numerical flux is positive and satisfies an entropy inequality. - -Further details on this hydrostatic reconstruction and its motivation can be found in -the reference below. The definition of the wave speeds are given in Equation (2.20). -- Guoxian Chen and Sebastian Noelle (2017) - A new hydrostatic reconstruction scheme based on subcell reconstructions - [DOI:10.1137/15M1053074](https://dx.doi.org/10.1137/15M1053074) -""" -@inline function min_max_speed_chen_noelle(u_ll, u_rr, orientation::Integer, - equations::ShallowWaterEquations2D) - h_ll = waterheight(u_ll, equations) - v1_ll, v2_ll = velocity(u_ll, equations) - h_rr = waterheight(u_rr, equations) - v1_rr, v2_rr = velocity(u_rr, equations) - - a_ll = sqrt(equations.gravity * h_ll) - a_rr = sqrt(equations.gravity * h_rr) - - if orientation == 1 # x-direction - λ_min = min(v1_ll - a_ll, v1_rr - a_rr, zero(eltype(u_ll))) - λ_max = max(v1_ll + a_ll, v1_rr + a_rr, zero(eltype(u_ll))) - else # y-direction - λ_min = min(v2_ll - a_ll, v2_rr - a_rr, zero(eltype(u_ll))) - λ_max = max(v2_ll + a_ll, v2_rr + a_rr, zero(eltype(u_ll))) - end - - return λ_min, λ_max -end - -@inline function min_max_speed_chen_noelle(u_ll, u_rr, normal_direction::AbstractVector, - equations::ShallowWaterEquations2D) - h_ll = waterheight(u_ll, equations) - v1_ll, v2_ll = velocity(u_ll, equations) - h_rr = waterheight(u_rr, equations) - v1_rr, v2_rr = velocity(u_rr, equations) - - v_normal_ll = v1_ll * normal_direction[1] + v2_ll * normal_direction[2] - v_normal_rr = v1_rr * normal_direction[1] + v2_rr * normal_direction[2] - - norm_ = norm(normal_direction) - - a_ll = sqrt(equations.gravity * h_ll) * norm_ - a_rr = sqrt(equations.gravity * h_rr) * norm_ - - λ_min = min(v_normal_ll - a_ll, v_normal_rr - a_rr, zero(eltype(u_ll))) - λ_max = max(v_normal_ll + a_ll, v_normal_rr + a_rr, zero(eltype(u_ll))) - - return λ_min, λ_max -end - # More refined estimates for minimum and maximum wave speeds for HLL-type fluxes @inline function min_max_speed_davis(u_ll, u_rr, orientation::Integer, 
equations::ShallowWaterEquations2D) @@ -1327,20 +1081,10 @@ end end # Calculate the error for the "lake-at-rest" test case where H = h+b should -# be a constant value over time. Note, assumes there is a single reference -# water height `H0` with which to compare. -# -# TODO: TrixiShallowWater: where should `threshold_limiter` live? May need -# to modify or have different versions of the `lake_at_rest_error` function +# be a constant value over time. @inline function lake_at_rest_error(u, equations::ShallowWaterEquations2D) h, _, _, b = u - # For well-balancedness testing with possible wet/dry regions the reference - # water height `H0` accounts for the possibility that the bottom topography - # can emerge out of the water as well as for the threshold offset to avoid - # division by a "hard" zero water heights as well. - H0_wet_dry = max(equations.H0, b + equations.threshold_limiter) - - return abs(H0_wet_dry - (h + b)) + return abs(equations.H0 - (h + b)) end end # @muladd diff --git a/src/equations/shallow_water_quasi_1d.jl b/src/equations/shallow_water_quasi_1d.jl index d52fbab841d..51c360104a7 100644 --- a/src/equations/shallow_water_quasi_1d.jl +++ b/src/equations/shallow_water_quasi_1d.jl @@ -22,12 +22,6 @@ The gravitational constant is denoted by `g`, the (possibly) variable bottom top The additional quantity ``H_0`` is also available to store a reference value for the total water height that is useful to set initial conditions or test the "lake-at-rest" well-balancedness. -Also, there are two thresholds which prevent numerical problems as well as instabilities. Both of them do not -have to be passed, as default values are defined within the struct. The first one, `threshold_limiter`, is -used in [`PositivityPreservingLimiterShallowWater`](@ref) on the water height, as a (small) shift on the initial -condition and cutoff before the next time step. The second one, `threshold_wet`, is applied on the water height to -define when the flow is "wet" before calculating the numerical flux. - The bottom topography function ``b(x)`` and channel width ``a(x)`` are set inside the initial condition routine for a particular problem setup. To test the conservative form of the SWE one can set the bottom topography variable `b` to zero and ``a`` to one. @@ -47,14 +41,6 @@ struct ShallowWaterEquationsQuasi1D{RealT <: Real} <: AbstractShallowWaterEquations{1, 4} gravity::RealT # gravitational constant H0::RealT # constant "lake-at-rest" total water height - # `threshold_limiter` used in `PositivityPreservingLimiterShallowWater` on water height, - # as a (small) shift on the initial condition and cutoff before the next time step. - # Default is 500*eps() which in double precision is ≈1e-13. - threshold_limiter::RealT - # `threshold_wet` applied on water height to define when the flow is "wet" - # before calculating the numerical flux. - # Default is 5*eps() which in double precision is ≈1e-15. - threshold_wet::RealT end # Allow for flexibility to set the gravitational constant within an elixir depending on the @@ -62,17 +48,8 @@ end # The reference total water height H0 defaults to 0.0 but is used for the "lake-at-rest" # well-balancedness test cases. 
# Strict default values for thresholds that performed well in many numerical experiments -function ShallowWaterEquationsQuasi1D(; gravity_constant, H0 = zero(gravity_constant), - threshold_limiter = nothing, - threshold_wet = nothing) - T = promote_type(typeof(gravity_constant), typeof(H0)) - if threshold_limiter === nothing - threshold_limiter = 500 * eps(T) - end - if threshold_wet === nothing - threshold_wet = 5 * eps(T) - end - ShallowWaterEquationsQuasi1D(gravity_constant, H0, threshold_limiter, threshold_wet) +function ShallowWaterEquationsQuasi1D(; gravity_constant, H0 = zero(gravity_constant)) + ShallowWaterEquationsQuasi1D(gravity_constant, H0) end have_nonconservative_terms(::ShallowWaterEquationsQuasi1D) = True() @@ -338,18 +315,10 @@ end # be a constant value over time. Note, assumes there is a single reference # water height `H0` with which to compare. # -# TODO: TrixiShallowWater: where should `threshold_limiter` live? May need -# to modify or have different versions of the `lake_at_rest_error` function @inline function lake_at_rest_error(u, equations::ShallowWaterEquationsQuasi1D) _, _, b, _ = u h = waterheight(u, equations) - # For well-balancedness testing with possible wet/dry regions the reference - # water height `H0` accounts for the possibility that the bottom topography - # can emerge out of the water as well as for the threshold offset to avoid - # division by a "hard" zero water heights as well. - H0_wet_dry = max(equations.H0, b + equations.threshold_limiter) - - return abs(H0_wet_dry - (h + b)) + return abs(equations.H0 - (h + b)) end end # @muladd diff --git a/src/equations/shallow_water_two_layer_1d.jl b/src/equations/shallow_water_two_layer_1d.jl deleted file mode 100644 index 42ff393593e..00000000000 --- a/src/equations/shallow_water_two_layer_1d.jl +++ /dev/null @@ -1,511 +0,0 @@ -# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). -# Since these FMAs can increase the performance of many numerical algorithms, -# we need to opt-in explicitly. -# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. -@muladd begin -#! format: noindent - -# TODO: TrixiShallowWater: 1D two layer equations should move to new package - -@doc raw""" - ShallowWaterTwoLayerEquations1D(gravity, H0, rho_upper, rho_lower) - -Two-Layer Shallow Water equations (2LSWE) in one space dimension. The equations are given by -```math -\begin{alignat*}{4} -&\frac{\partial}{\partial t}h_{upper} -&&+ \frac{\partial}{\partial x}\left(h_{upper} v_{1,upper}\right) -&&= 0 \\ -&\frac{\partial}{\partial t}\left(h_{upper}v_{1,upper}\right) -&&+ \frac{\partial}{\partial x}\left(h_{upper}v_{1,upper}^2 + \dfrac{gh_{upper}^2}{2}\right) -&&= -gh_{upper}\frac{\partial}{\partial x}\left(b+h_{lower}\right)\\ -&\frac{\partial}{\partial t}h_{lower} -&&+ \frac{\partial}{\partial x}\left(h_{lower}v_{1,lower}\right) -&&= 0 \\ -&\frac{\partial}{\partial t}\left(h_{lower}v_{1,lower}\right) -&&+ \frac{\partial}{\partial x}\left(h_{lower}v_{1,lower}^2 + \dfrac{gh_{lower}^2}{2}\right) -&&= -gh_{lower}\frac{\partial}{\partial x}\left(b+\dfrac{\rho_{upper}}{\rho_{lower}}h_{upper}\right). -\end{alignat*} -``` -The unknown quantities of the 2LSWE are the water heights of the {lower} layer ``h_{lower}`` and the -{upper} layer ``h_{upper}`` with respective velocities ``v_{1,upper}`` and ``v_{1,lower}``. The gravitational constant is -denoted by `g`, the layer densitites by ``\rho_{upper}``and ``\rho_{lower}`` and the (possibly) variable -bottom topography function ``b(x)``. 
The conservative variable water height ``h_{lower}`` is measured -from the bottom topography ``b`` and ``h_{upper}`` relative to ``h_{lower}``, therefore one also defines the -total water heights as ``H_{upper} = h_{upper} + h_{upper} + b`` and ``H_{lower} = h_{lower} + b``. - -The densities must be chosen such that ``\rho_{upper} < \rho_{lower}``, to make sure that the heavier fluid -``\rho_{lower}`` is in the bottom layer and the lighter fluid ``\rho_{upper}`` in the {upper} layer. - -The additional quantity ``H_0`` is also available to store a reference value for the total water -height that is useful to set initial conditions or test the "lake-at-rest" well-balancedness. - -The bottom topography function ``b(x)`` is set inside the initial condition routine -for a particular problem setup. - -In addition to the unknowns, Trixi currently stores the bottom topography values at the -approximation points despite being fixed in time. This is done for convenience of computing the -bottom topography gradients on the fly during the approximation as well as computing auxiliary -quantities like the total water height ``H`` or the entropy variables. -This affects the implementation and use of these equations in various ways: -* The flux values corresponding to the bottom topography must be zero. -* The bottom topography values must be included when defining initial conditions, boundary - conditions or source terms. -* [`AnalysisCallback`](@ref) analyzes this variable. -* Trixi's visualization tools will visualize the bottom topography by default. - -A good introduction for the 2LSWE is available in Chapter 12 of the book: -- Benoit Cushman-Roisin (2011)\ - Introduction to geophyiscal fluid dynamics: physical and numerical aspects\ - \ - ISBN: 978-0-12-088759-0 -""" -struct ShallowWaterTwoLayerEquations1D{RealT <: Real} <: - AbstractShallowWaterEquations{1, 5} - gravity::RealT # gravitational constant - H0::RealT # constant "lake-at-rest" total water height - rho_upper::RealT # lower layer density - rho_lower::RealT # upper layer density - r::RealT # ratio of rho_upper / rho_lower -end - -# Allow for flexibility to set the gravitational constant within an elixir depending on the -# application where `gravity_constant=1.0` or `gravity_constant=9.81` are common values. -# The reference total water height H0 defaults to 0.0 but is used for the "lake-at-rest" -# well-balancedness test cases. Densities must be specified such that rho_upper <= rho_lower. 
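For reference, the two-layer constructor deleted below is called with keyword arguments and enforces the density ordering stated above; per the `TODO` notes this functionality is intended to move to TrixiShallowWater. A hypothetical usage sketch (the `H0` value is illustrative only):

```julia
# Hypothetical call of the two-layer constructor deleted in this patch.
# The densities satisfy rho_upper <= rho_lower, as required by the check below.
equations = ShallowWaterTwoLayerEquations1D(gravity_constant = 10.0, H0 = 2.0,
                                            rho_upper = 0.9, rho_lower = 1.0)
```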
-function ShallowWaterTwoLayerEquations1D(; gravity_constant, - H0 = zero(gravity_constant), rho_upper, - rho_lower) - # Assign density ratio if rho_upper <= rho_lower - if rho_upper > rho_lower - error("Invalid input: Densities must be chosen such that rho_upper <= rho_lower") - else - r = rho_upper / rho_lower - end - ShallowWaterTwoLayerEquations1D(gravity_constant, H0, rho_upper, rho_lower, r) -end - -have_nonconservative_terms(::ShallowWaterTwoLayerEquations1D) = True() -function varnames(::typeof(cons2cons), ::ShallowWaterTwoLayerEquations1D) - ("h_upper", "h_v_upper", - "h_lower", "h_v_lower", "b") -end -# Note, we use the total water height, H_lower = h_upper + h_lower + b, and first layer total height -# H_upper = h_upper + b as the first primitive variable for easier visualization and setting initial -# conditions -function varnames(::typeof(cons2prim), ::ShallowWaterTwoLayerEquations1D) - ("H_upper", "v_upper", - "H_lower", "v_lower", "b") -end - -# Set initial conditions at physical location `x` for time `t` -""" - initial_condition_convergence_test(x, t, equations::ShallowWaterTwoLayerEquations1D) - -A smooth initial condition used for convergence tests in combination with -[`source_terms_convergence_test`](@ref) (and -[`BoundaryConditionDirichlet(initial_condition_convergence_test)`](@ref) in non-periodic domains). -""" -function initial_condition_convergence_test(x, t, - equations::ShallowWaterTwoLayerEquations1D) - # some constants are chosen such that the function is periodic on the domain [0,sqrt(2)] - ω = 2.0 * pi * sqrt(2.0) - - H_lower = 2.0 + 0.1 * sin(ω * x[1] + t) - H_upper = 4.0 + 0.1 * cos(ω * x[1] + t) - v_lower = 1.0 - v_upper = 0.9 - b = 1.0 + 0.1 * cos(2.0 * ω * x[1]) - - return prim2cons(SVector(H_upper, v_upper, H_lower, v_lower, b), equations) -end - -""" - source_terms_convergence_test(u, x, t, equations::ShallowWaterTwoLayerEquations1D) - -Source terms used for convergence tests in combination with -[`initial_condition_convergence_test`](@ref) -(and [`BoundaryConditionDirichlet(initial_condition_convergence_test)`](@ref) -in non-periodic domains). -""" -@inline function source_terms_convergence_test(u, x, t, - equations::ShallowWaterTwoLayerEquations1D) - # Same settings as in `initial_condition_convergence_test`. 
Some derivative simplify because - # this manufactured solution velocity is taken to be constant - ω = 2 * pi * sqrt(2.0) - - du1 = (-0.1 * cos(t + ω * x[1]) - 0.1 * sin(t + ω * x[1]) - - 0.09 * ω * cos(t + ω * x[1]) + - -0.09 * ω * sin(t + ω * x[1])) - du2 = (5.0 * (-0.1 * ω * cos(t + ω * x[1]) - 0.1 * ω * sin(t + ω * x[1])) * - (4.0 + 0.2 * cos(t + ω * x[1]) + - -0.2 * sin(t + ω * x[1])) + - 0.1 * ω * (20.0 + cos(t + ω * x[1]) - sin(t + ω * x[1])) * - cos(t + - ω * x[1]) - 0.09 * cos(t + ω * x[1]) - 0.09 * sin(t + ω * x[1]) - - 0.081 * ω * cos(t + ω * x[1]) + - -0.081 * ω * sin(t + ω * x[1])) - du3 = 0.1 * cos(t + ω * x[1]) + 0.1 * ω * cos(t + ω * x[1]) + - 0.2 * ω * sin(2.0 * ω * x[1]) - du4 = ((10.0 + sin(t + ω * x[1]) - cos(2ω * x[1])) * - (-0.09 * ω * cos(t + ω * x[1]) - 0.09 * ω * sin(t + - ω * x[1]) - - 0.2 * ω * sin(2 * ω * x[1])) + 0.1 * cos(t + ω * x[1]) + - 0.1 * ω * cos(t + ω * x[1]) + - 5.0 * (0.1 * ω * cos(t + ω * x[1]) + 0.2 * ω * sin(2.0 * ω * x[1])) * - (2.0 + 0.2 * sin(t + ω * x[1]) + - -0.2 * cos(2.0 * ω * x[1])) + 0.2 * ω * sin(2.0 * ω * x[1])) - - return SVector(du1, du2, du3, du4, zero(eltype(u))) -end - -""" - boundary_condition_slip_wall(u_inner, orientation_or_normal, x, t, surface_flux_function, - equations::ShallowWaterTwoLayerEquations1D) - -Create a boundary state by reflecting the normal velocity component and keep -the tangential velocity component unchanged. The boundary water height is taken from -the internal value. - -For details see Section 9.2.5 of the book: -- Eleuterio F. Toro (2001) - Shock-Capturing Methods for Free-Surface Shallow Flows - 1st edition - ISBN 0471987662 -""" -@inline function boundary_condition_slip_wall(u_inner, orientation_or_normal, direction, - x, t, surface_flux_function, - equations::ShallowWaterTwoLayerEquations1D) - # create the "external" boundary solution state - u_boundary = SVector(u_inner[1], - -u_inner[2], - u_inner[3], - -u_inner[4], - u_inner[5]) - - # calculate the boundary flux - if iseven(direction) # u_inner is "left" of boundary, u_boundary is "right" of boundary - f = surface_flux_function(u_inner, u_boundary, orientation_or_normal, equations) - else # u_boundary is "left" of boundary, u_inner is "right" of boundary - f = surface_flux_function(u_boundary, u_inner, orientation_or_normal, equations) - end - return f -end - -# Calculate 1D flux for a single point -# Note, the bottom topography has no flux -@inline function flux(u, orientation::Integer, - equations::ShallowWaterTwoLayerEquations1D) - h_upper, h_v_upper, h_lower, h_v_lower, _ = u - - # Calculate velocities - v_upper, v_lower = velocity(u, equations) - # Calculate pressure - p_upper = 0.5 * equations.gravity * h_upper^2 - p_lower = 0.5 * equations.gravity * h_lower^2 - - f1 = h_v_upper - f2 = h_v_upper * v_upper + p_upper - f3 = h_v_lower - f4 = h_v_lower * v_lower + p_lower - - return SVector(f1, f2, f3, f4, zero(eltype(u))) -end - -""" - flux_nonconservative_ersing_etal(u_ll, u_rr, orientation::Integer, - equations::ShallowWaterTwoLayerEquations1D) - -!!! warning "Experimental code" - This numerical flux is experimental and may change in any future release. - -Non-symmetric path-conservative two-point volume flux discretizing the nonconservative (source) term -that contains the gradient of the bottom topography [`ShallowWaterTwoLayerEquations1D`](@ref) and an -additional term that couples the momentum of both layers. 
- -This is a modified version of [`flux_nonconservative_wintermeyer_etal`](@ref) that gives entropy -conservation and well-balancedness in both the volume and surface when combined with -[`flux_wintermeyer_etal`](@ref). - -For further details see: -- Patrick Ersing, Andrew R. Winters (2023) - An entropy stable discontinuous Galerkin method for the two-layer shallow water equations on - curvilinear meshes - [DOI: 10.48550/arXiv.2306.12699](https://doi.org/10.48550/arXiv.2306.12699) -""" -@inline function flux_nonconservative_ersing_etal(u_ll, u_rr, - orientation::Integer, - equations::ShallowWaterTwoLayerEquations1D) - # Pull the necessary left and right state information - h_upper_ll, h_lower_ll = waterheight(u_ll, equations) - h_upper_rr, h_lower_rr = waterheight(u_rr, equations) - b_rr = u_rr[5] - b_ll = u_ll[5] - - # Calculate jumps - h_upper_jump = (h_upper_rr - h_upper_ll) - h_lower_jump = (h_lower_rr - h_lower_ll) - b_jump = (b_rr - b_ll) - - z = zero(eltype(u_ll)) - - # Bottom gradient nonconservative term: (0, g*h_upper*(b+h_lower)_x, - # 0, g*h_lower*(b+r*h_upper)_x, 0) - f = SVector(z, - equations.gravity * h_upper_ll * (b_jump + h_lower_jump), - z, - equations.gravity * h_lower_ll * (b_jump + equations.r * h_upper_jump), - z) - return f -end - -""" - flux_wintermeyer_etal(u_ll, u_rr, orientation, - equations::ShallowWaterTwoLayerEquations1D) - -Total energy conservative (mathematical entropy for two-layer shallow water equations) split form. -When the bottom topography is nonzero this scheme will be well-balanced when used with the -nonconservative [`flux_nonconservative_ersing_etal`](@ref). To obtain the flux for the -two-layer shallow water equations the flux that is described in the paper for the normal shallow -water equations is used within each layer. - -Further details are available in Theorem 1 of the paper: -- Niklas Wintermeyer, Andrew R. Winters, Gregor J. Gassner and David A. Kopriva (2017) - An entropy stable nodal discontinuous Galerkin method for the two dimensional - shallow water equations on unstructured curvilinear meshes with discontinuous bathymetry - [DOI: 10.1016/j.jcp.2017.03.036](https://doi.org/10.1016/j.jcp.2017.03.036) -""" -@inline function flux_wintermeyer_etal(u_ll, u_rr, - orientation::Integer, - equations::ShallowWaterTwoLayerEquations1D) - # Unpack left and right state - h_upper_ll, h_v_upper_ll, h_lower_ll, h_v_lower_ll, _ = u_ll - h_upper_rr, h_v_upper_rr, h_lower_rr, h_v_lower_rr, _ = u_rr - - # Get the velocities on either side - v_upper_ll, v_lower_ll = velocity(u_ll, equations) - v_upper_rr, v_lower_rr = velocity(u_rr, equations) - - # Average each factor of products in flux - v_upper_avg = 0.5 * (v_upper_ll + v_upper_rr) - v_lower_avg = 0.5 * (v_lower_ll + v_lower_rr) - p_upper_avg = 0.5 * equations.gravity * h_upper_ll * h_upper_rr - p_lower_avg = 0.5 * equations.gravity * h_lower_ll * h_lower_rr - - # Calculate fluxes - f1 = 0.5 * (h_v_upper_ll + h_v_upper_rr) - f2 = f1 * v_upper_avg + p_upper_avg - f3 = 0.5 * (h_v_lower_ll + h_v_lower_rr) - f4 = f3 * v_lower_avg + p_lower_avg - - return SVector(f1, f2, f3, f4, zero(eltype(u_ll))) -end - -""" - flux_es_ersing_etal(u_ll, u_rr, orientation_or_normal_direction, - equations::ShallowWaterTwoLayerEquations1D) -Entropy stable surface flux for the two-layer shallow water equations. Uses the entropy conservative -[`flux_wintermeyer_etal`](@ref) and adds a Lax-Friedrichs type dissipation dependent on the jump of -entropy variables. - -For further details see: -- Patrick Ersing, Andrew R. 
Winters (2023) - An entropy stable discontinuous Galerkin method for the two-layer shallow water equations on - curvilinear meshes - [DOI: 10.48550/arXiv.2306.12699](https://doi.org/10.48550/arXiv.2306.12699) -""" -@inline function flux_es_ersing_etal(u_ll, u_rr, - orientation::Integer, - equations::ShallowWaterTwoLayerEquations1D) - # Compute entropy conservative flux but without the bottom topography - f_ec = flux_wintermeyer_etal(u_ll, u_rr, - orientation, - equations) - - # Get maximum signal velocity - λ = max_abs_speed_naive(u_ll, u_rr, orientation, equations) - # Get entropy variables but without the bottom topography - q_rr = cons2entropy(u_rr, equations) - q_ll = cons2entropy(u_ll, equations) - - # Average values from left and right - u_avg = (u_ll + u_rr) / 2 - - # Introduce variables for better readability - rho_upper = equations.rho_upper - rho_lower = equations.rho_lower - g = equations.gravity - drho = rho_upper - rho_lower - - # Compute entropy Jacobian coefficients - h11 = -rho_lower / (g * rho_upper * drho) - h12 = -rho_lower * u_avg[2] / (g * rho_upper * u_avg[1] * drho) - h13 = 1.0 / (g * drho) - h14 = u_avg[4] / (g * u_avg[3] * drho) - h21 = -rho_lower * u_avg[2] / (g * rho_upper * u_avg[1] * drho) - h22 = ((g * rho_upper * u_avg[1]^3 - g * rho_lower * u_avg[1]^3 + - -rho_lower * u_avg[2]^2) / (g * rho_upper * u_avg[1]^2 * drho)) - h23 = u_avg[2] / (g * u_avg[1] * drho) - h24 = u_avg[2] * u_avg[4] / (g * u_avg[1] * u_avg[3] * drho) - h31 = 1.0 / (g * drho) - h32 = u_avg[2] / (g * u_avg[1] * drho) - h33 = -1.0 / (g * drho) - h34 = -u_avg[4] / (g * u_avg[3] * drho) - h41 = u_avg[4] / (g * u_avg[3] * drho) - h42 = u_avg[2] * u_avg[4] / (g * u_avg[1] * u_avg[3] * drho) - h43 = -u_avg[4] / (g * u_avg[3] * drho) - h44 = ((g * rho_upper * u_avg[3]^3 - g * rho_lower * u_avg[3]^3 + - -rho_lower * u_avg[4]^2) / (g * rho_lower * u_avg[3]^2 * drho)) - - # Entropy Jacobian matrix - H = @SMatrix [[h11;; h12;; h13;; h14;; 0]; - [h21;; h22;; h23;; h24;; 0]; - [h31;; h32;; h33;; h34;; 0]; - [h41;; h42;; h43;; h44;; 0]; - [0;; 0;; 0;; 0;; 0]] - - # Add dissipation to entropy conservative flux to obtain entropy stable flux - f_es = f_ec - 0.5 * λ * H * (q_rr - q_ll) - - return SVector(f_es[1], f_es[2], f_es[3], f_es[4], zero(eltype(u_ll))) -end - -# Calculate approximation for maximum wave speed for local Lax-Friedrichs-type dissipation as the -# maximum velocity magnitude plus the maximum speed of sound. This function uses approximate -# eigenvalues using the speed of the barotropic mode as there is no simple way to calculate them -# analytically. -# -# A good overview of the derivation is given in: -# - Jonas Nycander, Andrew McC. Hogg, Leela M. 
Frankcombe (2008) -# Open boundary conditions for nonlinear channel Flows -# [DOI: 10.1016/j.ocemod.2008.06.003](https://doi.org/10.1016/j.ocemod.2008.06.003) -@inline function max_abs_speed_naive(u_ll, u_rr, - orientation::Integer, - equations::ShallowWaterTwoLayerEquations1D) - # Unpack left and right state - h_upper_ll, h_v_upper_ll, h_lower_ll, h_v_lower_ll, _ = u_ll - h_upper_rr, h_v_upper_rr, h_lower_rr, h_v_lower_rr, _ = u_rr - - # Get the averaged velocity - v_m_ll = (h_v_upper_ll + h_v_lower_ll) / (h_upper_ll + h_lower_ll) - v_m_rr = (h_v_upper_rr + h_v_lower_rr) / (h_upper_rr + h_lower_rr) - - # Calculate the wave celerity on the left and right - h_upper_ll, h_lower_ll = waterheight(u_ll, equations) - h_upper_rr, h_lower_rr = waterheight(u_rr, equations) - c_ll = sqrt(equations.gravity * (h_upper_ll + h_lower_ll)) - c_rr = sqrt(equations.gravity * (h_upper_rr + h_lower_rr)) - - return (max(abs(v_m_ll) + c_ll, abs(v_m_rr) + c_rr)) -end - -# Specialized `DissipationLocalLaxFriedrichs` to avoid spurious dissipation in the bottom -# topography -@inline function (dissipation::DissipationLocalLaxFriedrichs)(u_ll, u_rr, - orientation_or_normal_direction, - equations::ShallowWaterTwoLayerEquations1D) - λ = dissipation.max_abs_speed(u_ll, u_rr, orientation_or_normal_direction, - equations) - diss = -0.5 * λ * (u_rr - u_ll) - return SVector(diss[1], diss[2], diss[3], diss[4], zero(eltype(u_ll))) -end - -# Absolute speed of the barotropic mode -@inline function max_abs_speeds(u, equations::ShallowWaterTwoLayerEquations1D) - h_upper, h_v_upper, h_lower, h_v_lower, _ = u - - # Calculate averaged velocity of both layers - v_m = (h_v_upper + h_v_lower) / (h_upper + h_lower) - c = sqrt(equations.gravity * (h_upper + h_lower)) - - return (abs(v_m) + c) -end - -# Helper function to extract the velocity vector from the conservative variables -@inline function velocity(u, equations::ShallowWaterTwoLayerEquations1D) - h_upper, h_v_upper, h_lower, h_v_lower, _ = u - - v_upper = h_v_upper / h_upper - v_lower = h_v_lower / h_lower - return SVector(v_upper, v_lower) -end - -# Convert conservative variables to primitive -@inline function cons2prim(u, equations::ShallowWaterTwoLayerEquations1D) - h_upper, _, h_lower, _, b = u - - H_lower = h_lower + b - H_upper = h_lower + h_upper + b - v_upper, v_lower = velocity(u, equations) - return SVector(H_upper, v_upper, H_lower, v_lower, b) -end - -# Convert conservative variables to entropy variables -# Note, only the first four are the entropy variables, the fifth entry still just carries the -# bottom topography values for convenience -@inline function cons2entropy(u, equations::ShallowWaterTwoLayerEquations1D) - h_upper, _, h_lower, _, b = u - v_upper, v_lower = velocity(u, equations) - - w1 = (equations.rho_upper * - (equations.gravity * (h_upper + h_lower + b) - 0.5 * v_upper^2)) - w2 = equations.rho_upper * v_upper - w3 = (equations.rho_lower * - (equations.gravity * (equations.r * h_upper + h_lower + b) - 0.5 * v_lower^2)) - w4 = equations.rho_lower * v_lower - return SVector(w1, w2, w3, w4, b) -end - -# Convert primitive to conservative variables -@inline function prim2cons(prim, equations::ShallowWaterTwoLayerEquations1D) - H_upper, v_upper, H_lower, v_lower, b = prim - - h_lower = H_lower - b - h_upper = H_upper - h_lower - b - h_v_upper = h_upper * v_upper - h_v_lower = h_lower * v_lower - return SVector(h_upper, h_v_upper, h_lower, h_v_lower, b) -end - -@inline function waterheight(u, equations::ShallowWaterTwoLayerEquations1D) - return SVector(u[1], 
u[3]) -end - -# Entropy function for the shallow water equations is the total energy -@inline function entropy(cons, equations::ShallowWaterTwoLayerEquations1D) - energy_total(cons, equations) -end - -# Calculate total energy for a conservative state `cons` -@inline function energy_total(cons, equations::ShallowWaterTwoLayerEquations1D) - h_upper, h_v_upper, h_lower, h_v_lower, b = cons - # Set new variables for better readability - g = equations.gravity - rho_upper = equations.rho_upper - rho_lower = equations.rho_lower - - e = (0.5 * rho_upper * (h_v_upper^2 / h_upper + g * h_upper^2) + - 0.5 * rho_lower * (h_v_lower^2 / h_lower + g * h_lower^2) + - g * rho_lower * h_lower * b + g * rho_upper * h_upper * (h_lower + b)) - return e -end - -# Calculate kinetic energy for a conservative state `cons` -@inline function energy_kinetic(u, equations::ShallowWaterTwoLayerEquations1D) - h_upper, h_v_upper, h_lower, h_v_lower, _ = u - return (0.5 * equations.rho_upper * h_v_upper^2 / h_upper + - 0.5 * equations.rho_lower * h_v_lower^2 / h_lower) -end - -# Calculate potential energy for a conservative state `cons` -@inline function energy_internal(cons, equations::ShallowWaterTwoLayerEquations1D) - return energy_total(cons, equations) - energy_kinetic(cons, equations) -end - -# Calculate the error for the "lake-at-rest" test case where H = h_upper+h_lower+b should -# be a constant value over time -@inline function lake_at_rest_error(u, equations::ShallowWaterTwoLayerEquations1D) - h_upper, _, h_lower, _, b = u - return abs(equations.H0 - (h_upper + h_lower + b)) -end -end # @muladd diff --git a/src/equations/shallow_water_two_layer_2d.jl b/src/equations/shallow_water_two_layer_2d.jl deleted file mode 100644 index a31d881f2ef..00000000000 --- a/src/equations/shallow_water_two_layer_2d.jl +++ /dev/null @@ -1,805 +0,0 @@ -# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). -# Since these FMAs can increase the performance of many numerical algorithms, -# we need to opt-in explicitly. -# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. -@muladd begin -#! format: noindent - -# TODO: TrixiShallowWater: 2D two layer equations should move to new package - -@doc raw""" - ShallowWaterTwoLayerEquations2D(gravity, H0, rho_upper, rho_lower) - -Two-Layer Shallow water equations (2LSWE) in two space dimension. 
The equations are given by -```math -\begin{alignat*}{8} -&\frac{\partial}{\partial t}h_{upper} -&&+ \frac{\partial}{\partial x}\left(h_{upper} v_{1,upper}\right) -&&+ \frac{\partial}{\partial y}\left(h_{upper} v_{2,upper}\right) \quad -&&= \quad 0 \\ -&\frac{\partial}{\partial t}\left(h_{upper} v_{1,upper}\right) -&&+ \frac{\partial}{\partial x}\left(h_{upper} v_{1,upper}^2 + \frac{gh_{upper}^2}{2}\right) -&&+ \frac{\partial}{\partial y}\left(h_{upper} v_{1,upper} v_{2,upper}\right) \quad -&&= -gh_{upper}\frac{\partial}{\partial x}\left(b+h_{lower}\right) \\ -&\frac{\partial}{\partial t}\left(h_{upper} v_{2,upper}\right) -&&+ \frac{\partial}{\partial x}\left(h_{upper} v_{1,upper} v_{2,upper}\right) -&&+ \frac{\partial}{\partial y}\left(h_{upper} v_{2,upper}^2 + \frac{gh_{upper}^2}{2}\right) -&&= -gh_{upper}\frac{\partial}{\partial y}\left(b+h_{lower}\right)\\ -&\frac{\partial}{\partial t}h_{lower} -&&+ \frac{\partial}{\partial x}\left(h_{lower} v_{1,lower}\right) -&&+ \frac{\partial}{\partial y}\left(h_{lower} v_{2,lower}\right) -&&= \quad 0 \\ -&\frac{\partial}{\partial t}\left(h_{lower} v_{1,lower}\right) -&&+ \frac{\partial}{\partial x}\left(h_{lower} v_{1,lower}^2 + \frac{gh_{lower}^2}{2}\right) -&&+ \frac{\partial}{\partial y}\left(h_{lower} v_{1,lower} v_{2,lower}\right) -&&= -gh_{lower}\frac{\partial}{\partial x}\left(b+\frac{\rho_{upper}}{\rho_{lower}} h_{upper}\right)\\ -&\frac{\partial}{\partial t}\left(h_{lower} v_{2,lower}\right) -&&+ \frac{\partial}{\partial x}\left(h_{lower} v_{1,lower} v_{2,lower}\right) -&&+ \frac{\partial}{\partial y}\left(h_{lower} v_{2,lower}^2 + \frac{gh_{lower}^2}{2}\right) -&&= -gh_{lower}\frac{\partial}{\partial y}\left(b+\frac{\rho_{upper}}{\rho_{lower}} h_{upper}\right) -\end{alignat*} -``` -The unknown quantities of the 2LSWE are the water heights of the lower layer ``h_{lower}`` and the -upper -layer ``h_{upper}`` and the respective velocities in x-direction ``v_{1,lower}`` and ``v_{1,upper}`` and in y-direction -``v_{2,lower}`` and ``v_{2,upper}``. The gravitational constant is denoted by `g`, the layer densitites by -``\rho_{upper}``and ``\rho_{lower}`` and the (possibly) variable bottom topography function by ``b(x)``. -Conservative variable water height ``h_{lower}`` is measured from the bottom topography ``b`` and ``h_{upper}`` -relative to ``h_{lower}``, therefore one also defines the total water heights as ``H_{lower} = h_{lower} + b`` and -``H_{upper} = h_{upper} + h_{lower} + b``. - -The densities must be chosen such that ``\rho_{upper} < \rho_{lower}``, to make sure that the heavier fluid -``\rho_{lower}`` is in the bottom layer and the lighter fluid ``\rho_{upper}`` in the upper layer. - -The additional quantity ``H_0`` is also available to store a reference value for the total water -height that is useful to set initial conditions or test the "lake-at-rest" well-balancedness. - -The bottom topography function ``b(x)`` is set inside the initial condition routine -for a particular problem setup. - -In addition to the unknowns, Trixi currently stores the bottom topography values at the -approximation points despite being fixed in time. This is done for convenience of computing the -bottom topography gradients on the fly during the approximation as well as computing auxiliary -quantities like the total water height ``H`` or the entropy variables. -This affects the implementation and use of these equations in various ways: -* The flux values corresponding to the bottom topography must be zero. 
-
-* The bottom topography values must be included when defining initial conditions, boundary
-  conditions or source terms.
-* [`AnalysisCallback`](@ref) analyzes this variable.
-* Trixi's visualization tools will visualize the bottom topography by default.
-
-A good introduction for the 2LSWE is available in Chapter 12 of the book:
-  - Benoit Cushman-Roisin (2011)\
-    Introduction to geophysical fluid dynamics: physical and numerical aspects\
-    \
-    ISBN: 978-0-12-088759-0
-"""
-struct ShallowWaterTwoLayerEquations2D{RealT <: Real} <:
-       AbstractShallowWaterEquations{2, 7}
-    gravity::RealT # gravitational constant
-    H0::RealT # constant "lake-at-rest" total water height
-    rho_upper::RealT # upper layer density
-    rho_lower::RealT # lower layer density
-    r::RealT # ratio of rho_upper / rho_lower
-end
-
-# Allow for flexibility to set the gravitational constant within an elixir depending on the
-# application where `gravity_constant=1.0` or `gravity_constant=9.81` are common values.
-# The reference total water height H0 defaults to 0.0 but is used for the "lake-at-rest"
-# well-balancedness test cases. Densities must be specified such that rho_upper < rho_lower.
-function ShallowWaterTwoLayerEquations2D(; gravity_constant,
-                                         H0 = zero(gravity_constant), rho_upper,
-                                         rho_lower)
-    # Assign density ratio if rho_upper <= rho_lower
-    if rho_upper > rho_lower
-        error("Invalid input: Densities must be chosen such that rho_upper <= rho_lower")
-    else
-        r = rho_upper / rho_lower
-    end
-    ShallowWaterTwoLayerEquations2D(gravity_constant, H0, rho_upper, rho_lower, r)
-end
-
-have_nonconservative_terms(::ShallowWaterTwoLayerEquations2D) = True()
-function varnames(::typeof(cons2cons), ::ShallowWaterTwoLayerEquations2D)
-    ("h_upper", "h_v1_upper", "h_v2_upper", "h_lower", "h_v1_lower", "h_v2_lower", "b")
-end
-# Note, we use the total water height, H_upper = h_upper + h_lower + b, and first layer total height
-# H_lower = h_lower + b as the first primitive variable for easier visualization and setting initial
-# conditions
-function varnames(::typeof(cons2prim), ::ShallowWaterTwoLayerEquations2D)
-    ("H_upper", "v1_upper", "v2_upper", "H_lower", "v1_lower", "v2_lower", "b")
-end
-
-# Set initial conditions at physical location `x` for time `t`
-"""
-    initial_condition_convergence_test(x, t, equations::ShallowWaterTwoLayerEquations2D)
-
-A smooth initial condition used for convergence tests in combination with
-[`source_terms_convergence_test`](@ref). Constants must be set to ``rho_{upper} = 0.9``,
-``rho_{lower} = 1.0``, ``g = 10.0``.
-"""
-function initial_condition_convergence_test(x, t,
-                                            equations::ShallowWaterTwoLayerEquations2D)
-    # some constants are chosen such that the function is periodic on the domain [0, sqrt(2)]^2
-    ω = 2.0 * pi * sqrt(2.0)
-
-    H_lower = 2.0 + 0.1 * sin(ω * x[1] + t) * cos(ω * x[2] + t)
-    H_upper = 4.0 + 0.1 * cos(ω * x[1] + t) * sin(ω * x[2] + t)
-    v1_lower = 1.0
-    v1_upper = 0.9
-    v2_lower = 0.9
-    v2_upper = 1.0
-    b = 1.0 + 0.1 * cos(0.5 * ω * x[1]) * sin(0.5 * ω * x[2])
-
-    return prim2cons(SVector(H_upper, v1_upper, v2_upper, H_lower, v1_lower, v2_lower,
-                             b), equations)
-end
-
-"""
-    source_terms_convergence_test(u, x, t, equations::ShallowWaterTwoLayerEquations2D)
-
-Source terms used for convergence tests in combination with
-[`initial_condition_convergence_test`](@ref).
-"""
-@inline function source_terms_convergence_test(u, x, t,
-                                               equations::ShallowWaterTwoLayerEquations2D)
-    # Same settings as in `initial_condition_convergence_test`.
- # some constants are chosen such that the function is periodic on the domain [0,sqrt(2)]^2] - ω = 2.0 * pi * sqrt(2.0) - - # Source terms obtained with SymPy - du1 = 0.01 * ω * cos(t + ω * x[1]) * cos(t + ω * x[2]) + - 0.01 * ω * sin(t + ω * x[1]) * sin(t + ω * x[2]) - du2 = (5.0 * - (-0.1 * ω * cos(t + ω * x[1]) * cos(t + ω * x[2]) - - 0.1 * ω * sin(t + ω * x[1]) * sin(t + - ω * x[2])) * - (4.0 + 0.2cos(t + ω * x[1]) * sin(t + ω * x[2]) - - 0.2 * sin(t + ω * x[1]) * cos(t + - ω * x[2])) + - 0.009 * ω * cos(t + ω * x[1]) * cos(t + ω * x[2]) + - 0.009 * ω * sin(t + ω * x[1]) * sin(t + - ω * x[2]) + - 0.1 * ω * - (20.0 + cos(t + ω * x[1]) * sin(t + ω * x[2]) - - sin(t + ω * x[1]) * cos(t + - ω * x[2])) * cos(t + ω * x[1]) * cos(t + ω * x[2])) - du3 = (5.0 * - (0.1 * ω * cos(t + ω * x[1]) * cos(t + ω * x[2]) + - 0.1 * ω * sin(t + ω * x[1]) * sin(t + - ω * x[2])) * - (4.0 + 0.2 * cos(t + ω * x[1]) * sin(t + ω * x[2]) - - 0.2 * sin(t + ω * x[1]) * cos(t + - ω * x[2])) + - 0.01ω * cos(t + ω * x[1]) * cos(t + ω * x[2]) + - 0.01 * ω * sin(t + ω * x[1]) * sin(t + ω * x[2]) + - -0.1 * ω * - (20.0 + cos(t + ω * x[1]) * sin(t + ω * x[2]) - - sin(t + ω * x[1]) * cos(t + ω * x[2])) * sin(t + - ω * x[1]) * sin(t + ω * x[2])) - du4 = (0.1 * cos(t + ω * x[1]) * cos(t + ω * x[2]) + - 0.1 * ω * cos(t + ω * x[1]) * cos(t + ω * x[2]) + - 0.05 * ω * sin(0.5 * ω * x[1]) * sin(0.5 * ω * x[2]) - - 0.1 * sin(t + ω * x[1]) * sin(t + ω * x[2]) + - -0.045 * ω * cos(0.5 * ω * x[1]) * cos(0.5 * ω * x[2]) - - 0.09 * ω * sin(t + ω * x[1]) * sin(t + ω * x[2])) - du5 = ((10.0 + sin(t + ω * x[1]) * cos(t + ω * x[2]) - - cos(0.5 * ω * x[1]) * sin(0.5 * ω * x[2])) * (-0.09 * ω * cos(t + - ω * x[1]) * cos(t + ω * x[2]) - - 0.09 * ω * sin(t + ω * x[1]) * sin(t + ω * x[2]) + - -0.05 * ω * sin(0.5 * ω * x[1]) * sin(0.5 * ω * x[2])) + - 5.0 * - (0.1 * ω * cos(t + ω * x[1]) * cos(t + ω * x[2]) + - 0.05 * ω * sin(0.5 * ω * x[1]) * sin(0.5 * ω * x[2])) * - (2.0 + 0.2 * sin(t + ω * x[1]) * cos(t + ω * x[2]) + - -0.2 * cos(0.5 * ω * x[1]) * sin(0.5 * ω * x[2])) + - 0.1 * cos(t + ω * x[1]) * cos(t + ω * x[2]) + - 0.1 * ω * cos(t + - ω * x[1]) * cos(t + ω * x[2]) + - 0.05 * ω * sin(0.5 * ω * x[1]) * sin(0.5 * ω * x[2]) - - 0.1 * sin(t + - ω * x[1]) * sin(t + ω * x[2]) - - 0.045 * ω * cos(0.5 * ω * x[1]) * cos(0.5 * ω * x[2]) - - 0.09 * ω * sin(t + - ω * x[1]) * sin(t + ω * x[2])) - du6 = ((10.0 + sin(t + ω * x[1]) * cos(t + ω * x[2]) + - -cos(0.5 * ω * x[1]) * sin(0.5 * ω * x[2])) * - (0.05 * ω * cos(0.5 * ω * x[1]) * cos(0.5 * ω * x[2]) + - 0.09 * ω * cos(t + ω * x[1]) * cos(t + ω * x[2]) + - 0.09 * ω * sin(t + ω * x[1]) * sin(t + ω * x[2])) + - 5.0 * - (-0.05 * ω * cos(0.5 * ω * x[1]) * cos(0.5 * ω * x[2]) - - 0.1 * ω * sin(t + ω * x[1]) * sin(t + - ω * x[2])) * - (2.0 + 0.2 * sin(t + ω * x[1]) * cos(t + ω * x[2]) + - -0.2 * cos(0.5 * ω * x[1]) * sin(0.5 * ω * x[2])) + - 0.09cos(t + ω * x[1]) * cos(t + ω * x[2]) + - 0.09 * ω * cos(t + ω * x[1]) * cos(t + ω * x[2]) + - 0.045 * ω * sin(0.5 * ω * x[1]) * sin(0.5 * ω * x[2]) + - -0.09 * sin(t + ω * x[1]) * sin(t + ω * x[2]) - - 0.0405 * ω * cos(0.5 * ω * x[1]) * cos(0.5 * ω * x[2]) + - -0.081 * ω * sin(t + ω * x[1]) * sin(t + ω * x[2])) - - return SVector(du1, du2, du3, du4, du5, du6, zero(eltype(u))) -end - -""" - boundary_condition_slip_wall(u_inner, normal_direction, x, t, surface_flux_function, - equations::ShallowWaterTwoLayerEquations2D) - -Create a boundary state by reflecting the normal velocity component and keep -the tangential velocity component unchanged. 
The boundary water height is taken from -the internal value. - -For details see Section 9.2.5 of the book: -- Eleuterio F. Toro (2001) - Shock-Capturing Methods for Free-Surface Shallow Flows - 1st edition - ISBN 0471987662 -""" -@inline function boundary_condition_slip_wall(u_inner, normal_direction::AbstractVector, - x, t, surface_flux_function, - equations::ShallowWaterTwoLayerEquations2D) - # normalize the outward pointing direction - normal = normal_direction / norm(normal_direction) - - # compute the normal velocity - v_normal_upper = normal[1] * u_inner[2] + normal[2] * u_inner[3] - v_normal_lower = normal[1] * u_inner[5] + normal[2] * u_inner[6] - - # create the "external" boundary solution state - u_boundary = SVector(u_inner[1], - u_inner[2] - 2.0 * v_normal_upper * normal[1], - u_inner[3] - 2.0 * v_normal_upper * normal[2], - u_inner[4], - u_inner[5] - 2.0 * v_normal_lower * normal[1], - u_inner[6] - 2.0 * v_normal_lower * normal[2], - u_inner[7]) - - # calculate the boundary flux - flux = surface_flux_function(u_inner, u_boundary, normal_direction, equations) - return flux -end - -# Calculate 1D flux for a single point -# Note, the bottom topography has no flux -@inline function flux(u, orientation::Integer, - equations::ShallowWaterTwoLayerEquations2D) - h_upper, h_v1_upper, h_v2_upper, h_lower, h_v1_lower, h_v2_lower, _ = u - - # Calculate velocities - v1_upper, v2_upper, v1_lower, v2_lower = velocity(u, equations) - - # Calculate pressure - p_upper = 0.5 * equations.gravity * h_upper^2 - p_lower = 0.5 * equations.gravity * h_lower^2 - - # Calculate fluxes depending on orientation - if orientation == 1 - f1 = h_v1_upper - f2 = h_v1_upper * v1_upper + p_upper - f3 = h_v1_upper * v2_upper - f4 = h_v1_lower - f5 = h_v1_lower * v1_lower + p_lower - f6 = h_v1_lower * v2_lower - else - f1 = h_v2_upper - f2 = h_v2_upper * v1_upper - f3 = h_v2_upper * v2_upper + p_upper - f4 = h_v2_lower - f5 = h_v2_lower * v1_lower - f6 = h_v2_lower * v2_lower + p_lower - end - return SVector(f1, f2, f3, f4, f5, f6, zero(eltype(u))) -end - -# Calculate 1D flux for a single point in the normal direction -# Note, this directional vector is not normalized and the bottom topography has no flux -@inline function flux(u, normal_direction::AbstractVector, - equations::ShallowWaterTwoLayerEquations2D) - h_upper, h_lower = waterheight(u, equations) - v1_upper, v2_upper, v1_lower, v2_lower = velocity(u, equations) - - v_normal_upper = v1_upper * normal_direction[1] + v2_upper * normal_direction[2] - v_normal_lower = v1_lower * normal_direction[1] + v2_lower * normal_direction[2] - h_v_upper_normal = h_upper * v_normal_upper - h_v_lower_normal = h_lower * v_normal_lower - - p_upper = 0.5 * equations.gravity * h_upper^2 - p_lower = 0.5 * equations.gravity * h_lower^2 - - f1 = h_v_upper_normal - f2 = h_v_upper_normal * v1_upper + p_upper * normal_direction[1] - f3 = h_v_upper_normal * v2_upper + p_upper * normal_direction[2] - f4 = h_v_lower_normal - f5 = h_v_lower_normal * v1_lower + p_lower * normal_direction[1] - f6 = h_v_lower_normal * v2_lower + p_lower * normal_direction[2] - - return SVector(f1, f2, f3, f4, f5, f6, zero(eltype(u))) -end - -""" - flux_nonconservative_ersing_etal(u_ll, u_rr, orientation::Integer, - equations::ShallowWaterTwoLayerEquations2D) - flux_nonconservative_ersing_etal(u_ll, u_rr, - normal_direction_ll::AbstractVector, - normal_direction_average::AbstractVector, - equations::ShallowWaterTwoLayerEquations2D) - -!!! 
warning "Experimental code" - This numerical flux is experimental and may change in any future release. - -Non-symmetric path-conservative two-point volume flux discretizing the nonconservative (source) term -that contains the gradient of the bottom topography [`ShallowWaterTwoLayerEquations2D`](@ref) and an -additional term that couples the momentum of both layers. - -This is a modified version of [`flux_nonconservative_wintermeyer_etal`](@ref) that gives entropy -conservation and well-balancedness in both the volume and surface when combined with -[`flux_wintermeyer_etal`](@ref). - -For further details see: -- Patrick Ersing, Andrew R. Winters (2023) - An entropy stable discontinuous Galerkin method for the two-layer shallow water equations on - curvilinear meshes - [DOI: 10.48550/arXiv.2306.12699](https://doi.org/10.48550/arXiv.2306.12699) -""" -@inline function flux_nonconservative_ersing_etal(u_ll, u_rr, - orientation::Integer, - equations::ShallowWaterTwoLayerEquations2D) - # Pull the necessary left and right state information - h_upper_ll, h_lower_ll = waterheight(u_ll, equations) - h_upper_rr, h_lower_rr = waterheight(u_rr, equations) - b_rr = u_rr[7] - b_ll = u_ll[7] - - # Calculate jumps - h_upper_jump = (h_upper_rr - h_upper_ll) - h_lower_jump = (h_lower_rr - h_lower_ll) - b_jump = (b_rr - b_ll) - - z = zero(eltype(u_ll)) - - # Bottom gradient nonconservative term: (0, g*h_upper*(b + h_lower)_x, g*h_upper*(b + h_lower)_y , - # 0, g*h_lower*(b + r*h_upper)_x, - # g*h_lower*(b + r*h_upper)_y, 0) - if orientation == 1 - f = SVector(z, - equations.gravity * h_upper_ll * (b_jump + h_lower_jump), - z, z, - equations.gravity * h_lower_ll * - (b_jump + equations.r * h_upper_jump), - z, z) - else # orientation == 2 - f = SVector(z, z, - equations.gravity * h_upper_ll * (b_jump + h_lower_jump), - z, z, - equations.gravity * h_lower_ll * - (b_jump + equations.r * h_upper_jump), - z) - end - - return f -end - -@inline function flux_nonconservative_ersing_etal(u_ll, u_rr, - normal_direction_ll::AbstractVector, - normal_direction_average::AbstractVector, - equations::ShallowWaterTwoLayerEquations2D) - # Pull the necessary left and right state information - h_upper_ll, h_lower_ll = waterheight(u_ll, equations) - h_upper_rr, h_lower_rr = waterheight(u_rr, equations) - b_rr = u_rr[7] - b_ll = u_ll[7] - - # Calculate jumps - h_upper_jump = (h_upper_rr - h_upper_ll) - h_lower_jump = (h_lower_rr - h_lower_ll) - b_jump = (b_rr - b_ll) - - # Note this routine only uses the `normal_direction_average` and the average of the - # bottom topography to get a quadratic split form DG gradient on curved elements - return SVector(zero(eltype(u_ll)), - normal_direction_average[1] * equations.gravity * h_upper_ll * - (b_jump + h_lower_jump), - normal_direction_average[2] * equations.gravity * h_upper_ll * - (b_jump + h_lower_jump), - zero(eltype(u_ll)), - normal_direction_average[1] * equations.gravity * h_lower_ll * - (b_jump + equations.r * h_upper_jump), - normal_direction_average[2] * equations.gravity * h_lower_ll * - (b_jump + equations.r * h_upper_jump), - zero(eltype(u_ll))) -end - -""" - flux_wintermeyer_etal(u_ll, u_rr, orientation, - equations::ShallowWaterTwoLayerEquations2D) - flux_wintermeyer_etal(u_ll, u_rr, - normal_direction::AbstractVector, - equations::ShallowWaterTwoLayerEquations2D) - -Total energy conservative (mathematical entropy for two-layer shallow water equations) split form. 
-When the bottom topography is nonzero this scheme will be well-balanced when used with the -nonconservative [`flux_nonconservative_ersing_etal`](@ref). To obtain the flux for the -two-layer shallow water equations the flux that is described in the paper for the normal shallow -water equations is used within each layer. - -Further details are available in Theorem 1 of the paper: -- Niklas Wintermeyer, Andrew R. Winters, Gregor J. Gassner and David A. Kopriva (2017) - An entropy stable nodal discontinuous Galerkin method for the two dimensional - shallow water equations on unstructured curvilinear meshes with discontinuous bathymetry - [DOI: 10.1016/j.jcp.2017.03.036](https://doi.org/10.1016/j.jcp.2017.03.036) -""" -@inline function flux_wintermeyer_etal(u_ll, u_rr, - orientation::Integer, - equations::ShallowWaterTwoLayerEquations2D) - # Unpack left and right state - h_upper_ll, h_v1_upper_ll, h_v2_upper_ll, h_lower_ll, h_v1_lower_ll, h_v2_lower_ll, _ = u_ll - h_upper_rr, h_v1_upper_rr, h_v2_upper_rr, h_lower_rr, h_v1_lower_rr, h_v2_lower_rr, _ = u_rr - - # Get the velocities on either side - v1_upper_ll, v2_upper_ll, v1_lower_ll, v2_lower_ll = velocity(u_ll, equations) - v1_upper_rr, v2_upper_rr, v1_lower_rr, v2_lower_rr = velocity(u_rr, equations) - - # Average each factor of products in flux - v1_upper_avg = 0.5 * (v1_upper_ll + v1_upper_rr) - v1_lower_avg = 0.5 * (v1_lower_ll + v1_lower_rr) - v2_upper_avg = 0.5 * (v2_upper_ll + v2_upper_rr) - v2_lower_avg = 0.5 * (v2_lower_ll + v2_lower_rr) - p_upper_avg = 0.5 * equations.gravity * h_upper_ll * h_upper_rr - p_lower_avg = 0.5 * equations.gravity * h_lower_ll * h_lower_rr - - # Calculate fluxes depending on orientation - if orientation == 1 - f1 = 0.5 * (h_v1_upper_ll + h_v1_upper_rr) - f2 = f1 * v1_upper_avg + p_upper_avg - f3 = f1 * v2_upper_avg - f4 = 0.5 * (h_v1_lower_ll + h_v1_lower_rr) - f5 = f4 * v1_lower_avg + p_lower_avg - f6 = f4 * v2_lower_avg - else - f1 = 0.5 * (h_v2_upper_ll + h_v2_upper_rr) - f2 = f1 * v1_upper_avg - f3 = f1 * v2_upper_avg + p_upper_avg - f4 = 0.5 * (h_v2_lower_ll + h_v2_lower_rr) - f5 = f4 * v1_lower_avg - f6 = f4 * v2_lower_avg + p_lower_avg - end - - return SVector(f1, f2, f3, f4, f5, f6, zero(eltype(u_ll))) -end - -@inline function flux_wintermeyer_etal(u_ll, u_rr, - normal_direction::AbstractVector, - equations::ShallowWaterTwoLayerEquations2D) - # Unpack left and right state - h_upper_ll, h_v1_upper_ll, h_v2_upper_ll, h_lower_ll, h_v1_lower_ll, h_v2_lower_ll, _ = u_ll - h_upper_rr, h_v1_upper_rr, h_v2_upper_rr, h_lower_rr, h_v1_lower_rr, h_v2_lower_rr, _ = u_rr - - # Get the velocities on either side - v1_upper_ll, v2_upper_ll, v1_lower_ll, v2_lower_ll = velocity(u_ll, equations) - v1_upper_rr, v2_upper_rr, v1_lower_rr, v2_lower_rr = velocity(u_rr, equations) - - # Average each factor of products in flux - v1_upper_avg = 0.5 * (v1_upper_ll + v1_upper_rr) - v1_lower_avg = 0.5 * (v1_lower_ll + v1_lower_rr) - v2_upper_avg = 0.5 * (v2_upper_ll + v2_upper_rr) - v2_lower_avg = 0.5 * (v2_lower_ll + v2_lower_rr) - p_upper_avg = 0.5 * equations.gravity * h_upper_ll * h_upper_rr - p_lower_avg = 0.5 * equations.gravity * h_lower_ll * h_lower_rr - h_v1_upper_avg = 0.5 * (h_v1_upper_ll + h_v1_upper_rr) - h_v2_upper_avg = 0.5 * (h_v2_upper_ll + h_v2_upper_rr) - h_v1_lower_avg = 0.5 * (h_v1_lower_ll + h_v1_lower_rr) - h_v2_lower_avg = 0.5 * (h_v2_lower_ll + h_v2_lower_rr) - - # Calculate fluxes depending on normal_direction - f1 = h_v1_upper_avg * normal_direction[1] + h_v2_upper_avg * normal_direction[2] - f2 = f1 
* v1_upper_avg + p_upper_avg * normal_direction[1] - f3 = f1 * v2_upper_avg + p_upper_avg * normal_direction[2] - f4 = h_v1_lower_avg * normal_direction[1] + h_v2_lower_avg * normal_direction[2] - f5 = f4 * v1_lower_avg + p_lower_avg * normal_direction[1] - f6 = f4 * v2_lower_avg + p_lower_avg * normal_direction[2] - - return SVector(f1, f2, f3, f4, f5, f6, zero(eltype(u_ll))) -end - -""" - flux_es_ersing_etal(u_ll, u_rr, orientation_or_normal_direction, - equations::ShallowWaterTwoLayerEquations2D) - -Entropy stable surface flux for the two-layer shallow water equations. Uses the entropy conservative -[`flux_wintermeyer_etal`](@ref) and adds a Lax-Friedrichs type dissipation dependent on the jump of -entropy variables. - -For further details see: -- Patrick Ersing, Andrew R. Winters (2023) - An entropy stable discontinuous Galerkin method for the two-layer shallow water equations on - curvilinear meshes - [DOI: 10.48550/arXiv.2306.12699](https://doi.org/10.48550/arXiv.2306.12699) -""" -@inline function flux_es_ersing_etal(u_ll, u_rr, - orientation_or_normal_direction, - equations::ShallowWaterTwoLayerEquations2D) - # Compute entropy conservative flux but without the bottom topography - f_ec = flux_wintermeyer_etal(u_ll, u_rr, - orientation_or_normal_direction, - equations) - - # Get maximum signal velocity - λ = max_abs_speed_naive(u_ll, u_rr, orientation_or_normal_direction, equations) - - # Get entropy variables but without the bottom topography - q_rr = cons2entropy(u_rr, equations) - q_ll = cons2entropy(u_ll, equations) - - # Average values from left and right - u_avg = (u_ll + u_rr) / 2 - - # Introduce variables for better readability - rho_upper = equations.rho_upper - rho_lower = equations.rho_lower - g = equations.gravity - drho = rho_upper - rho_lower - - # Compute entropy Jacobian coefficients - h11 = -rho_lower / (g * rho_upper * drho) - h12 = -rho_lower * u_avg[2] / (g * rho_upper * u_avg[1] * drho) - h13 = -rho_lower * u_avg[3] / (g * rho_upper * u_avg[1] * drho) - h14 = 1.0 / (g * drho) - h15 = u_avg[5] / (g * u_avg[4] * drho) - h16 = u_avg[6] / (g * u_avg[4] * drho) - h21 = -rho_lower * u_avg[2] / (g * rho_upper * u_avg[1] * drho) - h22 = ((g * rho_upper * u_avg[1]^3 - g * rho_lower * u_avg[1]^3 + - -rho_lower * u_avg[2]^2) / (g * rho_upper * u_avg[1]^2 * drho)) - h23 = -rho_lower * u_avg[2] * u_avg[3] / (g * rho_upper * u_avg[1]^2 * drho) - h24 = u_avg[2] / (g * u_avg[1] * drho) - h25 = u_avg[2] * u_avg[5] / (g * u_avg[1] * u_avg[4] * drho) - h26 = u_avg[2] * u_avg[6] / (g * u_avg[1] * u_avg[4] * drho) - h31 = -rho_lower * u_avg[3] / (g * rho_upper * u_avg[1] * drho) - h32 = -rho_lower * u_avg[2] * u_avg[3] / (g * rho_upper * u_avg[1]^2 * drho) - h33 = ((g * rho_upper * u_avg[1]^3 - g * rho_lower * u_avg[1]^3 + - -rho_lower * u_avg[3]^2) / (g * rho_upper * u_avg[1]^2 * drho)) - h34 = u_avg[3] / (g * u_avg[1] * drho) - h35 = u_avg[3] * u_avg[5] / (g * u_avg[1] * u_avg[4] * drho) - h36 = u_avg[3] * u_avg[6] / (g * u_avg[1] * u_avg[4] * drho) - h41 = 1.0 / (g * drho) - h42 = u_avg[2] / (g * u_avg[1] * drho) - h43 = u_avg[3] / (g * u_avg[1] * drho) - h44 = -1.0 / (g * drho) - h45 = -u_avg[5] / (g * u_avg[4] * drho) - h46 = -u_avg[6] / (g * u_avg[4] * drho) - h51 = u_avg[5] / (g * u_avg[4] * drho) - h52 = u_avg[2] * u_avg[5] / (g * u_avg[1] * u_avg[4] * drho) - h53 = u_avg[3] * u_avg[5] / (g * u_avg[1] * u_avg[4] * drho) - h54 = -u_avg[5] / (g * u_avg[4] * drho) - h55 = ((g * rho_upper * u_avg[4]^3 - g * rho_lower * u_avg[4]^3 + - -rho_lower * u_avg[5]^2) / (g * rho_lower * 
u_avg[4]^2 * drho)) - h56 = -u_avg[5] * u_avg[6] / (g * u_avg[4]^2 * drho) - h61 = u_avg[6] / (g * u_avg[4] * drho) - h62 = u_avg[2] * u_avg[6] / (g * u_avg[1] * u_avg[4] * drho) - h63 = u_avg[3] * u_avg[6] / (g * u_avg[1] * u_avg[4] * drho) - h64 = -u_avg[6] / (g * u_avg[4] * drho) - h65 = -u_avg[5] * u_avg[6] / (g * u_avg[4]^2 * drho) - h66 = ((g * rho_upper * u_avg[4]^3 - g * rho_lower * u_avg[4]^3 + - -rho_lower * u_avg[6]^2) / (g * rho_lower * u_avg[4]^2 * drho)) - - # Entropy Jacobian matrix - H = @SMatrix [[h11;; h12;; h13;; h14;; h15;; h16;; 0]; - [h21;; h22;; h23;; h24;; h25;; h26;; 0]; - [h31;; h32;; h33;; h34;; h35;; h36;; 0]; - [h41;; h42;; h43;; h44;; h45;; h46;; 0]; - [h51;; h52;; h53;; h54;; h55;; h56;; 0]; - [h61;; h62;; h63;; h64;; h65;; h66;; 0]; - [0;; 0;; 0;; 0;; 0;; 0;; 0]] - - # Add dissipation to entropy conservative flux to obtain entropy stable flux - f_es = f_ec - 0.5 * λ * H * (q_rr - q_ll) - - return SVector(f_es[1], f_es[2], f_es[3], f_es[4], f_es[5], f_es[6], - zero(eltype(u_ll))) -end - -# Calculate approximation for maximum wave speed for local Lax-Friedrichs-type dissipation as the -# maximum velocity magnitude plus the maximum speed of sound. This function uses approximate -# eigenvalues using the speed of the barotropic mode as there is no simple way to calculate them -# analytically. -# -# A good overview of the derivation is given in: -# - Jonas Nycander, Andrew McC. Hogg, Leela M. Frankcombe (2008) -# Open boundary conditions for nonlinear channel Flows -# [DOI: 10.1016/j.ocemod.2008.06.003](https://doi.org/10.1016/j.ocemod.2008.06.003) -@inline function max_abs_speed_naive(u_ll, u_rr, - orientation::Integer, - equations::ShallowWaterTwoLayerEquations2D) - # Unpack left and right state - h_upper_ll, h_v1_upper_ll, h_v2_upper_ll, h_lower_ll, h_v1_lower_ll, h_v2_lower_ll, _ = u_ll - h_upper_rr, h_v1_upper_rr, h_v2_upper_rr, h_lower_rr, h_v1_lower_rr, h_v2_lower_rr, _ = u_rr - - # Calculate averaged velocity of both layers - if orientation == 1 - v_m_ll = (h_v1_upper_ll + h_v1_lower_ll) / (h_upper_ll + h_lower_ll) - v_m_rr = (h_v1_upper_rr + h_v1_lower_rr) / (h_upper_rr + h_lower_rr) - else - v_m_ll = (h_v2_upper_ll + h_v2_lower_ll) / (h_upper_ll + h_lower_ll) - v_m_rr = (h_v2_upper_rr + h_v2_lower_rr) / (h_upper_rr + h_lower_rr) - end - - # Calculate the wave celerity on the left and right - h_upper_ll, h_lower_ll = waterheight(u_ll, equations) - h_upper_rr, h_lower_rr = waterheight(u_rr, equations) - - c_ll = sqrt(equations.gravity * (h_upper_ll + h_lower_ll)) - c_rr = sqrt(equations.gravity * (h_upper_rr + h_lower_rr)) - - return (max(abs(v_m_ll), abs(v_m_rr)) + max(c_ll, c_rr)) -end - -@inline function max_abs_speed_naive(u_ll, u_rr, - normal_direction::AbstractVector, - equations::ShallowWaterTwoLayerEquations2D) - # Unpack left and right state - h_upper_ll, _, _, h_lower_ll, _, _, _ = u_ll - h_upper_rr, _, _, h_lower_rr, _, _, _ = u_rr - - # Extract and compute the velocities in the normal direction - v1_upper_ll, v2_upper_ll, v1_lower_ll, v2_lower_ll = velocity(u_ll, equations) - v1_upper_rr, v2_upper_rr, v1_lower_rr, v2_lower_rr = velocity(u_rr, equations) - - v_upper_dot_n_ll = v1_upper_ll * normal_direction[1] + - v2_upper_ll * normal_direction[2] - v_upper_dot_n_rr = v1_upper_rr * normal_direction[1] + - v2_upper_rr * normal_direction[2] - v_lower_dot_n_ll = v1_lower_ll * normal_direction[1] + - v2_lower_ll * normal_direction[2] - v_lower_dot_n_rr = v1_lower_rr * normal_direction[1] + - v2_lower_rr * normal_direction[2] - - # Calculate averaged 
velocity of both layers
-    v_m_ll = (v_upper_dot_n_ll * h_upper_ll + v_lower_dot_n_ll * h_lower_ll) /
-             (h_upper_ll + h_lower_ll)
-    v_m_rr = (v_upper_dot_n_rr * h_upper_rr + v_lower_dot_n_rr * h_lower_rr) /
-             (h_upper_rr + h_lower_rr)
-
-    # Compute the wave celerity on the left and right
-    h_upper_ll, h_lower_ll = waterheight(u_ll, equations)
-    h_upper_rr, h_lower_rr = waterheight(u_rr, equations)
-
-    c_ll = sqrt(equations.gravity * (h_upper_ll + h_lower_ll))
-    c_rr = sqrt(equations.gravity * (h_upper_rr + h_lower_rr))
-
-    # The normal velocities are already scaled by the norm
-    return max(abs(v_m_ll), abs(v_m_rr)) + max(c_ll, c_rr) * norm(normal_direction)
-end
-
-# Specialized `DissipationLocalLaxFriedrichs` to avoid spurious dissipation in the bottom topography
-@inline function (dissipation::DissipationLocalLaxFriedrichs)(u_ll, u_rr,
-                                                              orientation_or_normal_direction,
-                                                              equations::ShallowWaterTwoLayerEquations2D)
-    λ = dissipation.max_abs_speed(u_ll, u_rr, orientation_or_normal_direction,
-                                  equations)
-    diss = -0.5 * λ * (u_rr - u_ll)
-    return SVector(diss[1], diss[2], diss[3], diss[4], diss[5], diss[6],
-                   zero(eltype(u_ll)))
-end
-
-# Absolute speed of the barotropic mode
-@inline function max_abs_speeds(u, equations::ShallowWaterTwoLayerEquations2D)
-    h_upper, h_v1_upper, h_v2_upper, h_lower, h_v1_lower, h_v2_lower, _ = u
-
-    # Calculate averaged velocity of both layers
-    v1_m = (h_v1_upper + h_v1_lower) / (h_upper + h_lower)
-    v2_m = (h_v2_upper + h_v2_lower) / (h_upper + h_lower)
-
-    h_upper, h_lower = waterheight(u, equations)
-    v1_upper, v2_upper, v1_lower, v2_lower = velocity(u, equations)
-
-    c = sqrt(equations.gravity * (h_upper + h_lower))
-    return (max(abs(v1_m) + c, abs(v1_upper), abs(v1_lower)),
-            max(abs(v2_m) + c, abs(v2_upper), abs(v2_lower)))
-end
-
-# Helper function to extract the velocity vector from the conservative variables
-@inline function velocity(u, equations::ShallowWaterTwoLayerEquations2D)
-    h_upper, h_v1_upper, h_v2_upper, h_lower, h_v1_lower, h_v2_lower, _ = u
-
-    v1_upper = h_v1_upper / h_upper
-    v2_upper = h_v2_upper / h_upper
-    v1_lower = h_v1_lower / h_lower
-    v2_lower = h_v2_lower / h_lower
-
-    return SVector(v1_upper, v2_upper, v1_lower, v2_lower)
-end
-
-# Convert conservative variables to primitive
-@inline function cons2prim(u, equations::ShallowWaterTwoLayerEquations2D)
-    h_upper, _, _, h_lower, _, _, b = u
-
-    H_lower = h_lower + b
-    H_upper = h_lower + h_upper + b
-    v1_upper, v2_upper, v1_lower, v2_lower = velocity(u, equations)
-
-    return SVector(H_upper, v1_upper, v2_upper, H_lower, v1_lower, v2_lower, b)
-end
-
-# Convert conservative variables to entropy variables
-# Note, only the first six are the entropy variables, the seventh entry still just carries the bottom
-# topography values for convenience.
-
-# In contrast to general usage the entropy variables are denoted with q instead of w, because w is
-# already used for velocity in y-direction
-@inline function cons2entropy(u, equations::ShallowWaterTwoLayerEquations2D)
-    h_upper, _, _, h_lower, _, _, b = u
-    # Assign new variables for better readability
-    rho_upper = equations.rho_upper
-    rho_lower = equations.rho_lower
-    v1_upper, v2_upper, v1_lower, v2_lower = velocity(u, equations)
-
-    w1 = (rho_upper * (equations.gravity * (h_upper + h_lower + b) +
-          -0.5 * (v1_upper^2 + v2_upper^2)))
-    w2 = rho_upper * v1_upper
-    w3 = rho_upper * v2_upper
-    w4 = (rho_lower * (equations.gravity * (equations.r * h_upper + h_lower + b) +
-          -0.5 * (v1_lower^2 + v2_lower^2)))
-    w5 = rho_lower * v1_lower
-    w6 = rho_lower * v2_lower
-    return SVector(w1, w2, w3, w4, w5, w6, b)
-end
-
-# Convert primitive to conservative variables
-@inline function prim2cons(prim, equations::ShallowWaterTwoLayerEquations2D)
-    H_upper, v1_upper, v2_upper, H_lower, v1_lower, v2_lower, b = prim
-
-    h_lower = H_lower - b
-    h_upper = H_upper - h_lower - b
-    h_v1_upper = h_upper * v1_upper
-    h_v2_upper = h_upper * v2_upper
-    h_v1_lower = h_lower * v1_lower
-    h_v2_lower = h_lower * v2_lower
-    return SVector(h_upper, h_v1_upper, h_v2_upper, h_lower, h_v1_lower, h_v2_lower, b)
-end
-
-@inline function waterheight(u, equations::ShallowWaterTwoLayerEquations2D)
-    return SVector(u[1], u[4])
-end
-
-# Entropy function for the shallow water equations is the total energy
-@inline function entropy(cons, equations::ShallowWaterTwoLayerEquations2D)
-    energy_total(cons, equations)
-end
-
-# Calculate total energy for a conservative state `cons`
-@inline function energy_total(cons, equations::ShallowWaterTwoLayerEquations2D)
-    h_upper, h_v1_upper, h_v2_upper, h_lower, h_v1_lower, h_v2_lower, b = cons
-    g = equations.gravity
-    rho_upper = equations.rho_upper
-    rho_lower = equations.rho_lower
-
-    e = (0.5 * rho_upper *
-         (h_v1_upper^2 / h_upper + h_v2_upper^2 / h_upper + g * h_upper^2) +
-         0.5 * rho_lower *
-         (h_v1_lower^2 / h_lower + h_v2_lower^2 / h_lower + g * h_lower^2) +
-         g * rho_lower * h_lower * b + g * rho_upper * h_upper * (h_lower + b))
-    return e
-end
-
-# Calculate kinetic energy for a conservative state `cons`
-@inline function energy_kinetic(u, equations::ShallowWaterTwoLayerEquations2D)
-    h_upper, h_v1_upper, h_v2_upper, h_lower, h_v1_lower, h_v2_lower, _ = u
-
-    return (0.5 * equations.rho_upper * h_v1_upper^2 / h_upper +
-            0.5 * equations.rho_upper * h_v2_upper^2 / h_upper +
-            0.5 * equations.rho_lower * h_v1_lower^2 / h_lower +
-            0.5 * equations.rho_lower * h_v2_lower^2 / h_lower)
-end
-
-# Calculate potential energy for a conservative state `cons`
-@inline function energy_internal(cons, equations::ShallowWaterTwoLayerEquations2D)
-    return energy_total(cons, equations) - energy_kinetic(cons, equations)
-end
-
-# Calculate the error for the "lake-at-rest" test case where H = h_upper+h_lower+b should
-# be a constant value over time
-@inline function lake_at_rest_error(u, equations::ShallowWaterTwoLayerEquations2D)
-    h_upper, _, _, h_lower, _, _, b = u
-    return abs(equations.H0 - (h_upper + h_lower + b))
-end
-end # @muladd
diff --git a/src/solvers/dgsem_tree/indicators.jl b/src/solvers/dgsem_tree/indicators.jl
index bb9109f2762..9f25a6d2dbb 100644
--- a/src/solvers/dgsem_tree/indicators.jl
+++ b/src/solvers/dgsem_tree/indicators.jl
@@ -101,82 +101,6 @@ function Base.show(io::IO, ::MIME"text/plain", indicator::IndicatorHennemannGass
     summary_box(io, "IndicatorHennemannGassner",
setup) end -# TODO: TrixiShallowWater: move the new indicator and all associated routines to the new package -""" - IndicatorHennemannGassnerShallowWater(equations::AbstractEquations, basis; - alpha_max=0.5, - alpha_min=0.001, - alpha_smooth=true, - variable) - -Modified version of the [`IndicatorHennemannGassner`](@ref) -indicator used for shock-capturing for shallow water equations. After -the element-wise values for the blending factors are computed an additional check -is made to see if the element is partially wet. In this case, partially wet elements -are set to use the pure finite volume scheme that is guaranteed to be well-balanced -for this wet/dry transition state of the flow regime. - -See also [`VolumeIntegralShockCapturingHG`](@ref). - -## References - -- Hennemann, Gassner (2020) - "A provably entropy stable subcell shock capturing approach for high order split form DG" - [arXiv: 2008.12044](https://arxiv.org/abs/2008.12044) -""" -struct IndicatorHennemannGassnerShallowWater{RealT <: Real, Variable, Cache} <: - AbstractIndicator - alpha_max::RealT - alpha_min::RealT - alpha_smooth::Bool - variable::Variable - cache::Cache -end - -# this method is used when the indicator is constructed as for shock-capturing volume integrals -# of the shallow water equations -# It modifies the shock-capturing indicator to use full FV method in dry cells -function IndicatorHennemannGassnerShallowWater(equations::AbstractShallowWaterEquations, - basis; - alpha_max = 0.5, - alpha_min = 0.001, - alpha_smooth = true, - variable) - alpha_max, alpha_min = promote(alpha_max, alpha_min) - cache = create_cache(IndicatorHennemannGassner, equations, basis) - IndicatorHennemannGassnerShallowWater{typeof(alpha_max), typeof(variable), - typeof(cache)}(alpha_max, alpha_min, - alpha_smooth, variable, cache) -end - -function Base.show(io::IO, indicator::IndicatorHennemannGassnerShallowWater) - @nospecialize indicator # reduce precompilation time - - print(io, "IndicatorHennemannGassnerShallowWater(") - print(io, indicator.variable) - print(io, ", alpha_max=", indicator.alpha_max) - print(io, ", alpha_min=", indicator.alpha_min) - print(io, ", alpha_smooth=", indicator.alpha_smooth) - print(io, ")") -end - -function Base.show(io::IO, ::MIME"text/plain", - indicator::IndicatorHennemannGassnerShallowWater) - @nospecialize indicator # reduce precompilation time - - if get(io, :compact, false) - show(io, indicator) - else - setup = [ - "indicator variable" => indicator.variable, - "max. α" => indicator.alpha_max, - "min. α" => indicator.alpha_min, - "smooth α" => (indicator.alpha_smooth ? "yes" : "no"), - ] - summary_box(io, "IndicatorHennemannGassnerShallowWater", setup) - end -end - function (indicator_hg::IndicatorHennemannGassner)(u, mesh, equations, dg::DGSEM, cache; kwargs...) @unpack alpha_smooth = indicator_hg diff --git a/src/solvers/dgsem_tree/indicators_1d.jl b/src/solvers/dgsem_tree/indicators_1d.jl index dff87bfe06c..4796ddcc602 100644 --- a/src/solvers/dgsem_tree/indicators_1d.jl +++ b/src/solvers/dgsem_tree/indicators_1d.jl @@ -24,115 +24,6 @@ function create_cache(typ::Type{IndicatorHennemannGassner}, mesh, create_cache(typ, equations, dg.basis) end -# Modified indicator for ShallowWaterEquations1D to apply full FV method on cells -# containing some "dry" LGL nodes. That is, if an element is partially "wet" then it becomes a -# full FV element. 
-# -# TODO: TrixiShallowWater: move new indicator type -function (indicator_hg::IndicatorHennemannGassnerShallowWater)(u::AbstractArray{<:Any, - 3}, - mesh, - equations::ShallowWaterEquations1D, - dg::DGSEM, cache; - kwargs...) - @unpack alpha_max, alpha_min, alpha_smooth, variable = indicator_hg - @unpack alpha, alpha_tmp, indicator_threaded, modal_threaded = indicator_hg.cache - # TODO: Taal refactor, when to `resize!` stuff changed possibly by AMR? - # Shall we implement `resize!(semi::AbstractSemidiscretization, new_size)` - # or just `resize!` whenever we call the relevant methods as we do now? - resize!(alpha, nelements(dg, cache)) - if alpha_smooth - resize!(alpha_tmp, nelements(dg, cache)) - end - - # magic parameters - threshold = 0.5 * 10^(-1.8 * (nnodes(dg))^0.25) - parameter_s = log((1 - 0.0001) / 0.0001) - - # If the water height `h` at one LGL node is lower than `threshold_partially_wet` - # the indicator sets the element-wise blending factor alpha[element] = 1 - # via the local variable `indicator_wet`. In turn, this ensures that a pure - # FV method is used in partially wet cells and guarantees the well-balanced property. - # - # Hard-coded cut-off value of `threshold_partially_wet = 1e-4` was determined through many numerical experiments. - # Overall idea is to increase robustness when computing the velocity on (nearly) dry cells which - # could be "dangerous" due to division of conservative variables, e.g., v = hv / h. - # Here, the impact of the threshold on the number of cells being updated with FV is not that - # significant. However, its impact on the robustness is very significant. - # The value can be seen as a trade-off between accuracy and stability. - # Well-balancedness of the scheme on partially wet cells with hydrostatic reconstruction - # can only be proven for the FV method (see Chen and Noelle). - # Therefore we set alpha to one regardless of its given maximum value. 
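# [Editor's note: the following worked example is an illustration added during review; it is
#  not part of the deleted file or of this patch. It evaluates the "magic parameters" described
#  in the comment above for a degree-3 LGL basis (nnodes = 4), assuming the formulas used in
#  the surrounding (removed) routine.]
let nnodes = 4  # degree-3 Lobatto-Legendre basis
    threshold = 0.5 * 10^(-1.8 * nnodes^0.25)    # ≈ 1.4e-3
    parameter_s = log((1 - 0.0001) / 0.0001)     # ≈ 9.21
    energy = 2 * threshold                       # sample value of the high-mode energy
    alpha_element = 1 / (1 + exp(-parameter_s / threshold * (energy - threshold)))
    # alpha_element ≈ 0.9999, i.e. an element whose high-mode energy exceeds the threshold
    # by one threshold width is already pushed almost entirely to the FV scheme; in addition,
    # any element with a node below `threshold_partially_wet` is forced to alpha = 1 outright.
    @show threshold parameter_s alpha_element
end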
- threshold_partially_wet = 1e-4 - - @threaded for element in eachelement(dg, cache) - indicator = indicator_threaded[Threads.threadid()] - modal = modal_threaded[Threads.threadid()] - - # (Re-)set dummy variable for alpha_dry - indicator_wet = 1 - - # Calculate indicator variables at Gauss-Lobatto nodes - for i in eachnode(dg) - u_local = get_node_vars(u, equations, dg, i, element) - h, _, _ = u_local - - if h <= threshold_partially_wet - indicator_wet = 0 - end - - indicator[i] = indicator_hg.variable(u_local, equations) - end - - # Convert to modal representation - multiply_scalar_dimensionwise!(modal, dg.basis.inverse_vandermonde_legendre, - indicator) - - # Calculate total energies for all modes, without highest, without two highest - total_energy = zero(eltype(modal)) - for i in 1:nnodes(dg) - total_energy += modal[i]^2 - end - total_energy_clip1 = zero(eltype(modal)) - for i in 1:(nnodes(dg) - 1) - total_energy_clip1 += modal[i]^2 - end - total_energy_clip2 = zero(eltype(modal)) - for i in 1:(nnodes(dg) - 2) - total_energy_clip2 += modal[i]^2 - end - - # Calculate energy in higher modes - energy = max((total_energy - total_energy_clip1) / total_energy, - (total_energy_clip1 - total_energy_clip2) / total_energy_clip1) - - alpha_element = 1 / (1 + exp(-parameter_s / threshold * (energy - threshold))) - - # Take care of the case close to pure DG - if alpha_element < alpha_min - alpha_element = zero(alpha_element) - end - - # Take care of the case close to pure FV - if alpha_element > 1 - alpha_min - alpha_element = one(alpha_element) - end - - # Clip the maximum amount of FV allowed or set to one depending on indicator_wet - if indicator_wet == 0 - alpha[element] = 1 - else # Element is not defined as dry but wet - alpha[element] = min(alpha_max, alpha_element) - end - end - - if alpha_smooth - apply_smoothing!(mesh, alpha, alpha_tmp, dg, cache) - end - - return alpha -end - # Use this function barrier and unpack inside to avoid passing closures to Polyester.jl # with @batch (@threaded). # Otherwise, @threaded does not work here with Julia ARM on macOS. diff --git a/src/solvers/dgsem_tree/indicators_2d.jl b/src/solvers/dgsem_tree/indicators_2d.jl index fa8ed481eb9..665d2254e5d 100644 --- a/src/solvers/dgsem_tree/indicators_2d.jl +++ b/src/solvers/dgsem_tree/indicators_2d.jl @@ -28,116 +28,6 @@ function create_cache(typ::Type{IndicatorHennemannGassner}, mesh, create_cache(typ, equations, dg.basis) end -# Modified indicator for ShallowWaterEquations2D to apply full FV method on cells -# containing some "dry" LGL nodes. That is, if an element is partially "wet" then it becomes a -# full FV element. -# -# TODO: TrixiShallowWater: move new indicator type -function (indicator_hg::IndicatorHennemannGassnerShallowWater)(u::AbstractArray{<:Any, - 4}, - mesh, - equations::ShallowWaterEquations2D, - dg::DGSEM, cache; - kwargs...) - @unpack alpha_max, alpha_min, alpha_smooth, variable = indicator_hg - @unpack alpha, alpha_tmp, indicator_threaded, modal_threaded, modal_tmp1_threaded = indicator_hg.cache - # TODO: Taal refactor, when to `resize!` stuff changed possibly by AMR? - # Shall we implement `resize!(semi::AbstractSemidiscretization, new_size)` - # or just `resize!` whenever we call the relevant methods as we do now? 
- resize!(alpha, nelements(dg, cache)) - if alpha_smooth - resize!(alpha_tmp, nelements(dg, cache)) - end - - # magic parameters - threshold = 0.5 * 10^(-1.8 * (nnodes(dg))^0.25) - parameter_s = log((1 - 0.0001) / 0.0001) - - # If the water height `h` at one LGL node is lower than `threshold_partially_wet` - # the indicator sets the element-wise blending factor alpha[element] = 1 - # via the local variable `indicator_wet`. In turn, this ensures that a pure - # FV method is used in partially wet cells and guarantees the well-balanced property. - # - # Hard-coded cut-off value of `threshold_partially_wet = 1e-4` was determined through many numerical experiments. - # Overall idea is to increase robustness when computing the velocity on (nearly) dry cells which - # could be "dangerous" due to division of conservative variables, e.g., v1 = hv1 / h. - # Here, the impact of the threshold on the number of cells being updated with FV is not that - # significant. However, its impact on the robustness is very significant. - # The value can be seen as a trade-off between accuracy and stability. - # Well-balancedness of the scheme on partially wet cells with hydrostatic reconstruction - # can only be proven for the FV method (see Chen and Noelle). - # Therefore we set alpha to be one regardless of its given value from the modal indicator. - threshold_partially_wet = 1e-4 - - @threaded for element in eachelement(dg, cache) - indicator = indicator_threaded[Threads.threadid()] - modal = modal_threaded[Threads.threadid()] - modal_tmp1 = modal_tmp1_threaded[Threads.threadid()] - - # (Re-)set dummy variable for alpha_dry - indicator_wet = 1 - - # Calculate indicator variables at Gauss-Lobatto nodes - for j in eachnode(dg), i in eachnode(dg) - u_local = get_node_vars(u, equations, dg, i, j, element) - h, _, _, _ = u_local - - if h <= threshold_partially_wet - indicator_wet = 0 - end - - indicator[i, j] = indicator_hg.variable(u_local, equations) - end - - # Convert to modal representation - multiply_scalar_dimensionwise!(modal, dg.basis.inverse_vandermonde_legendre, - indicator, modal_tmp1) - - # Calculate total energies for all modes, without highest, without two highest - total_energy = zero(eltype(modal)) - for j in 1:nnodes(dg), i in 1:nnodes(dg) - total_energy += modal[i, j]^2 - end - total_energy_clip1 = zero(eltype(modal)) - for j in 1:(nnodes(dg) - 1), i in 1:(nnodes(dg) - 1) - total_energy_clip1 += modal[i, j]^2 - end - total_energy_clip2 = zero(eltype(modal)) - for j in 1:(nnodes(dg) - 2), i in 1:(nnodes(dg) - 2) - total_energy_clip2 += modal[i, j]^2 - end - - # Calculate energy in higher modes - energy = max((total_energy - total_energy_clip1) / total_energy, - (total_energy_clip1 - total_energy_clip2) / total_energy_clip1) - - alpha_element = 1 / (1 + exp(-parameter_s / threshold * (energy - threshold))) - - # Take care of the case close to pure DG - if alpha_element < alpha_min - alpha_element = zero(alpha_element) - end - - # Take care of the case close to pure FV - if alpha_element > 1 - alpha_min - alpha_element = one(alpha_element) - end - - # Clip the maximum amount of FV allowed or set to 1 depending on indicator_wet - if indicator_wet == 0 - alpha[element] = 1 - else # Element is not defined as dry but wet - alpha[element] = min(alpha_max, alpha_element) - end - end - - if alpha_smooth - apply_smoothing!(mesh, alpha, alpha_tmp, dg, cache) - end - - return alpha -end - # Use this function barrier and unpack inside to avoid passing closures to Polyester.jl # with @batch (@threaded). 
# Otherwise, @threaded does not work here with Julia ARM on macOS. diff --git a/test/test_structured_2d.jl b/test/test_structured_2d.jl index 522510a42e3..f5fb169033a 100644 --- a/test/test_structured_2d.jl +++ b/test/test_structured_2d.jl @@ -1,7 +1,5 @@ module TestExamplesStructuredMesh2D -# TODO: TrixiShallowWater: move any wet/dry tests to new package - using Test using Trixi @@ -907,82 +905,6 @@ end end end -@trixi_testset "elixir_shallowwater_well_balanced_wet_dry.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, - "elixir_shallowwater_well_balanced_wet_dry.jl"), - l2=[ - 0.019731646454942086, - 1.0694532773278277e-14, - 1.1969913383405568e-14, - 0.0771517260037954, - ], - linf=[ - 0.4999999999998892, - 6.067153702623552e-14, - 4.4849667259339357e-14, - 1.9999999999999993, - ], - tspan=(0.0, 0.25)) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 - end -end - -@trixi_testset "elixir_shallowwater_conical_island.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_conical_island.jl"), - l2=[ - 0.04593154164306353, - 0.1644534881916908, - 0.16445348819169076, - 0.0011537702354532122, - ], - linf=[ - 0.21100717610846442, - 0.9501592344310412, - 0.950159234431041, - 0.021790250683516296, - ], - tspan=(0.0, 0.025)) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 - end -end - -@trixi_testset "elixir_shallowwater_parabolic_bowl.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_parabolic_bowl.jl"), - l2=[ - 0.00015285369980313484, - 1.9536806395943226e-5, - 9.936906607758672e-5, - 5.0686313334616055e-15, - ], - linf=[ - 0.003316119030459211, - 0.0005075409427972817, - 0.001986721761060583, - 4.701794509287538e-14, - ], - tspan=(0.0, 0.025), cells_per_dimension=(40, 40)) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 - end -end - @trixi_testset "elixir_mhd_ec_shockcapturing.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_mhd_ec_shockcapturing.jl"), l2=[0.0364192725149364, 0.0426667193422069, 0.04261673001449095, diff --git a/test/test_tree_1d.jl b/test/test_tree_1d.jl index 8b470278ffd..4a25a51a45e 100644 --- a/test/test_tree_1d.jl +++ b/test/test_tree_1d.jl @@ -42,8 +42,6 @@ isdir(outdir) && rm(outdir, recursive = true) # Shallow water include("test_tree_1d_shallowwater.jl") - # Two-layer Shallow Water - include("test_tree_1d_shallowwater_twolayer.jl") # FDSBP methods on the TreeMesh include("test_tree_1d_fdsbp.jl") diff --git a/test/test_tree_1d_shallowwater.jl b/test/test_tree_1d_shallowwater.jl index 2269e858928..f9be63b87fd 100644 --- a/test/test_tree_1d_shallowwater.jl +++ b/test/test_tree_1d_shallowwater.jl @@ -1,7 +1,5 @@ module TestExamples1DShallowWater -# TODO: TrixiShallowWater: move any wet/dry tests to new package - using Test using Trixi @@ -119,32 +117,6 @@ end end end -@trixi_testset "elixir_shallowwater_well_balanced_wet_dry.jl with FluxHydrostaticReconstruction" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, - "elixir_shallowwater_well_balanced_wet_dry.jl"), - l2=[ - 
0.00965787167169024, - 5.345454081916856e-14, - 0.03857583749209928, - ], - linf=[ - 0.4999999999998892, - 2.2447689894899726e-13, - 1.9999999999999714, - ], - tspan=(0.0, 0.25), - # Soften the tolerance as test results vary between different CPUs - atol=1000 * eps()) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 - end -end - @trixi_testset "elixir_shallowwater_source_terms.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_source_terms.jl"), l2=[ @@ -339,53 +311,6 @@ end end end -@trixi_testset "elixir_shallowwater_beach.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_beach.jl"), - l2=[ - 0.17979210479598923, - 1.2377495706611434, - 6.289818963361573e-8, - ], - linf=[ - 0.845938394800688, - 3.3740800777086575, - 4.4541473087633676e-7, - ], - tspan=(0.0, 0.05), - atol=3e-10) # see https://github.com/trixi-framework/Trixi.jl/issues/1617 - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 - end -end - -@trixi_testset "elixir_shallowwater_parabolic_bowl.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_parabolic_bowl.jl"), - l2=[ - 8.965981683033589e-5, - 1.8565707397810857e-5, - 4.1043039226164336e-17, - ], - linf=[ - 0.00041080213807871235, - 0.00014823261488938177, - 2.220446049250313e-16, - ], - tspan=(0.0, 0.05)) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 - end -end - @trixi_testset "elixir_shallow_water_quasi_1d_source_terms.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallow_water_quasi_1d_source_terms.jl"), diff --git a/test/test_tree_1d_shallowwater_twolayer.jl b/test/test_tree_1d_shallowwater_twolayer.jl deleted file mode 100644 index 180fb3ec3b3..00000000000 --- a/test/test_tree_1d_shallowwater_twolayer.jl +++ /dev/null @@ -1,74 +0,0 @@ -module TestExamples1DShallowWaterTwoLayer - -# TODO: TrixiShallowWater: move two layer tests to new package - -using Test -using Trixi - -include("test_trixi.jl") - -EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_1d_dgsem") - -@testset "Shallow Water Two layer" begin - @trixi_testset "elixir_shallowwater_twolayer_convergence.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, - "elixir_shallowwater_twolayer_convergence.jl"), - l2=[0.005012009872109003, 0.002091035326731071, - 0.005049271397924551, - 0.0024633066562966574, 0.0004744186597732739], - linf=[0.0213772149343594, 0.005385752427290447, - 0.02175023787351349, - 0.008212004668840978, 0.0008992474511784199], - tspan=(0.0, 0.25)) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 - end - end - - @trixi_testset "elixir_shallowwater_twolayer_well_balanced.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, - "elixir_shallowwater_twolayer_well_balanced.jl"), - l2=[8.949288784402005e-16, 4.0636427176237915e-17, - 0.001002881985401548, - 2.133351105037203e-16, 0.0010028819854016578], - 
linf=[2.6229018956769323e-15, 1.878451903240623e-16, - 0.005119880996670156, - 8.003199803957679e-16, 0.005119880996670666], - tspan=(0.0, 0.25)) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 - end - end - - @trixi_testset "elixir_shallowwater_twolayer_dam_break.jl with flux_lax_friedrichs" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, - "elixir_shallowwater_twolayer_dam_break.jl"), - l2=[0.1000774903431289, 0.5670692949571057, 0.08764242501014498, - 0.45412307886094555, 0.013638618139749523], - linf=[0.586718937495144, 2.1215606128311584, 0.5185911311186155, - 1.820382495072612, 0.5], - surface_flux=(flux_lax_friedrichs, - flux_nonconservative_ersing_etal), - tspan=(0.0, 0.25)) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 - end - end -end - -end # module diff --git a/test/test_tree_2d_part3.jl b/test/test_tree_2d_part3.jl index ce9b3bc04f8..0eff564132c 100644 --- a/test/test_tree_2d_part3.jl +++ b/test/test_tree_2d_part3.jl @@ -26,9 +26,6 @@ isdir(outdir) && rm(outdir, recursive = true) # Shallow water include("test_tree_2d_shallowwater.jl") - # Two-Layer Shallow Water - include("test_tree_2d_shallowwater_twolayer.jl") - # FDSBP methods on the TreeMesh include("test_tree_2d_fdsbp.jl") end diff --git a/test/test_tree_2d_shallowwater.jl b/test/test_tree_2d_shallowwater.jl index 1f3dfbf5267..93a8cb63667 100644 --- a/test/test_tree_2d_shallowwater.jl +++ b/test/test_tree_2d_shallowwater.jl @@ -1,7 +1,5 @@ module TestExamples2DShallowWater -# TODO: TrixiShallowWater: move any wet/dry tests to new package - using Test using Trixi @@ -145,32 +143,6 @@ end end end -@trixi_testset "elixir_shallowwater_well_balanced_wet_dry.jl with FluxHydrostaticReconstruction" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, - "elixir_shallowwater_well_balanced_wet_dry.jl"), - l2=[ - 0.030186039395610056, - 2.513287752536758e-14, - 1.3631397744897607e-16, - 0.10911781485920438, - ], - linf=[ - 0.49999999999993505, - 5.5278950497971455e-14, - 7.462550826772548e-16, - 2.0, - ], - tspan=(0.0, 0.25)) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 - end -end - @trixi_testset "elixir_shallowwater_source_terms.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_source_terms.jl"), l2=[ @@ -277,57 +249,6 @@ end end end -@trixi_testset "elixir_shallowwater_conical_island.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_conical_island.jl"), - l2=[ - 0.0459315416430658, - 0.1644534881916991, - 0.16445348819169914, - 0.0011537702354532694, - ], - linf=[ - 0.21100717610846464, - 0.9501592344310412, - 0.9501592344310417, - 0.021790250683516282, - ], - tspan=(0.0, 0.025)) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 - end -end - -@trixi_testset "elixir_shallowwater_parabolic_bowl.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, 
"elixir_shallowwater_parabolic_bowl.jl"), - l2=[ - 0.00025345501281482687, - 4.4525120338817177e-5, - 0.00015991819160294247, - 7.750412064917294e-15, - ], - linf=[ - 0.004664246019836723, - 0.0004972780116736669, - 0.0028735707270457628, - 6.866729407306593e-14, - ], - tspan=(0.0, 0.025), - basis=LobattoLegendreBasis(3)) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 - end -end - @trixi_testset "elixir_shallowwater_wall.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_wall.jl"), l2=[ diff --git a/test/test_tree_2d_shallowwater_twolayer.jl b/test/test_tree_2d_shallowwater_twolayer.jl deleted file mode 100644 index 802bf4e021c..00000000000 --- a/test/test_tree_2d_shallowwater_twolayer.jl +++ /dev/null @@ -1,88 +0,0 @@ -module TestExamples2DShallowWaterTwoLayer - -# TODO: TrixiShallowWater: move two layer tests to new package - -using Test -using Trixi - -include("test_trixi.jl") - -EXAMPLES_DIR = joinpath(examples_dir(), "tree_2d_dgsem") - -@testset "Two-Layer Shallow Water" begin - @trixi_testset "elixir_shallowwater_twolayer_convergence.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, - "elixir_shallowwater_twolayer_convergence.jl"), - l2=[0.0004016779699408397, 0.005466339651545468, - 0.006148841330156112, - 0.0002882339012602492, 0.0030120142442780313, - 0.002680752838455618, - 8.873630921431545e-6], - linf=[0.002788654460984752, 0.01484602033450666, - 0.017572229756493973, - 0.0016010835493927011, 0.009369847995372549, - 0.008407961775489636, - 3.361991620143279e-5], - tspan=(0.0, 0.25)) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 - end - end - - @trixi_testset "elixir_shallowwater_twolayer_well_balanced.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, - "elixir_shallowwater_twolayer_well_balanced.jl"), - l2=[3.2935164267930016e-16, 4.6800825611195103e-17, - 4.843057532147818e-17, - 0.0030769233188015013, 1.4809161150389857e-16, - 1.509071695038043e-16, - 0.0030769233188014935], - linf=[2.248201624865942e-15, 2.346382070278936e-16, - 2.208565017494899e-16, - 0.026474051138910493, 9.237568031609006e-16, - 7.520758026187046e-16, - 0.026474051138910267], - tspan=(0.0, 0.25)) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 - end - end - - @trixi_testset "elixir_shallowwater_twolayer_well_balanced with flux_lax_friedrichs.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, - "elixir_shallowwater_twolayer_well_balanced.jl"), - l2=[2.0525741072929735e-16, 6.000589392730905e-17, - 6.102759428478984e-17, - 0.0030769233188014905, 1.8421386173122792e-16, - 1.8473184927121752e-16, - 0.0030769233188014935], - linf=[7.355227538141662e-16, 2.960836949170518e-16, - 4.2726562436938764e-16, - 0.02647405113891016, 1.038795478061861e-15, - 1.0401789378532516e-15, - 0.026474051138910267], - surface_flux=(flux_lax_friedrichs, - flux_nonconservative_ersing_etal), - tspan=(0.0, 0.25)) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = 
similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 - end - end -end - -end # module diff --git a/test/test_unit.jl b/test/test_unit.jl index c1379587cc8..1907a281718 100644 --- a/test/test_unit.jl +++ b/test/test_unit.jl @@ -420,11 +420,6 @@ end (1.0, 1.0), 1.0) @test_nowarn show(stdout, limiter_idp) - # TODO: TrixiShallowWater: move unit test - indicator_hg_swe = IndicatorHennemannGassnerShallowWater(1.0, 0.0, true, "variable", - "cache") - @test_nowarn show(stdout, indicator_hg_swe) - indicator_loehner = IndicatorLöhner(1.0, "variable", (; cache = nothing)) @test_nowarn show(stdout, indicator_loehner) @@ -543,7 +538,7 @@ end end @timed_testset "Shallow water conversion between conservative/entropy variables" begin - H, v1, v2, b = 3.5, 0.25, 0.1, 0.4 + H, v1, v2, b, a = 3.5, 0.25, 0.1, 0.4, 0.3 let equations = ShallowWaterEquations1D(gravity_constant = 9.8) cons_vars = prim2cons(SVector(H, v1, b), equations) @@ -572,6 +567,14 @@ end entropy_vars = cons2entropy(cons_vars, equations) @test cons_vars ≈ entropy2cons(entropy_vars, equations) end + + let equations = ShallowWaterEquationsQuasi1D(gravity_constant = 9.8) + cons_vars = prim2cons(SVector(H, v1, b, a), equations) + entropy_vars = cons2entropy(cons_vars, equations) + + total_energy = energy_total(cons_vars, equations) + @test entropy(cons_vars, equations) ≈ a * total_energy + end end @timed_testset "boundary_condition_do_nothing" begin @@ -697,6 +700,14 @@ end u = SVector(1, 0.5, 0.0) @test flux_hll(u, u, 1, equations) ≈ flux(u, 1, equations) + u_ll = SVector(0.1, 1.0, 0.0) + u_rr = SVector(0.1, 1.0, 0.0) + @test flux_hll(u_ll, u_rr, 1, equations) ≈ flux(u_ll, 1, equations) + + u_ll = SVector(0.1, -1.0, 0.0) + u_rr = SVector(0.1, -1.0, 0.0) + @test flux_hll(u_ll, u_rr, 1, equations) ≈ flux(u_rr, 1, equations) + equations = ShallowWaterEquations2D(gravity_constant = 9.81) normal_directions = [SVector(1.0, 0.0), SVector(0.0, 1.0), @@ -707,6 +718,17 @@ end @test flux_hll(u, u, normal_direction, equations) ≈ flux(u, normal_direction, equations) end + + normal_direction = SVector(1.0, 0.0, 0.0) + u_ll = SVector(0.1, 1.0, 1.0, 0.0) + u_rr = SVector(0.1, 1.0, 1.0, 0.0) + @test flux_hll(u_ll, u_rr, normal_direction, equations) ≈ + flux(u_ll, normal_direction, equations) + + u_ll = SVector(0.1, -1.0, -1.0, 0.0) + u_rr = SVector(0.1, -1.0, -1.0, 0.0) + @test flux_hll(u_ll, u_rr, normal_direction, equations) ≈ + flux(u_rr, normal_direction, equations) end @timed_testset "Consistency check for HLL flux (naive): MHD" begin diff --git a/test/test_unstructured_2d.jl b/test/test_unstructured_2d.jl index 83b8318c926..04eb9f679aa 100644 --- a/test/test_unstructured_2d.jl +++ b/test/test_unstructured_2d.jl @@ -1,7 +1,5 @@ module TestExamplesUnstructuredMesh2D -# TODO: TrixiShallowWater: move any wet/dry and two layer tests - using Test using Trixi @@ -566,105 +564,6 @@ end end end -@trixi_testset "elixir_shallowwater_three_mound_dam_break.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, - "elixir_shallowwater_three_mound_dam_break.jl"), - l2=[ - 0.0892957892027502, - 0.30648836484407915, - 2.28712547616214e-15, - 0.0008778654298684622, - ], - linf=[ - 0.850329472915091, - 2.330631694956507, - 5.783660020252348e-14, - 0.04326237921249021, - ], - basis=LobattoLegendreBasis(3), - tspan=(0.0, 0.25)) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 
- end -end - -@trixi_testset "elixir_shallowwater_twolayer_convergence.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, - "elixir_shallowwater_twolayer_convergence.jl"), - l2=[0.0007935561625451243, 0.008825315509943844, - 0.002429969315645897, - 0.0007580145888686304, 0.004495741879625235, - 0.0015758146898767814, - 6.849532064729749e-6], - linf=[0.0059205195991136605, 0.08072126590166251, - 0.03463806075399023, - 0.005884818649227186, 0.042658506561995546, - 0.014125956138838602, 2.5829318284764646e-5], - tspan=(0.0, 0.25)) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 - end -end - -@trixi_testset "elixir_shallowwater_twolayer_well_balanced.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, - "elixir_shallowwater_twolayer_well_balanced.jl"), - l2=[4.706532184998499e-16, 1.1215950712872183e-15, - 6.7822712922421565e-16, - 0.002192812926266047, 5.506855295923691e-15, - 3.3105180099689275e-15, - 0.0021928129262660085], - linf=[4.468647674116255e-15, 1.3607872120431166e-14, - 9.557155049520056e-15, - 0.024280130945632084, 6.68910907640583e-14, - 4.7000983997100496e-14, - 0.024280130945632732], - tspan=(0.0, 0.25)) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 - end -end - -@trixi_testset "elixir_shallowwater_twolayer_dam_break.jl with flux_lax_friedrichs" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, - "elixir_shallowwater_twolayer_dam_break.jl"), - l2=[0.012447632879122346, 0.012361250464676683, - 0.0009551519536340908, - 0.09119400061322577, 0.015276216721920347, - 0.0012126995108983853, 0.09991983966647647], - linf=[0.044305765721807444, 0.03279620980615845, - 0.010754320388190101, - 0.111309922939555, 0.03663360204931427, - 0.014332822306649284, - 0.10000000000000003], - surface_flux=(flux_lax_friedrichs, - flux_nonconservative_ersing_etal), - tspan=(0.0, 0.25)) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 - end -end - # TODO: FD; for now put the unstructured tests for the 2D FDSBP here. 
@trixi_testset "FDSBP (central): elixir_advection_basic.jl" begin @test_trixi_include(joinpath(pkgdir(Trixi, "examples", "unstructured_2d_fdsbp"), From 5418274987eec6bade5432ead7c91f1d4a842610 Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Fri, 23 Feb 2024 05:31:01 +0100 Subject: [PATCH 120/166] Make `min_max_speed_davis` default wave speed estimate for `FluxHLL()` (#1743) * make min_max_speed_davis default wave speed * fmt * exchange hll * exchange some min_max_speed_naive for min_max_speed_davis for some examples * fmt * add news entry * news * debug * revert unintended elixir changes * news entry * correct test vals * Update test vals * Exchange some tests * update test vals for threaded * test vals for fdsbp unstructured * fmt * tests for coverage --------- Co-authored-by: Hendrik Ranocha --- NEWS.md | 5 ++ .../elixir_eulergravity_convergence.jl | 2 +- ..._euler_source_terms_nonconforming_earth.jl | 2 +- .../elixir_euler_convergence.jl | 2 +- .../elixir_eulergravity_convergence.jl | 2 +- .../elixir_eulergravity_jeans_instability.jl | 2 +- .../elixir_eulergravity_sedov_blast_wave.jl | 2 +- .../elixir_eulergravity_convergence.jl | 2 +- ..._shallowwater_well_balanced_nonperiodic.jl | 4 +- .../elixir_eulergravity_convergence.jl | 2 +- .../elixir_shallowwater_dirichlet.jl | 4 +- src/equations/numerical_fluxes.jl | 16 +++- test/test_dgmulti_1d.jl | 10 +-- test/test_dgmulti_2d.jl | 82 +++++++----------- test/test_dgmulti_3d.jl | 71 ++++++--------- test/test_parabolic_3d.jl | 1 + test/test_special_elixirs.jl | 3 +- test/test_structured_2d.jl | 57 ++++++------ test/test_threaded.jl | 24 +++--- test/test_tree_1d_euler.jl | 8 +- test/test_tree_1d_eulergravity.jl | 10 +-- test/test_tree_1d_shallowwater.jl | 19 ++-- test/test_tree_2d_mhd.jl | 2 +- test/test_tree_2d_shallowwater.jl | 30 ++++++- test/test_tree_3d_euler.jl | 17 ++-- test/test_tree_3d_mhd.jl | 4 +- test/test_unstructured_2d.jl | 86 ++++++++----------- 27 files changed, 230 insertions(+), 239 deletions(-) diff --git a/NEWS.md b/NEWS.md index ecc91581e9a..d70504d8c85 100644 --- a/NEWS.md +++ b/NEWS.md @@ -10,6 +10,9 @@ for human readability. #### Changed +- The default wave speed estimate used within `flux_hll` is now `min_max_speed_davis` + instead of `min_max_speed_naive`. + #### Deprecated #### Removed @@ -17,6 +20,7 @@ for human readability. Trixi.jl, but are moved to a dedicated repository: [TrixiShallowWater.jl](https://github.com/trixi-framework/TrixiShallowWater.jl). This includes all features related to wetting and drying, as well as the `ShallowWaterTwoLayerEquations1D` and `ShallowWaterTwoLayerEquations2D`. However, the basic shallow water equations are still part of Trixi.jl. We'll also be updating the TrixiShallowWater.jl documentation with instructions on how to use these relocated features in the future. + ## Changes in the v0.6 lifecycle #### Added @@ -27,6 +31,7 @@ for human readability. 
- Subcell (positivity) limiting support for nonlinear variables in 2D for `TreeMesh` - Added Lighthill-Whitham-Richards (LWR) traffic model + ## Changes when updating to v0.6 from v0.5.x #### Added diff --git a/examples/p4est_2d_dgsem/elixir_eulergravity_convergence.jl b/examples/p4est_2d_dgsem/elixir_eulergravity_convergence.jl index d55a59ca5ce..974466e3b3b 100644 --- a/examples/p4est_2d_dgsem/elixir_eulergravity_convergence.jl +++ b/examples/p4est_2d_dgsem/elixir_eulergravity_convergence.jl @@ -10,7 +10,7 @@ gamma = 2.0 equations_euler = CompressibleEulerEquations2D(gamma) polydeg = 3 -solver_euler = DGSEM(polydeg, flux_hll) +solver_euler = DGSEM(polydeg, FluxHLL(min_max_speed_naive)) coordinates_min = (0.0, 0.0) coordinates_max = (2.0, 2.0) diff --git a/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonconforming_earth.jl b/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonconforming_earth.jl index 28a300cd681..28cdec12da5 100644 --- a/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonconforming_earth.jl +++ b/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonconforming_earth.jl @@ -68,7 +68,7 @@ boundary_condition = BoundaryConditionDirichlet(initial_condition) boundary_conditions = Dict(:inside => boundary_condition, :outside => boundary_condition) -surface_flux = flux_hll +surface_flux = FluxHLL(min_max_speed_naive) # Note that a free stream is not preserved if N < 2 * N_geo, where N is the # polydeg of the solver and N_geo is the polydeg of the mesh. # However, the FSP error is negligible in this example. diff --git a/examples/paper_self_gravitating_gas_dynamics/elixir_euler_convergence.jl b/examples/paper_self_gravitating_gas_dynamics/elixir_euler_convergence.jl index aabfce0f66b..4f44d7b12ac 100644 --- a/examples/paper_self_gravitating_gas_dynamics/elixir_euler_convergence.jl +++ b/examples/paper_self_gravitating_gas_dynamics/elixir_euler_convergence.jl @@ -8,7 +8,7 @@ equations = CompressibleEulerEquations2D(2.0) initial_condition = initial_condition_eoc_test_coupled_euler_gravity -solver = DGSEM(polydeg = 3, surface_flux = flux_hll) +solver = DGSEM(polydeg = 3, surface_flux = FluxHLL(min_max_speed_naive)) coordinates_min = (0.0, 0.0) coordinates_max = (2.0, 2.0) diff --git a/examples/paper_self_gravitating_gas_dynamics/elixir_eulergravity_convergence.jl b/examples/paper_self_gravitating_gas_dynamics/elixir_eulergravity_convergence.jl index ce1d2cd05bd..49b98803577 100644 --- a/examples/paper_self_gravitating_gas_dynamics/elixir_eulergravity_convergence.jl +++ b/examples/paper_self_gravitating_gas_dynamics/elixir_eulergravity_convergence.jl @@ -10,7 +10,7 @@ gamma = 2.0 equations_euler = CompressibleEulerEquations2D(gamma) polydeg = 3 -solver_euler = DGSEM(polydeg, flux_hll) +solver_euler = DGSEM(polydeg, FluxHLL(min_max_speed_naive)) coordinates_min = (0.0, 0.0) coordinates_max = (2.0, 2.0) diff --git a/examples/paper_self_gravitating_gas_dynamics/elixir_eulergravity_jeans_instability.jl b/examples/paper_self_gravitating_gas_dynamics/elixir_eulergravity_jeans_instability.jl index f081f6bb91a..7461198fbb2 100644 --- a/examples/paper_self_gravitating_gas_dynamics/elixir_eulergravity_jeans_instability.jl +++ b/examples/paper_self_gravitating_gas_dynamics/elixir_eulergravity_jeans_instability.jl @@ -66,7 +66,7 @@ gamma = 5 / 3 equations_euler = CompressibleEulerEquations2D(gamma) polydeg = 3 -solver_euler = DGSEM(polydeg, flux_hll) +solver_euler = DGSEM(polydeg, FluxHLL(min_max_speed_naive)) coordinates_min = (0.0, 0.0) coordinates_max = (1.0, 1.0) diff --git 
a/examples/paper_self_gravitating_gas_dynamics/elixir_eulergravity_sedov_blast_wave.jl b/examples/paper_self_gravitating_gas_dynamics/elixir_eulergravity_sedov_blast_wave.jl index b7be2320228..bc7ceb97c8b 100644 --- a/examples/paper_self_gravitating_gas_dynamics/elixir_eulergravity_sedov_blast_wave.jl +++ b/examples/paper_self_gravitating_gas_dynamics/elixir_eulergravity_sedov_blast_wave.jl @@ -85,7 +85,7 @@ function boundary_condition_sedov_self_gravity(u_inner, orientation, direction, end boundary_conditions = boundary_condition_sedov_self_gravity -surface_flux = flux_hll +surface_flux = FluxHLL(min_max_speed_naive) volume_flux = flux_chandrashekar polydeg = 3 basis = LobattoLegendreBasis(polydeg) diff --git a/examples/t8code_2d_dgsem/elixir_eulergravity_convergence.jl b/examples/t8code_2d_dgsem/elixir_eulergravity_convergence.jl index 98a9a5521a9..cd10315945a 100644 --- a/examples/t8code_2d_dgsem/elixir_eulergravity_convergence.jl +++ b/examples/t8code_2d_dgsem/elixir_eulergravity_convergence.jl @@ -9,7 +9,7 @@ gamma = 2.0 equations_euler = CompressibleEulerEquations2D(gamma) polydeg = 3 -solver_euler = DGSEM(polydeg, flux_hll) +solver_euler = DGSEM(polydeg, FluxHLL(min_max_speed_naive)) coordinates_min = (0.0, 0.0) coordinates_max = (2.0, 2.0) diff --git a/examples/tree_1d_dgsem/elixir_shallowwater_well_balanced_nonperiodic.jl b/examples/tree_1d_dgsem/elixir_shallowwater_well_balanced_nonperiodic.jl index e55fffc101e..9ed02c0e378 100644 --- a/examples/tree_1d_dgsem/elixir_shallowwater_well_balanced_nonperiodic.jl +++ b/examples/tree_1d_dgsem/elixir_shallowwater_well_balanced_nonperiodic.jl @@ -26,7 +26,9 @@ boundary_condition = BoundaryConditionDirichlet(initial_condition) # Get the DG approximation space volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal) -solver = DGSEM(polydeg = 4, surface_flux = (flux_hll, flux_nonconservative_fjordholm_etal), +solver = DGSEM(polydeg = 4, + surface_flux = (flux_hll, + flux_nonconservative_fjordholm_etal), volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) ############################################################################### diff --git a/examples/tree_3d_dgsem/elixir_eulergravity_convergence.jl b/examples/tree_3d_dgsem/elixir_eulergravity_convergence.jl index 21ef661d0b6..0a8c427bf8d 100644 --- a/examples/tree_3d_dgsem/elixir_eulergravity_convergence.jl +++ b/examples/tree_3d_dgsem/elixir_eulergravity_convergence.jl @@ -10,7 +10,7 @@ equations_euler = CompressibleEulerEquations3D(gamma) initial_condition = initial_condition_eoc_test_coupled_euler_gravity polydeg = 3 -solver_euler = DGSEM(polydeg, flux_hll) +solver_euler = DGSEM(polydeg, FluxHLL(min_max_speed_naive)) coordinates_min = (0.0, 0.0, 0.0) coordinates_max = (2.0, 2.0, 2.0) diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_dirichlet.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_dirichlet.jl index df1a69192ce..38e1279e220 100644 --- a/examples/unstructured_2d_dgsem/elixir_shallowwater_dirichlet.jl +++ b/examples/unstructured_2d_dgsem/elixir_shallowwater_dirichlet.jl @@ -30,7 +30,9 @@ boundary_condition = Dict(:OuterCircle => boundary_condition_constant) # Get the DG approximation space volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal) -solver = DGSEM(polydeg = 4, surface_flux = (flux_hll, flux_nonconservative_fjordholm_etal), +solver = DGSEM(polydeg = 4, + surface_flux = (flux_hll, + flux_nonconservative_fjordholm_etal), volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) 
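# Note: for the shallow water equations the surface flux is a two-part tuple: the
# conservative part (`flux_hll`) is paired with a nonconservative flux
# (`flux_nonconservative_fjordholm_etal`) that discretizes the bottom-topography
# coupling term, analogous to the `volume_flux` tuple defined above.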
############################################################################### diff --git a/src/equations/numerical_fluxes.jl b/src/equations/numerical_fluxes.jl index 87fcb412244..6794c71a32b 100644 --- a/src/equations/numerical_fluxes.jl +++ b/src/equations/numerical_fluxes.jl @@ -222,12 +222,12 @@ See [`FluxLaxFriedrichs`](@ref). const flux_lax_friedrichs = FluxLaxFriedrichs() """ - FluxHLL(min_max_speed=min_max_speed_naive) + FluxHLL(min_max_speed=min_max_speed_davis) Create an HLL (Harten, Lax, van Leer) numerical flux where the minimum and maximum wave speeds are estimated as `λ_min, λ_max = min_max_speed(u_ll, u_rr, orientation_or_normal_direction, equations)`, -defaulting to [`min_max_speed_naive`](@ref). +defaulting to [`min_max_speed_davis`](@ref). Original paper: - Amiram Harten, Peter D. Lax, Bram van Leer (1983) On Upstream Differencing and Godunov-Type Schemes for Hyperbolic Conservation Laws @@ -237,7 +237,7 @@ struct FluxHLL{MinMaxSpeed} min_max_speed::MinMaxSpeed end -FluxHLL() = FluxHLL(min_max_speed_naive) +FluxHLL() = FluxHLL(min_max_speed_davis) """ min_max_speed_naive(u_ll, u_rr, orientation::Integer, equations) @@ -246,10 +246,16 @@ FluxHLL() = FluxHLL(min_max_speed_naive) Simple and fast estimate(!) of the minimal and maximal wave speed of the Riemann problem with left and right states `u_ll, u_rr`, usually based only on the local wave speeds associated to `u_ll` and `u_rr`. +Slightly more diffusive than [`min_max_speed_davis`](@ref). - Amiram Harten, Peter D. Lax, Bram van Leer (1983) On Upstream Differencing and Godunov-Type Schemes for Hyperbolic Conservation Laws [DOI: 10.1137/1025002](https://doi.org/10.1137/1025002) +See eq. (10.37) from +- Eleuterio F. Toro (2009) + Riemann Solvers and Numerical Methods for Fluid Dynamics: A Practical Introduction + [DOI: 10.1007/b79761](https://doi.org/10.1007/b79761) + See also [`FluxHLL`](@ref), [`min_max_speed_davis`](@ref), [`min_max_speed_einfeldt`](@ref). """ function min_max_speed_naive end @@ -266,6 +272,10 @@ left and right states `u_ll, u_rr`, usually based only on the local wave speeds Simplified Second-Order Godunov-Type Methods [DOI: 10.1137/0909030](https://doi.org/10.1137/0909030) +See eq. (10.38) from +- Eleuterio F. Toro (2009) + Riemann Solvers and Numerical Methods for Fluid Dynamics: A Practical Introduction + [DOI: 10.1007/b79761](https://doi.org/10.1007/b79761) See also [`FluxHLL`](@ref), [`min_max_speed_naive`](@ref), [`min_max_speed_einfeldt`](@ref). 
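A minimal usage sketch (the `polydeg` value is illustrative only): the exported
`flux_hll` now uses this estimate by default, while the previous behavior remains
available by passing the wave speed function explicitly.
```julia
using Trixi

# Default HLL flux, now based on the Davis wave speed estimate
solver_davis = DGSEM(polydeg = 3, surface_flux = flux_hll)

# Recover the previous default by selecting the naive estimate explicitly
solver_naive = DGSEM(polydeg = 3, surface_flux = FluxHLL(min_max_speed_naive))
```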
""" function min_max_speed_davis end diff --git a/test/test_dgmulti_1d.jl b/test/test_dgmulti_1d.jl index 0363086341f..e470de71efb 100644 --- a/test/test_dgmulti_1d.jl +++ b/test/test_dgmulti_1d.jl @@ -128,14 +128,12 @@ end @trixi_testset "elixir_euler_fdsbp_periodic.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_fdsbp_periodic.jl"), l2=[ - 9.146929180585711e-7, - 1.8997616878017292e-6, - 3.991417702211889e-6, + 9.146929178341782e-7, 1.8997616876521201e-6, + 3.991417701005622e-6, ], linf=[ - 1.7321089884614338e-6, - 3.3252888855805907e-6, - 6.5252787737613005e-6, + 1.7321089882393892e-6, 3.3252888869128583e-6, + 6.525278767988141e-6, ]) show(stdout, semi.solver.basis) show(stdout, MIME"text/plain"(), semi.solver.basis) diff --git a/test/test_dgmulti_2d.jl b/test/test_dgmulti_2d.jl index 892c8ed37f0..ab6b505e208 100644 --- a/test/test_dgmulti_2d.jl +++ b/test/test_dgmulti_2d.jl @@ -17,6 +17,7 @@ isdir(outdir) && rm(outdir, recursive = true) @trixi_testset "elixir_euler_weakform.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_weakform.jl"), cells_per_dimension=(4, 4), + surface_integral=SurfaceIntegralWeakForm(FluxHLL(min_max_speed_naive)), # division by 2.0 corresponds to normalization by the square root of the size of the domain l2=[ 0.0013536930300254945, @@ -44,6 +45,7 @@ end @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_weakform.jl"), cells_per_dimension=(4, 4), approximation_type=SBP(), + surface_integral=SurfaceIntegralWeakForm(FluxHLL(min_max_speed_naive)), # division by 2.0 corresponds to normalization by the square root of the size of the domain l2=[ 0.0074706882014934735, @@ -71,6 +73,7 @@ end @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_weakform.jl"), cells_per_dimension=(4, 4), element_type=Quad(), + surface_integral=SurfaceIntegralWeakForm(FluxHLL(min_max_speed_naive)), # division by 2.0 corresponds to normalization by the square root of the size of the domain l2=[ 0.00031892254415307093, @@ -184,16 +187,12 @@ end @trixi_testset "elixir_euler_bilinear.jl (Bilinear quadrilateral elements, SBP, flux differencing)" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_bilinear.jl"), l2=[ - 1.0259435706215337e-5, - 9.014090233720625e-6, - 9.014090233223014e-6, - 2.738953587401793e-5, + 1.0259432774540821e-5, 9.014087689495575e-6, + 9.01408768888544e-6, 2.738953324859446e-5, ], linf=[ - 7.362609083649829e-5, - 6.874188055272512e-5, - 6.874188052830021e-5, - 0.0001912435192696904, + 7.362605996297233e-5, 6.874189724781488e-5, + 6.874189703509614e-5, 0.00019124355334110277, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -208,16 +207,12 @@ end @trixi_testset "elixir_euler_curved.jl (Quadrilateral elements, SBP, flux differencing)" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_curved.jl"), l2=[ - 1.720476068165337e-5, - 1.592168205710526e-5, - 1.592168205812963e-5, - 4.894094865697305e-5, + 1.7204593127904542e-5, 1.5921547179522804e-5, + 1.5921547180107928e-5, 4.894071422525737e-5, ], linf=[ - 0.00010525416930584619, - 0.00010003778091061122, - 0.00010003778085621029, - 0.00036426282101720275, + 0.00010525416937667842, 0.00010003778102718464, + 0.00010003778071832059, 0.0003642628211952825, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -232,6 +227,7 @@ end @trixi_testset "elixir_euler_curved.jl (Quadrilateral elements, GaussSBP, flux differencing)" begin @test_trixi_include(joinpath(EXAMPLES_DIR, 
"elixir_euler_curved.jl"), approximation_type=GaussSBP(), + surface_integral=SurfaceIntegralWeakForm(FluxHLL(min_max_speed_naive)), l2=[ 3.4666312079259457e-6, 3.4392774480368986e-6, @@ -259,6 +255,7 @@ end @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_curved.jl"), element_type=Tri(), approximation_type=Polynomial(), volume_integral=VolumeIntegralWeakForm(), + surface_integral=SurfaceIntegralWeakForm(FluxHLL(min_max_speed_naive)), l2=[ 7.905498158659466e-6, 8.731690809663625e-6, @@ -330,16 +327,12 @@ end @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_weakform_periodic.jl"), # division by 2.0 corresponds to normalization by the square root of the size of the domain l2=[ - 0.0014986508075708323, - 0.001528523420746786, - 0.0015285234207473158, - 0.004846505183839211, - ] ./ 2.0, + 0.0007492755162295128, 0.0007641875305302599, + 0.0007641875305306243, 0.0024232389721009447, + ], linf=[ - 0.0015062108658376872, - 0.0019373508504645365, - 0.0019373508504538783, - 0.004742686826709086, + 0.0015060064614331736, 0.0019371156800773726, + 0.0019371156800769285, 0.004742431684202408, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -354,16 +347,12 @@ end @trixi_testset "elixir_euler_triangulate_pkg_mesh.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_triangulate_pkg_mesh.jl"), l2=[ - 2.344080455438114e-6, - 1.8610038753097983e-6, - 2.4095165666095305e-6, - 6.373308158814308e-6, + 2.344076909832665e-6, 1.8610002398709756e-6, + 2.4095132179484066e-6, 6.37330249340445e-6, ], linf=[ - 2.5099852761334418e-5, - 2.2683684021362893e-5, - 2.6180448559287584e-5, - 5.5752932611508044e-5, + 2.509979394305084e-5, 2.2683711321080935e-5, + 2.6180377720841363e-5, 5.575278031910713e-5, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -435,16 +424,12 @@ end "elixir_euler_rayleigh_taylor_instability.jl"), cells_per_dimension=(8, 8), tspan=(0.0, 0.2), l2=[ - 0.0709665896982514, - 0.005182828752164663, - 0.013832655585206478, - 0.03247013800580221, + 0.07097806723891838, 0.005168550941966817, + 0.013820912272220933, 0.03243357220022434, ], linf=[ - 0.4783963902824797, - 0.022527207050681054, - 0.040307056293369226, - 0.0852365428206836, + 0.4783395896753895, 0.02244629340135818, + 0.04023357731088538, 0.08515807256615027, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -604,16 +589,12 @@ end @trixi_testset "elixir_euler_fdsbp_periodic.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_fdsbp_periodic.jl"), l2=[ - 1.3333320340010056e-6, - 2.044834627970641e-6, - 2.044834627855601e-6, - 5.282189803559564e-6, + 1.333332033888785e-6, 2.044834627786368e-6, + 2.0448346278315884e-6, 5.282189803437435e-6, ], linf=[ - 2.7000151718858945e-6, - 3.988595028259212e-6, - 3.9885950273710336e-6, - 8.848583042286862e-6, + 2.7000151703315822e-6, 3.988595025372632e-6, + 3.9885950240403645e-6, 8.848583036513702e-6, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -628,6 +609,7 @@ end @trixi_testset "elixir_euler_fdsbp_periodic.jl (arbitrary reference domain)" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_fdsbp_periodic.jl"), xmin=-200.0, xmax=100.0, #= parameters for reference interval =# + surface_flux=FluxHLL(min_max_speed_naive), l2=[ 1.333332034149886e-6, 2.0448346280892024e-6, @@ -659,6 +641,7 @@ end @test_trixi_include(joinpath(EXAMPLES_DIR, 
"elixir_euler_fdsbp_periodic.jl"), approximation_type=D, coordinates_min=(-3.0, -4.0), coordinates_max=(0.0, -1.0), + surface_flux=FluxHLL(min_max_speed_naive), l2=[ 0.07318831033918516, 0.10039910610067465, @@ -691,6 +674,7 @@ end global D = SummationByPartsOperators.couple_continuously(D_local, mesh_local) @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_fdsbp_periodic.jl"), approximation_type=D, + surface_flux=FluxHLL(min_max_speed_naive), l2=[ 1.5440402410017893e-5, 1.4913189903083485e-5, diff --git a/test/test_dgmulti_3d.jl b/test/test_dgmulti_3d.jl index 3a1db255484..fa70b11447c 100644 --- a/test/test_dgmulti_3d.jl +++ b/test/test_dgmulti_3d.jl @@ -17,20 +17,15 @@ isdir(outdir) && rm(outdir, recursive = true) # 3d tet/hex tests @trixi_testset "elixir_euler_weakform.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_weakform.jl"), - # division by sqrt(8.0) corresponds to normalization by the square root of the size of the domain l2=[ - 0.0010029534292051608, - 0.0011682205957721673, - 0.001072975385793516, - 0.000997247778892257, - 0.0039364354651358294, - ] ./ sqrt(8), + 0.000354593110864001, 0.00041301573702385284, + 0.00037934556184883277, 0.0003525767114354012, + 0.0013917457634530887, + ], linf=[ - 0.003660737033303718, - 0.005625620600749226, - 0.0030566354814669516, - 0.0041580358824311325, - 0.019326660236036464, + 0.0036608123230692513, 0.005625540942772123, + 0.0030565781898950206, 0.004158099048202857, + 0.01932716837214299, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -74,6 +69,7 @@ end @trixi_testset "elixir_euler_weakform.jl (Hexahedral elements)" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_weakform.jl"), element_type=Hex(), + surface_integral=SurfaceIntegralWeakForm(FluxHLL(min_max_speed_naive)), # division by sqrt(8.0) corresponds to normalization by the square root of the size of the domain l2=[ 0.00030580190715769566, @@ -102,18 +98,13 @@ end @trixi_testset "elixir_euler_curved.jl (Hex elements, SBP, flux differencing)" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_curved.jl"), l2=[ - 0.018354883045936066, - 0.024412704052042846, - 0.024408520416087945, - 0.01816314570880129, - 0.039342805507972006, + 0.01835488304593566, 0.024412704052042534, + 0.02440852041608929, 0.018163145708800853, + 0.03934280550797125, ], linf=[ - 0.14862225990775757, - 0.28952368161864683, - 0.2912054484817035, - 0.1456603133854122, - 0.3315354586775472, + 0.14862225990793032, 0.2895236816183626, 0.291205448481636, + 0.14566031338563246, 0.33153545867790246, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -129,18 +120,14 @@ end @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_curved.jl"), approximation_type=GaussSBP(), l2=[ - 0.002631131519508634, - 0.0029144224044954105, - 0.002913889110662827, - 0.002615140832314194, - 0.006881528610614373, + 0.0026311315195097097, 0.002914422404496567, + 0.0029138891106640368, 0.002615140832315232, + 0.006881528610616624, ], linf=[ - 0.020996114874140215, - 0.021314522450134543, - 0.021288322783006297, - 0.020273381695435244, - 0.052598740390024545, + 0.02099611487415931, 0.021314522450152307, + 0.021288322783027613, 0.020273381695449455, + 0.05259874039006007, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -154,20 +141,15 @@ end @trixi_testset "elixir_euler_weakform_periodic.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, 
"elixir_euler_weakform_periodic.jl"), - # division by sqrt(8.0) corresponds to normalization by the square root of the size of the domain l2=[ - 0.0010317074322517949, - 0.0012277090547035293, - 0.0011273991123913515, - 0.0010418496196130177, - 0.004058878478404962, - ] ./ sqrt(8), + 0.00036475807571383924, 0.00043404536371780537, + 0.0003985850214093045, 0.0003683451584072326, + 0.00143503620472638, + ], linf=[ - 0.003227752881827861, - 0.005620317864620361, - 0.0030514833972379307, - 0.003987027618439498, - 0.019282224709831652, + 0.0032278615418719347, 0.005620238272054934, + 0.0030514261010661237, 0.0039871165455998, + 0.019282771780667396, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -182,6 +164,7 @@ end @trixi_testset "elixir_euler_weakform_periodic.jl (Hexahedral elements)" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_weakform_periodic.jl"), element_type=Hex(), + surface_integral=SurfaceIntegralWeakForm(FluxHLL(min_max_speed_naive)), # division by sqrt(8.0) corresponds to normalization by the square root of the size of the domain l2=[ 0.00034230612468547436, diff --git a/test/test_parabolic_3d.jl b/test/test_parabolic_3d.jl index 1eaa9f51a56..863daeeaf35 100644 --- a/test/test_parabolic_3d.jl +++ b/test/test_parabolic_3d.jl @@ -400,6 +400,7 @@ end @test_trixi_include(joinpath(examples_dir(), "p4est_3d_dgsem", "elixir_navierstokes_taylor_green_vortex.jl"), initial_refinement_level=2, tspan=(0.0, 0.25), + surface_flux=FluxHLL(min_max_speed_naive), l2=[ 0.0001547509861140407, 0.015637861347119624, diff --git a/test/test_special_elixirs.jl b/test/test_special_elixirs.jl index ba670a6025e..277ade9bd5c 100644 --- a/test/test_special_elixirs.jl +++ b/test/test_special_elixirs.jl @@ -286,7 +286,8 @@ end equations = CompressibleEulerEquations1D(1.4) mesh = TreeMesh((-1.0,), (1.0,), initial_refinement_level = 3, n_cells_max = 10^4) - solver = DGSEM(3, flux_hll, VolumeIntegralFluxDifferencing(flux_ranocha)) + solver = DGSEM(3, FluxHLL(min_max_speed_naive), + VolumeIntegralFluxDifferencing(flux_ranocha)) initial_condition = (x, t, equations) -> begin rho = 2 + sinpi(k * sum(x)) v1 = 0.1 diff --git a/test/test_structured_2d.jl b/test/test_structured_2d.jl index f5fb169033a..64a1faf05b8 100644 --- a/test/test_structured_2d.jl +++ b/test/test_structured_2d.jl @@ -606,16 +606,12 @@ end @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_rayleigh_taylor_instability.jl"), l2=[ - 0.06365630381017849, - 0.007166887387738937, - 0.002878708825497772, - 0.010247678114070121, + 0.06365630515019809, 0.007166887172039836, + 0.0028787103533600804, 0.010247678008197966, ], linf=[ - 0.4799214336153155, - 0.024595483032220266, - 0.02059808120543466, - 0.03190756362943725, + 0.47992143569849377, 0.02459548251933757, + 0.02059810091623976, 0.0319077000843877, ], cells_per_dimension=(8, 8), tspan=(0.0, 0.3)) @@ -659,14 +655,12 @@ end @trixi_testset "elixir_eulerpolytropic_convergence.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulerpolytropic_convergence.jl"), l2=[ - 0.0016688820596537988, - 0.0025921681885685425, - 0.003280950351435014, + 0.00166898321776379, 0.00259202637930991, + 0.0032810744946276406, ], linf=[ - 0.010994679664394269, - 0.01331197845637, - 0.020080117011346488, + 0.010994883201888683, 0.013309526619369905, + 0.020080326611175536, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -678,18 +672,19 @@ end end end -@trixi_testset 
"elixir_eulerpolytropic_convergence.jl: HLL(Davis)" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulerpolytropic_convergence.jl"), +@trixi_testset "elixir_eulerpolytropic_convergence.jl with FluxHLL(min_max_speed_naive)" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_eulerpolytropic_convergence.jl"), solver=DGSEM(polydeg = 3, - surface_flux = FluxHLL(min_max_speed_davis), + surface_flux = FluxHLL(min_max_speed_naive), volume_integral = VolumeIntegralFluxDifferencing(volume_flux)), l2=[ - 0.0016689832177644243, 0.0025920263793104445, - 0.003281074494629298, + 0.001668882059653298, 0.002592168188567654, + 0.0032809503514328307, ], linf=[ - 0.01099488320190023, 0.013309526619350365, - 0.02008032661117909, + 0.01099467966437917, 0.013311978456333584, + 0.020080117011337606, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -727,14 +722,12 @@ end @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulerpolytropic_isothermal_wave.jl"), l2=[ - 0.004998778491726366, - 0.004998916000294425, - 9.259136963058664e-17, + 0.004998778512795407, 0.004998916021367992, + 8.991558055435833e-17, ], linf=[ - 0.010001103673834888, - 0.010051165098399503, - 7.623942913643681e-16, + 0.010001103632831354, 0.010051165055185603, + 7.60697457718599e-16, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -749,14 +742,12 @@ end @trixi_testset "elixir_eulerpolytropic_wave.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulerpolytropic_wave.jl"), l2=[ - 0.23642682112204072, - 0.20904264390331334, - 8.174982691297391e-17, + 0.23642871172548174, 0.2090519382039672, + 8.778842676292274e-17, ], linf=[ - 0.4848250368349989, - 0.253350873815695, - 4.984552457753618e-16, + 0.4852276879687425, 0.25327870807625175, + 5.533921691832115e-16, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) diff --git a/test/test_threaded.jl b/test/test_threaded.jl index a8a1b1b425a..7365dcef21c 100644 --- a/test/test_threaded.jl +++ b/test/test_threaded.jl @@ -394,10 +394,10 @@ end "elixir_euler_curved.jl"), alg=RDPK3SpFSAL49(thread = OrdinaryDiffEq.True()), l2=[ - 1.720476068165337e-5, - 1.592168205710526e-5, - 1.592168205812963e-5, - 4.894094865697305e-5, + 1.7204593127904542e-5, + 1.5921547179522804e-5, + 1.5921547180107928e-5, + 4.894071422525737e-5, ], linf=[ 0.00010525416930584619, @@ -420,16 +420,16 @@ end @test_trixi_include(joinpath(examples_dir(), "dgmulti_2d", "elixir_euler_triangulate_pkg_mesh.jl"), l2=[ - 2.344080455438114e-6, - 1.8610038753097983e-6, - 2.4095165666095305e-6, - 6.373308158814308e-6, + 2.344076909832665e-6, + 1.8610002398709756e-6, + 2.4095132179484066e-6, + 6.37330249340445e-6, ], linf=[ - 2.5099852761334418e-5, - 2.2683684021362893e-5, - 2.6180448559287584e-5, - 5.5752932611508044e-5, + 2.509979394305084e-5, + 2.2683711321080935e-5, + 2.6180377720841363e-5, + 5.575278031910713e-5, ]) # Ensure that we do not have excessive memory allocations diff --git a/test/test_tree_1d_euler.jl b/test/test_tree_1d_euler.jl index 39a1f6e30ba..f26500b411c 100644 --- a/test/test_tree_1d_euler.jl +++ b/test/test_tree_1d_euler.jl @@ -221,11 +221,11 @@ end @trixi_testset "elixir_euler_ec.jl with flux_hll" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_ec.jl"), - l2=[0.07852272782240548, 0.10209790867523805, 0.293873048809011], + l2=[0.07855251823583848, 0.10213903748267686, 0.293985892532479], linf=[ - 0.19244768908604093, - 0.2515941686151897, 
- 0.7258000837553769, + 0.192621556068018, + 0.25184744005299536, + 0.7264977555504792, ], maxiters=10, surface_flux=flux_hll, diff --git a/test/test_tree_1d_eulergravity.jl b/test/test_tree_1d_eulergravity.jl index 9ab5b287d0b..17bc0c71a7a 100644 --- a/test/test_tree_1d_eulergravity.jl +++ b/test/test_tree_1d_eulergravity.jl @@ -13,14 +13,12 @@ EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_1d_dgsem") @trixi_testset "elixir_eulergravity_convergence.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulergravity_convergence.jl"), l2=[ - 0.0002170799126638106, - 0.0002913792848717502, - 0.0006112320856262327, + 0.00021708496949694728, 0.0002913795242132917, + 0.0006112500956552259, ], linf=[ - 0.0004977401033188222, - 0.0013594223337776157, - 0.002041891084400227, + 0.0004977733237385706, 0.0013594226727522418, + 0.0020418739554664, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) diff --git a/test/test_tree_1d_shallowwater.jl b/test/test_tree_1d_shallowwater.jl index f9be63b87fd..41ad5c32bbd 100644 --- a/test/test_tree_1d_shallowwater.jl +++ b/test/test_tree_1d_shallowwater.jl @@ -143,17 +143,18 @@ end @trixi_testset "elixir_shallowwater_source_terms.jl with flux_hll" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_source_terms.jl"), l2=[ - 0.0022758146627220154, - 0.015864082886204556, + 0.002275023323848826, + 0.015861093821754046, 4.436491725585346e-5, ], linf=[ - 0.008457195427364006, - 0.057201667446161064, + 0.008461451098266792, + 0.05722331401673486, 9.098379777405796e-5, ], tspan=(0.0, 0.025), - surface_flux=(flux_hll, flux_nonconservative_fjordholm_etal)) + surface_flux=(flux_hll, + flux_nonconservative_fjordholm_etal)) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -228,7 +229,7 @@ end 0.05720939349382359, 9.098379777405796e-5, ], - surface_flux=(FluxHydrostaticReconstruction(flux_hll, + surface_flux=(FluxHydrostaticReconstruction(FluxHLL(min_max_speed_naive), hydrostatic_reconstruction_audusse_etal), flux_nonconservative_audusse_etal), tspan=(0.0, 0.025)) @@ -255,7 +256,9 @@ end 3.469453422316143e-15, 3.844551077492042e-8, ], - tspan=(0.0, 0.25)) + tspan=(0.0, 0.25), + surface_flux=(FluxHLL(min_max_speed_naive), + flux_nonconservative_fjordholm_etal),) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -280,6 +283,8 @@ end 3.844551077492042e-8, ], tspan=(0.0, 0.25), + surface_flux=(FluxHLL(min_max_speed_naive), + flux_nonconservative_fjordholm_etal), boundary_condition=boundary_condition_slip_wall) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) diff --git a/test/test_tree_2d_mhd.jl b/test/test_tree_2d_mhd.jl index 1f8458075aa..66b47138a44 100644 --- a/test/test_tree_2d_mhd.jl +++ b/test/test_tree_2d_mhd.jl @@ -183,7 +183,7 @@ end end end -@trixi_testset "elixir_mhd_orszag_tang.jl with flux_hll" begin +@trixi_testset "elixir_mhd_orszag_tang.jl with flux_hlle" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_mhd_orszag_tang.jl"), l2=[ 0.10806619664693064, diff --git a/test/test_tree_2d_shallowwater.jl b/test/test_tree_2d_shallowwater.jl index 93a8cb63667..01742644736 100644 --- a/test/test_tree_2d_shallowwater.jl +++ b/test/test_tree_2d_shallowwater.jl @@ -195,6 +195,33 @@ end end @trixi_testset "elixir_shallowwater_source_terms.jl with flux_hll" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_source_terms.jl"), + l2=[ + 
0.0018952610547425214, + 0.016943425162728183, + 0.017556784292859465, + 6.274146767717414e-5, + ], + linf=[ + 0.0151635341334182, + 0.07967467926956129, + 0.08400050790965174, + 0.0001819675955490041, + ], + tspan=(0.0, 0.025), + surface_flux=(flux_hll, + flux_nonconservative_fjordholm_etal)) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + +@trixi_testset "elixir_shallowwater_source_terms.jl with FluxHLL(min_max_speed_naive)" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_source_terms.jl"), l2=[ 0.0018957692481057034, @@ -209,7 +236,8 @@ end 0.0001819675955490041, ], tspan=(0.0, 0.025), - surface_flux=(flux_hll, flux_nonconservative_fjordholm_etal)) + surface_flux=(FluxHLL(min_max_speed_naive), + flux_nonconservative_fjordholm_etal)) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let diff --git a/test/test_tree_3d_euler.jl b/test/test_tree_3d_euler.jl index 02e657e001a..e9e2b82fec5 100644 --- a/test/test_tree_3d_euler.jl +++ b/test/test_tree_3d_euler.jl @@ -92,18 +92,14 @@ end @trixi_testset "elixir_euler_convergence.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_convergence.jl"), l2=[ - 0.0003637241020254405, - 0.0003955570866382718, - 0.0003955570866383613, - 0.00039555708663834417, - 0.0007811613481640202, + 0.0003637241020254673, 0.00039555708663848046, + 0.00039555708663832644, 0.0003955570866385083, + 0.0007811613481643962, ], linf=[ - 0.0024000660244674066, - 0.0029635410025339315, - 0.0029635410025292686, - 0.002963541002525938, - 0.007191437359396424, + 0.0024000660244567484, 0.002963541002521053, + 0.0029635410025201647, 0.002963541002522385, + 0.007191437359379549, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -462,6 +458,7 @@ end 2.397746252817731, ], maxiters=5, max_level=6, + surface_flux=FluxHLL(min_max_speed_naive), coverage_override=(maxiters = 2, initial_refinement_level = 1, base_level = 1, max_level = 3)) # Ensure that we do not have excessive memory allocations diff --git a/test/test_tree_3d_mhd.jl b/test/test_tree_3d_mhd.jl index e75685f0b43..74107d462de 100644 --- a/test/test_tree_3d_mhd.jl +++ b/test/test_tree_3d_mhd.jl @@ -184,9 +184,9 @@ end end end -@trixi_testset "elixir_mhd_alfven_wave.jl with Orszag-Tang setup + flux_hll" begin +@trixi_testset "elixir_mhd_alfven_wave.jl with Orszag-Tang setup + flux_hlle" begin # OBS! 
This setup does not make much sense and is only used to exercise all components of the - # flux_hll implementation + # flux_hlle implementation @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_mhd_alfven_wave.jl"), l2=[ 0.004391143689111404, diff --git a/test/test_unstructured_2d.jl b/test/test_unstructured_2d.jl index 04eb9f679aa..87d677e1623 100644 --- a/test/test_unstructured_2d.jl +++ b/test/test_unstructured_2d.jl @@ -17,16 +17,12 @@ isdir(outdir) && rm(outdir, recursive = true) @trixi_testset "elixir_euler_periodic.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_periodic.jl"), l2=[ - 0.00010978828464875207, - 0.00013010359527356914, - 0.00013010359527326057, - 0.0002987656724828824, + 0.0001099216141882387, 0.0001303795774982892, + 0.00013037957749794242, 0.0002993727892598759, ], linf=[ - 0.00638626102818618, - 0.009804042508242183, - 0.009804042508253286, - 0.02183139311614468, + 0.006407280810928562, 0.009836067015418948, + 0.009836067015398076, 0.021903519038095176, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -41,16 +37,12 @@ end @trixi_testset "elixir_euler_free_stream.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_free_stream.jl"), l2=[ - 3.3937971107485363e-14, - 2.447586447887882e-13, - 1.4585205789296455e-13, - 4.716993468962946e-13, + 3.3937365073416665e-14, 2.44759188939065e-13, + 1.4585198700082895e-13, 4.716940764877479e-13, ], linf=[ - 8.804734719092266e-12, - 6.261270668606045e-11, - 2.93670088247211e-11, - 1.205400224080222e-10, + 8.804956763697191e-12, 6.261199891888225e-11, + 2.936639820205755e-11, 1.20543575121701e-10, ], tspan=(0.0, 0.1), atol=3.0e-13) @@ -78,7 +70,8 @@ end 0.29339040847600434, 0.5915610037764794, ], - tspan=(0.0, 0.25)) + tspan=(0.0, 0.25), + surface_flux=FluxHLL(min_max_speed_naive)) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -407,15 +400,15 @@ end @trixi_testset "elixir_shallowwater_source_terms.jl with FluxHydrostaticReconstruction" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_source_terms.jl"), l2=[ - 0.0011197139793938152, - 0.015430259691310781, - 0.017081031802719724, + 0.001119678684752799, + 0.015429108794630785, + 0.01708275441241111, 5.089218476758271e-6, ], linf=[ - 0.014300809338967824, - 0.12783372461225184, - 0.17625472321992852, + 0.014299564388827513, + 0.12785126473870534, + 0.17626788561725526, 2.6407324614341476e-5, ], surface_flux=(FluxHydrostaticReconstruction(flux_hll, @@ -464,18 +457,19 @@ end @trixi_testset "elixir_shallowwater_source_terms.jl with flux_hll" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_source_terms.jl"), l2=[ - 0.0011197139793938727, - 0.015430259691311309, - 0.017081031802719554, + 0.0011196786847528799, + 0.015429108794631075, + 0.017082754412411742, 5.089218476759981e-6, ], linf=[ - 0.014300809338967824, - 0.12783372461224918, - 0.17625472321993918, + 0.014299564388830177, + 0.12785126473870667, + 0.17626788561728546, 2.6407324614341476e-5, ], - surface_flux=(flux_hll, flux_nonconservative_fjordholm_etal), + surface_flux=(flux_hll, + flux_nonconservative_fjordholm_etal), tspan=(0.0, 0.025)) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -490,16 +484,12 @@ end @trixi_testset "elixir_shallowwater_dirichlet.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_dirichlet.jl"), l2=[ - 1.1577518608940115e-5, - 4.867189932537344e-13, - 
4.647273240470541e-13, - 1.1577518608933468e-5, + 1.1577518608938916e-5, 4.859252379740366e-13, + 4.639600837197925e-13, 1.1577518608952174e-5, ], linf=[ - 8.394063878602864e-5, - 1.1469760027632646e-10, - 1.1146619484429974e-10, - 8.394063879602065e-5, + 8.3940638787805e-5, 1.1446362498574484e-10, + 1.1124515748367981e-10, 8.39406387962427e-5, ], tspan=(0.0, 2.0)) # Ensure that we do not have excessive memory allocations @@ -516,16 +506,12 @@ end @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_wall_bc_shockcapturing.jl"), l2=[ - 0.04444388691670699, - 0.1527771788033111, - 0.1593763537203512, - 6.225080476986749e-8, + 0.0442113635677511, 0.1537465759364839, 0.16003586586203947, + 6.225080477067782e-8, ], linf=[ - 0.6526506870169639, - 1.980765893182952, - 2.4807635459119757, - 3.982097158683473e-7, + 0.6347820607387928, 2.0078125433846736, 2.530726684667019, + 3.982097165344811e-7, ], tspan=(0.0, 0.05)) # Ensure that we do not have excessive memory allocations @@ -609,12 +595,12 @@ end 1.0066867437607972e-13, 6.889210012578449e-14, 1.568290814572709e-13], - linf=[2.353373051988683e-10, - 2.801543719233024e-11, - 3.930469838486772e-11, + linf=[5.6139981552405516e-11, + 2.842849566864203e-11, + 1.8290174930157832e-11, 4.61017890529547e-11], tspan=(0.0, 0.1), - atol=1.0e-11) + atol=1.0e-10) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let From c7693aaf06587de194147c9c70888c124ed2daf3 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Fri, 23 Feb 2024 13:47:12 +0100 Subject: [PATCH 121/166] set version to v0.7.0; closes #1726 --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 6b27e6e9999..2fb8e196829 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "] -version = "0.6.11-pre" +version = "0.7.0" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From 27d91b3ceeb15e1079b815268738b365635c6cab Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Fri, 23 Feb 2024 13:47:49 +0100 Subject: [PATCH 122/166] set development version to v0.7.1-pre --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 2fb8e196829..800c7b4c0fa 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. 
Winters ", "Jesse Chan "] -version = "0.7.0" +version = "0.7.1-pre" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From 4ee60e7973ff78d553dad65f7f4760443891bdb6 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sat, 24 Feb 2024 05:57:09 +0100 Subject: [PATCH 123/166] CompatHelper: bump compat for Trixi to 0.7 for package benchmark, (keep existing compat) (#1853) Co-authored-by: CompatHelper Julia --- benchmark/Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmark/Project.toml b/benchmark/Project.toml index e94144cfd15..51d271e65fc 100644 --- a/benchmark/Project.toml +++ b/benchmark/Project.toml @@ -8,4 +8,4 @@ Trixi = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" BenchmarkTools = "0.5, 0.7, 1.0" OrdinaryDiffEq = "5.65, 6" PkgBenchmark = "0.2.10" -Trixi = "0.4, 0.5, 0.6" +Trixi = "0.4, 0.5, 0.6, 0.7" From e205b0637d72341041f59cc740933a2bc5835687 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Mar 2024 17:30:44 +0100 Subject: [PATCH 124/166] Bump crate-ci/typos from 1.18.0 to 1.18.2 (#1856) Bumps [crate-ci/typos](https://github.com/crate-ci/typos) from 1.18.0 to 1.18.2. - [Release notes](https://github.com/crate-ci/typos/releases) - [Changelog](https://github.com/crate-ci/typos/blob/master/CHANGELOG.md) - [Commits](https://github.com/crate-ci/typos/compare/v1.18.0...v1.18.2) --- updated-dependencies: - dependency-name: crate-ci/typos dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/SpellCheck.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/SpellCheck.yml b/.github/workflows/SpellCheck.yml index b242b6e811e..87e34cb50f3 100644 --- a/.github/workflows/SpellCheck.yml +++ b/.github/workflows/SpellCheck.yml @@ -10,4 +10,4 @@ jobs: - name: Checkout Actions Repository uses: actions/checkout@v4 - name: Check spelling - uses: crate-ci/typos@v1.18.0 + uses: crate-ci/typos@v1.18.2 From 8cdb93892dead6e33ae581e60fd8199aec4ee19e Mon Sep 17 00:00:00 2001 From: Andrew Winters Date: Wed, 6 Mar 2024 13:24:50 +0100 Subject: [PATCH 125/166] Upwind SBP on curved meshes (#1857) * baseline implementation of the curvilinear USBP for testing * working version of curvilinear upwind solver. 
Needs significant cleanup of debugging statements and different variants of the rotated flux vector splittings * cleanup of the fdsbp_2d file * clean-up FVS routines in the compressible Euler file * cleanup and remove unnecessary containers * add tests for the new solver * remove extra space * run formatter * Apply suggestions from code review Co-authored-by: Hendrik Ranocha * add specialized calc_metric_terms function for upwind type * revert change to the surface integral function * add reference for curvilinear van Leer splitting * new splitting_drikakis_tsangaris in Cartesian and generalized coordinates * added test for Cartesian splitting_drikakis_tsangaris * run formatter * Update src/equations/compressible_euler_2d.jl * remove orientation_or_normal from Steger-Warming --------- Co-authored-by: Hendrik Ranocha --- .../elixir_euler_free_stream_upwind.jl | 86 ++++++ .../elixir_euler_source_terms_upwind.jl | 87 ++++++ src/Trixi.jl | 3 +- src/equations/compressible_euler_2d.jl | 290 +++++++++++++++++- src/equations/numerical_fluxes.jl | 12 +- src/solvers/dgsem_unstructured/dg_2d.jl | 2 +- src/solvers/fdsbp_tree/fdsbp_2d.jl | 2 +- .../fdsbp_unstructured/containers_2d.jl | 10 +- src/solvers/fdsbp_unstructured/fdsbp_2d.jl | 99 +++++- test/test_tree_2d_fdsbp.jl | 26 ++ test/test_unstructured_2d.jl | 70 +++++ 11 files changed, 659 insertions(+), 28 deletions(-) create mode 100644 examples/unstructured_2d_fdsbp/elixir_euler_free_stream_upwind.jl create mode 100644 examples/unstructured_2d_fdsbp/elixir_euler_source_terms_upwind.jl diff --git a/examples/unstructured_2d_fdsbp/elixir_euler_free_stream_upwind.jl b/examples/unstructured_2d_fdsbp/elixir_euler_free_stream_upwind.jl new file mode 100644 index 00000000000..2a1956f9d10 --- /dev/null +++ b/examples/unstructured_2d_fdsbp/elixir_euler_free_stream_upwind.jl @@ -0,0 +1,86 @@ +# !!! warning "Experimental implementation (upwind SBP)" +# This is an experimental feature and may change in future releases. + +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the compressible Euler equations + +equations = CompressibleEulerEquations2D(1.4) + +initial_condition = initial_condition_constant + +# Boundary conditions for free-stream preservation test +boundary_condition_free_stream = BoundaryConditionDirichlet(initial_condition) + +boundary_conditions = Dict(:outerCircle => boundary_condition_free_stream, + :cone1 => boundary_condition_free_stream, + :cone2 => boundary_condition_free_stream, + :iceCream => boundary_condition_free_stream) + +############################################################################### +# Get the Upwind FDSBP approximation space + +# TODO: FDSBP +# Note, one must set `xmin=-1` and `xmax=1` due to the reuse +# of interpolation routines from `calc_node_coordinates!` to create +# the physical coordinates in the mappings. 
+D_upw = upwind_operators(SummationByPartsOperators.Mattsson2017, + derivative_order = 1, + accuracy_order = 8, + xmin = -1.0, xmax = 1.0, + N = 17) + +flux_splitting = splitting_vanleer_haenel +solver = FDSBP(D_upw, + surface_integral = SurfaceIntegralStrongForm(FluxUpwind(flux_splitting)), + volume_integral = VolumeIntegralUpwind(flux_splitting)) + +############################################################################### +# Get the curved quad mesh from a file (downloads the file if not available locally) + +# Mesh with second-order boundary polynomials requires an upwind SBP operator +# with (at least) 4th order boundary closure to guarantee the approximation is +# free-stream preserving +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/ec9a345f09199ebe471d35d5c1e4e08f/raw/15975943d8642e42f8292235314b6f1b30aa860d/mesh_inner_outer_boundaries.mesh", + joinpath(@__DIR__, "mesh_inner_outer_boundaries.mesh")) + +mesh = UnstructuredMesh2D(mesh_file) + +############################################################################### +# create the semi discretization object + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions) + +############################################################################### +# ODE solvers, callbacks etc. + +tspan = (0.0, 5.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 1000 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +save_solution = SaveSolutionCallback(interval = 1000, + save_initial_solution = true, + save_final_solution = true) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + save_solution, + alive_callback) + +############################################################################### +# run the simulation + +# set small tolerances for the free-stream preservation test +sol = solve(ode, SSPRK43(), abstol = 1.0e-12, reltol = 1.0e-12, + save_everystep = false, callback = callbacks) + +summary_callback() # print the timer summary diff --git a/examples/unstructured_2d_fdsbp/elixir_euler_source_terms_upwind.jl b/examples/unstructured_2d_fdsbp/elixir_euler_source_terms_upwind.jl new file mode 100644 index 00000000000..9bd2afa5749 --- /dev/null +++ b/examples/unstructured_2d_fdsbp/elixir_euler_source_terms_upwind.jl @@ -0,0 +1,87 @@ +# !!! warning "Experimental implementation (upwind SBP)" +# This is an experimental feature and may change in future releases. + +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the compressible Euler equations + +equations = CompressibleEulerEquations2D(1.4) + +initial_condition = initial_condition_convergence_test + +source_term = source_terms_convergence_test + +boundary_condition_eoc = BoundaryConditionDirichlet(initial_condition) + +boundary_conditions = Dict(:Top => boundary_condition_eoc, + :Bottom => boundary_condition_eoc, + :Right => boundary_condition_eoc, + :Left => boundary_condition_eoc) + +############################################################################### +# Get the Upwind FDSBP approximation space + +# TODO: FDSBP +# Note, one must set `xmin=-1` and `xmax=1` due to the reuse +# of interpolation routines from `calc_node_coordinates!` to create +# the physical coordinates in the mappings. 
+D_upw = upwind_operators(SummationByPartsOperators.Mattsson2017, + derivative_order = 1, + accuracy_order = 4, + xmin = -1.0, xmax = 1.0, + N = 9) + +flux_splitting = splitting_drikakis_tsangaris +solver = FDSBP(D_upw, + surface_integral = SurfaceIntegralStrongForm(FluxUpwind(flux_splitting)), + volume_integral = VolumeIntegralUpwind(flux_splitting)) + +############################################################################### +# Get the curved quad mesh from a file (downloads the file if not available locally) + +# Mesh with first-order boundary polynomials requires an upwind SBP operator +# with (at least) 2nd order boundary closure to guarantee the approximation is +# free-stream preserving +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/a4f4743008bf3233957a9ea6ac7a62e0/raw/8b36cc6649153fe0a5723b200368a210a1d74eaf/mesh_refined_box.mesh", + joinpath(@__DIR__, "mesh_refined_box.mesh")) + +mesh = UnstructuredMesh2D(mesh_file) + +############################################################################### +# create the semidiscretization object + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + source_terms = source_term, + boundary_conditions = boundary_conditions) + +############################################################################### +# ODE solvers, callbacks etc. + +tspan = (0.0, 1.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 1000 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +save_solution = SaveSolutionCallback(interval = 1000, + save_initial_solution = true, + save_final_solution = true) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + save_solution, + alive_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, SSPRK43(), abstol = 1.0e-6, reltol = 1.0e-6, + save_everystep = false, callback = callbacks) + +summary_callback() # print the timer summary diff --git a/src/Trixi.jl b/src/Trixi.jl index 5f8cd9cae8e..da7359999c5 100644 --- a/src/Trixi.jl +++ b/src/Trixi.jl @@ -191,7 +191,8 @@ export flux, flux_central, flux_lax_friedrichs, flux_hll, flux_hllc, flux_hlle, FluxUpwind export splitting_steger_warming, splitting_vanleer_haenel, - splitting_coirier_vanleer, splitting_lax_friedrichs + splitting_coirier_vanleer, splitting_lax_friedrichs, + splitting_drikakis_tsangaris export initial_condition_constant, initial_condition_gauss, diff --git a/src/equations/compressible_euler_2d.jl b/src/equations/compressible_euler_2d.jl index f5a632723cf..43f15a3cfb9 100644 --- a/src/equations/compressible_euler_2d.jl +++ b/src/equations/compressible_euler_2d.jl @@ -689,7 +689,9 @@ end orientation::Integer, equations::CompressibleEulerEquations2D) -Splitting of the compressible Euler flux of Steger and Warming. +Splitting of the compressible Euler flux of Steger and Warming. For +curvilinear coordinates use the improved Steger-Warming-type splitting +[`splitting_drikakis_tsangaris`](@ref). 
Returns a tuple of the fluxes "minus" (associated with waves going into the negative axis direction) and "plus" (associated with waves going into the @@ -809,6 +811,174 @@ end return SVector(f1m, f2m, f3m, f4m) end +""" + splitting_drikakis_tsangaris(u, orientation_or_normal_direction, + equations::CompressibleEulerEquations2D) + splitting_drikakis_tsangaris(u, which::Union{Val{:minus}, Val{:plus}} + orientation_or_normal_direction, + equations::CompressibleEulerEquations2D) + +Improved variant of the Steger-Warming flux vector splitting +[`splitting_steger_warming`](@ref) for generalized coordinates. +This splitting also reformulates the energy +flux as in Hänel et al. to obtain conservation of the total temperature +for inviscid flows. + +Returns a tuple of the fluxes "minus" (associated with waves going into the +negative axis direction) and "plus" (associated with waves going into the +positive axis direction). If only one of the fluxes is required, use the +function signature with argument `which` set to `Val{:minus}()` or `Val{:plus}()`. + +!!! warning "Experimental implementation (upwind SBP)" + This is an experimental feature and may change in future releases. + +## References + +- D. Drikakis and S. Tsangaris (1993) + On the solution of the compressible Navier-Stokes equations using + improved flux vector splitting methods + [DOI: 10.1016/0307-904X(93)90054-K](https://doi.org/10.1016/0307-904X(93)90054-K) +- D. Hänel, R. Schwane and G. Seider (1987) + On the accuracy of upwind schemes for the solution of the Navier-Stokes equations + [DOI: 10.2514/6.1987-1105](https://doi.org/10.2514/6.1987-1105) +""" +@inline function splitting_drikakis_tsangaris(u, orientation_or_normal_direction, + equations::CompressibleEulerEquations2D) + fm = splitting_drikakis_tsangaris(u, Val{:minus}(), orientation_or_normal_direction, + equations) + fp = splitting_drikakis_tsangaris(u, Val{:plus}(), orientation_or_normal_direction, + equations) + return fm, fp +end + +@inline function splitting_drikakis_tsangaris(u, ::Val{:plus}, orientation::Integer, + equations::CompressibleEulerEquations2D) + rho, rho_v1, rho_v2, rho_e = u + v1 = rho_v1 / rho + v2 = rho_v2 / rho + p = (equations.gamma - 1) * (rho_e - 0.5 * (rho_v1 * v1 + rho_v2 * v2)) + a = sqrt(equations.gamma * p / rho) + H = (rho_e + p) / rho + + if orientation == 1 + lambda1 = v1 + a + lambda2 = v1 - a + + lambda1_p = positive_part(lambda1) # Same as (lambda_i + abs(lambda_i)) / 2, but faster :) + lambda2_p = positive_part(lambda2) + + rhoa_2gamma = 0.5 * rho * a / equations.gamma + f1p = 0.5 * rho * (lambda1_p + lambda2_p) + f2p = f1p * v1 + rhoa_2gamma * (lambda1_p - lambda2_p) + f3p = f1p * v2 + f4p = f1p * H + else # orientation == 2 + lambda1 = v2 + a + lambda2 = v2 - a + + lambda1_p = positive_part(lambda1) # Same as (lambda_i + abs(lambda_i)) / 2, but faster :) + lambda2_p = positive_part(lambda2) + + rhoa_2gamma = 0.5 * rho * a / equations.gamma + f1p = 0.5 * rho * (lambda1_p + lambda2_p) + f2p = f1p * v1 + f3p = f1p * v2 + rhoa_2gamma * (lambda1_p - lambda2_p) + f4p = f1p * H + end + return SVector(f1p, f2p, f3p, f4p) +end + +@inline function splitting_drikakis_tsangaris(u, ::Val{:minus}, orientation::Integer, + equations::CompressibleEulerEquations2D) + rho, rho_v1, rho_v2, rho_e = u + v1 = rho_v1 / rho + v2 = rho_v2 / rho + p = (equations.gamma - 1) * (rho_e - 0.5 * (rho_v1 * v1 + rho_v2 * v2)) + a = sqrt(equations.gamma * p / rho) + H = (rho_e + p) / rho + + if orientation == 1 + lambda1 = v1 + a + lambda2 = v1 - a + + lambda1_m = 
negative_part(lambda1) # Same as (lambda_i - abs(lambda_i)) / 2, but faster :) + lambda2_m = negative_part(lambda2) + + rhoa_2gamma = 0.5 * rho * a / equations.gamma + f1m = 0.5 * rho * (lambda1_m + lambda2_m) + f2m = f1m * v1 + rhoa_2gamma * (lambda1_m - lambda2_m) + f3m = f1m * v2 + f4m = f1m * H + else # orientation == 2 + lambda1 = v2 + a + lambda2 = v2 - a + + lambda1_m = negative_part(lambda1) # Same as (lambda_i - abs(lambda_i)) / 2, but faster :) + lambda2_m = negative_part(lambda2) + + rhoa_2gamma = 0.5 * rho * a / equations.gamma + f1m = 0.5 * rho * (lambda1_m + lambda2_m) + f2m = f1m * v1 + f3m = f1m * v2 + rhoa_2gamma * (lambda1_m - lambda2_m) + f4m = f1m * H + end + return SVector(f1m, f2m, f3m, f4m) +end + +@inline function splitting_drikakis_tsangaris(u, ::Val{:plus}, + normal_direction::AbstractVector, + equations::CompressibleEulerEquations2D) + rho, rho_v1, rho_v2, rho_e = u + v1 = rho_v1 / rho + v2 = rho_v2 / rho + p = (equations.gamma - 1) * (rho_e - 0.5 * (rho_v1 * v1 + rho_v2 * v2)) + a = sqrt(equations.gamma * p / rho) + H = (rho_e + p) / rho + + v_n = normal_direction[1] * v1 + normal_direction[2] * v2 + + lambda1 = v_n + a + lambda2 = v_n - a + + lambda1_p = positive_part(lambda1) # Same as (lambda_i + abs(lambda_i)) / 2, but faster :) + lambda2_p = positive_part(lambda2) + + rhoa_2gamma = 0.5 * rho * a / equations.gamma + f1p = 0.5 * rho * (lambda1_p + lambda2_p) + f2p = f1p * v1 + rhoa_2gamma * normal_direction[1] * (lambda1_p - lambda2_p) + f3p = f1p * v2 + rhoa_2gamma * normal_direction[2] * (lambda1_p - lambda2_p) + f4p = f1p * H + + return SVector(f1p, f2p, f3p, f4p) +end + +@inline function splitting_drikakis_tsangaris(u, ::Val{:minus}, + normal_direction::AbstractVector, + equations::CompressibleEulerEquations2D) + rho, rho_v1, rho_v2, rho_e = u + v1 = rho_v1 / rho + v2 = rho_v2 / rho + p = (equations.gamma - 1) * (rho_e - 0.5 * (rho_v1 * v1 + rho_v2 * v2)) + a = sqrt(equations.gamma * p / rho) + H = (rho_e + p) / rho + + v_n = normal_direction[1] * v1 + normal_direction[2] * v2 + + lambda1 = v_n + a + lambda2 = v_n - a + + lambda1_m = negative_part(lambda1) # Same as (lambda_i - abs(lambda_i)) / 2, but faster :) + lambda2_m = negative_part(lambda2) + + rhoa_2gamma = 0.5 * rho * a / equations.gamma + f1m = 0.5 * rho * (lambda1_m + lambda2_m) + f2m = f1m * v1 + rhoa_2gamma * normal_direction[1] * (lambda1_m - lambda2_m) + f3m = f1m * v2 + rhoa_2gamma * normal_direction[2] * (lambda1_m - lambda2_m) + f4m = f1m * H + + return SVector(f1m, f2m, f3m, f4m) +end + """ FluxLMARS(c)(u_ll, u_rr, orientation_or_normal_direction, equations::CompressibleEulerEquations2D) @@ -902,10 +1072,10 @@ end end """ - splitting_vanleer_haenel(u, orientation::Integer, + splitting_vanleer_haenel(u, orientation_or_normal_direction, equations::CompressibleEulerEquations2D) splitting_vanleer_haenel(u, which::Union{Val{:minus}, Val{:plus}} - orientation::Integer, + orientation_or_normal_direction, equations::CompressibleEulerEquations2D) Splitting of the compressible Euler flux from van Leer. This splitting further @@ -913,7 +1083,8 @@ contains a reformulation due to Hänel et al. where the energy flux uses the enthalpy. The pressure splitting is independent from the splitting of the convective terms. As such there are many pressure splittings suggested across the literature. We implement the 'p4' variant suggested by Liou and Steffen as -it proved the most robust in practice. +it proved the most robust in practice. 
For details on the curvilinear variant +of this flux vector splitting see Anderson et al. Returns a tuple of the fluxes "minus" (associated with waves going into the negative axis direction) and "plus" (associated with waves going into the @@ -934,11 +1105,16 @@ function signature with argument `which` set to `Val{:minus}()` or `Val{:plus}() - Meng-Sing Liou and Chris J. Steffen, Jr. (1991) High-Order Polynomial Expansions (HOPE) for Flux-Vector Splitting [NASA Technical Memorandum](https://ntrs.nasa.gov/citations/19910016425) +- W. Kyle Anderson, James L. Thomas, and Bram van Leer (1986) + Comparison of Finite Volume Flux Vector Splittings for the Euler Equations + [DOI: 10.2514/3.9465](https://doi.org/10.2514/3.9465) """ -@inline function splitting_vanleer_haenel(u, orientation::Integer, +@inline function splitting_vanleer_haenel(u, orientation_or_normal_direction, equations::CompressibleEulerEquations2D) - fm = splitting_vanleer_haenel(u, Val{:minus}(), orientation, equations) - fp = splitting_vanleer_haenel(u, Val{:plus}(), orientation, equations) + fm = splitting_vanleer_haenel(u, Val{:minus}(), orientation_or_normal_direction, + equations) + fp = splitting_vanleer_haenel(u, Val{:plus}(), orientation_or_normal_direction, + equations) return fm, fp end @@ -1002,11 +1178,57 @@ end return SVector(f1m, f2m, f3m, f4m) end +@inline function splitting_vanleer_haenel(u, ::Val{:plus}, + normal_direction::AbstractVector, + equations::CompressibleEulerEquations2D) + rho, rho_v1, rho_v2, rho_e = u + v1 = rho_v1 / rho + v2 = rho_v2 / rho + p = (equations.gamma - 1) * (rho_e - 0.5 * (rho_v1 * v1 + rho_v2 * v2)) + + a = sqrt(equations.gamma * p / rho) + H = (rho_e + p) / rho + + v_n = normal_direction[1] * v1 + normal_direction[2] * v2 + M = v_n / a + p_plus = 0.5 * (1 + equations.gamma * M) * p + + f1p = 0.25 * rho * a * (M + 1)^2 + f2p = f1p * v1 + normal_direction[1] * p_plus + f3p = f1p * v2 + normal_direction[2] * p_plus + f4p = f1p * H + + return SVector(f1p, f2p, f3p, f4p) +end + +@inline function splitting_vanleer_haenel(u, ::Val{:minus}, + normal_direction::AbstractVector, + equations::CompressibleEulerEquations2D) + rho, rho_v1, rho_v2, rho_e = u + v1 = rho_v1 / rho + v2 = rho_v2 / rho + p = (equations.gamma - 1) * (rho_e - 0.5 * (rho_v1 * v1 + rho_v2 * v2)) + + a = sqrt(equations.gamma * p / rho) + H = (rho_e + p) / rho + + v_n = normal_direction[1] * v1 + normal_direction[2] * v2 + M = v_n / a + p_minus = 0.5 * (1 - equations.gamma * M) * p + + f1m = -0.25 * rho * a * (M - 1)^2 + f2m = f1m * v1 + normal_direction[1] * p_minus + f3m = f1m * v2 + normal_direction[2] * p_minus + f4m = f1m * H + + return SVector(f1m, f2m, f3m, f4m) +end + """ - splitting_lax_friedrichs(u, orientation::Integer, + splitting_lax_friedrichs(u, orientation_or_normal_direction, equations::CompressibleEulerEquations2D) splitting_lax_friedrichs(u, which::Union{Val{:minus}, Val{:plus}} - orientation::Integer, + orientation_or_normal_direction, equations::CompressibleEulerEquations2D) Naive local Lax-Friedrichs style flux splitting of the form `f⁺ = 0.5 (f + λ u)` @@ -1021,10 +1243,12 @@ function signature with argument `which` set to `Val{:minus}()` or `Val{:plus}() !!! warning "Experimental implementation (upwind SBP)" This is an experimental feature and may change in future releases. 
""" -@inline function splitting_lax_friedrichs(u, orientation::Integer, +@inline function splitting_lax_friedrichs(u, orientation_or_normal_direction, equations::CompressibleEulerEquations2D) - fm = splitting_lax_friedrichs(u, Val{:minus}(), orientation, equations) - fp = splitting_lax_friedrichs(u, Val{:plus}(), orientation, equations) + fm = splitting_lax_friedrichs(u, Val{:minus}(), orientation_or_normal_direction, + equations) + fp = splitting_lax_friedrichs(u, Val{:plus}(), orientation_or_normal_direction, + equations) return fm, fp end @@ -1082,6 +1306,48 @@ end return SVector(f1m, f2m, f3m, f4m) end +@inline function splitting_lax_friedrichs(u, ::Val{:plus}, + normal_direction::AbstractVector, + equations::CompressibleEulerEquations2D) + rho_e = last(u) + rho, v1, v2, p = cons2prim(u, equations) + + a = sqrt(equations.gamma * p / rho) + H = (rho_e + p) / rho + lambda = 0.5 * (sqrt(v1^2 + v2^2) + a) + + v_normal = v1 * normal_direction[1] + v2 * normal_direction[2] + rho_v_normal = rho * v_normal + + f1p = 0.5 * rho_v_normal + lambda * u[1] + f2p = 0.5 * rho_v_normal * v1 + 0.5 * p * normal_direction[1] + lambda * u[2] + f3p = 0.5 * rho_v_normal * v2 + 0.5 * p * normal_direction[2] + lambda * u[3] + f4p = 0.5 * rho_v_normal * H + lambda * u[4] + + return SVector(f1p, f2p, f3p, f4p) +end + +@inline function splitting_lax_friedrichs(u, ::Val{:minus}, + normal_direction::AbstractVector, + equations::CompressibleEulerEquations2D) + rho_e = last(u) + rho, v1, v2, p = cons2prim(u, equations) + + a = sqrt(equations.gamma * p / rho) + H = (rho_e + p) / rho + lambda = 0.5 * (sqrt(v1^2 + v2^2) + a) + + v_normal = v1 * normal_direction[1] + v2 * normal_direction[2] + rho_v_normal = rho * v_normal + + f1m = 0.5 * rho_v_normal - lambda * u[1] + f2m = 0.5 * rho_v_normal * v1 + 0.5 * p * normal_direction[1] - lambda * u[2] + f3m = 0.5 * rho_v_normal * v2 + 0.5 * p * normal_direction[2] - lambda * u[3] + f4m = 0.5 * rho_v_normal * H - lambda * u[4] + + return SVector(f1m, f2m, f3m, f4m) +end + # Calculate maximum wave speed for local Lax-Friedrichs-type dissipation as the # maximum velocity magnitude plus the maximum speed of sound @inline function max_abs_speed_naive(u_ll, u_rr, orientation::Integer, diff --git a/src/equations/numerical_fluxes.jl b/src/equations/numerical_fluxes.jl index 6794c71a32b..e3e798381ae 100644 --- a/src/equations/numerical_fluxes.jl +++ b/src/equations/numerical_fluxes.jl @@ -415,7 +415,8 @@ flux vector splitting. The [`SurfaceIntegralUpwind`](@ref) with a given `splitting` is equivalent to the [`SurfaceIntegralStrongForm`](@ref) with `FluxUpwind(splitting)` -as numerical flux (up to floating point differences). +as numerical flux (up to floating point differences). Note, that +[`SurfaceIntegralUpwind`](@ref) is only available on [`TreeMesh`](@ref). !!! warning "Experimental implementation (upwind SBP)" This is an experimental feature and may change in future releases. 
@@ -431,5 +432,14 @@ end return fm + fp end +@inline function (numflux::FluxUpwind)(u_ll, u_rr, + normal_direction::AbstractVector, + equations::AbstractEquations{2}) + @unpack splitting = numflux + f_tilde_m = splitting(u_rr, Val{:minus}(), normal_direction, equations) + f_tilde_p = splitting(u_ll, Val{:plus}(), normal_direction, equations) + return f_tilde_m + f_tilde_p +end + Base.show(io::IO, f::FluxUpwind) = print(io, "FluxUpwind(", f.splitting, ")") end # @muladd diff --git a/src/solvers/dgsem_unstructured/dg_2d.jl b/src/solvers/dgsem_unstructured/dg_2d.jl index b12a96c4c31..988e995d6b7 100644 --- a/src/solvers/dgsem_unstructured/dg_2d.jl +++ b/src/solvers/dgsem_unstructured/dg_2d.jl @@ -77,7 +77,7 @@ function rhs!(du, u, t, end # Apply Jacobian from mapping to reference element - # Note! this routine is reused from dg_curved/dg_2d.jl + # Note! this routine is reused from dgsem_structured/dg_2d.jl @trixi_timeit timer() "Jacobian" apply_jacobian!(du, mesh, equations, dg, cache) # Calculate source terms diff --git a/src/solvers/fdsbp_tree/fdsbp_2d.jl b/src/solvers/fdsbp_tree/fdsbp_2d.jl index 09d18cecd75..36afbbc022f 100644 --- a/src/solvers/fdsbp_tree/fdsbp_2d.jl +++ b/src/solvers/fdsbp_tree/fdsbp_2d.jl @@ -19,7 +19,7 @@ function create_cache(mesh::Union{TreeMesh{2}, UnstructuredMesh2D}, equations, return (; f_threaded) end -function create_cache(mesh::TreeMesh{2}, equations, +function create_cache(mesh::Union{TreeMesh{2}, UnstructuredMesh2D}, equations, volume_integral::VolumeIntegralUpwind, dg, uEltype) u_node = SVector{nvariables(equations), uEltype}(ntuple(_ -> zero(uEltype), Val{nvariables(equations)}())) diff --git a/src/solvers/fdsbp_unstructured/containers_2d.jl b/src/solvers/fdsbp_unstructured/containers_2d.jl index 3857c2d8a20..f68b1e00f59 100644 --- a/src/solvers/fdsbp_unstructured/containers_2d.jl +++ b/src/solvers/fdsbp_unstructured/containers_2d.jl @@ -9,7 +9,7 @@ #! format: noindent # initialize all the values in the container of a general FD block (either straight sided or curved) -# OBS! Requires the SBP derivative matrix in order to compute metric terms that are free-stream preserving +# OBS! Requires the SBP derivative matrix in order to compute metric terms. function init_element!(elements, element, basis::AbstractDerivativeOperator, corners_or_surface_curves) calc_node_coordinates!(elements.node_coordinates, element, get_nodes(basis), @@ -29,9 +29,15 @@ function init_element!(elements, element, basis::AbstractDerivativeOperator, return elements end +# Specialization to pass the central differencing matrix from an upwind SBP operator +function calc_metric_terms!(jacobian_matrix, element, + D_SBP::SummationByPartsOperators.UpwindOperators, + node_coordinates) + calc_metric_terms!(jacobian_matrix, element, D_SBP.central, node_coordinates) +end + # construct the metric terms for a FDSBP element "block". Directly use the derivative matrix # applied to the node coordinates. 
-# TODO: FD; How to make this work for the upwind solver because basis has three available derivative matrices function calc_metric_terms!(jacobian_matrix, element, D_SBP::AbstractDerivativeOperator, node_coordinates) diff --git a/src/solvers/fdsbp_unstructured/fdsbp_2d.jl b/src/solvers/fdsbp_unstructured/fdsbp_2d.jl index b459f4c42cc..c35772cdf18 100644 --- a/src/solvers/fdsbp_unstructured/fdsbp_2d.jl +++ b/src/solvers/fdsbp_unstructured/fdsbp_2d.jl @@ -25,8 +25,6 @@ function create_cache(mesh::UnstructuredMesh2D, equations, dg::FDSBP, RealT, uEl return cache end -# TODO: FD; Upwind versions of surface / volume integral - # 2D volume integral contributions for `VolumeIntegralStrongForm` # OBS! This is the standard (not de-aliased) form of the volume integral. # So it is not provably stable for variable coefficients due to the the metric terms. @@ -86,6 +84,91 @@ end return nothing end +# 2D volume integral contributions for `VolumeIntegralUpwind`. +# Note that the plus / minus notation of the operators does not refer to the +# upwind / downwind directions of the fluxes. +# Instead, the plus / minus refers to the direction of the biasing within +# the finite difference stencils. Thus, the D^- operator acts on the positive +# part of the flux splitting f^+ and the D^+ operator acts on the negative part +# of the flux splitting f^-. +function calc_volume_integral!(du, u, + mesh::UnstructuredMesh2D, + nonconservative_terms::False, equations, + volume_integral::VolumeIntegralUpwind, + dg::FDSBP, cache) + # Assume that + # dg.basis isa SummationByPartsOperators.UpwindOperators + D_minus = dg.basis.minus # Upwind SBP D^- derivative operator + D_plus = dg.basis.plus # Upwind SBP D^+ derivative operator + @unpack f_minus_plus_threaded, f_minus_threaded, f_plus_threaded = cache + @unpack splitting = volume_integral + @unpack contravariant_vectors = cache.elements + + # SBP operators from SummationByPartsOperators.jl implement the basic interface + # of matrix-vector multiplication. Thus, we pass an "array of structures", + # packing all variables per node in an `SVector`. + if nvariables(equations) == 1 + # `reinterpret(reshape, ...)` removes the leading dimension only if more + # than one variable is used. + u_vectors = reshape(reinterpret(SVector{nvariables(equations), eltype(u)}, u), + nnodes(dg), nnodes(dg), nelements(dg, cache)) + du_vectors = reshape(reinterpret(SVector{nvariables(equations), eltype(du)}, + du), + nnodes(dg), nnodes(dg), nelements(dg, cache)) + else + u_vectors = reinterpret(reshape, SVector{nvariables(equations), eltype(u)}, u) + du_vectors = reinterpret(reshape, SVector{nvariables(equations), eltype(du)}, + du) + end + + # Use the tensor product structure to compute the discrete derivatives of + # the fluxes line-by-line and add them to `du` for each element. + @threaded for element in eachelement(dg, cache) + # f_minus_plus_element wraps the storage provided by f_minus_element and + # f_plus_element such that we can use a single assignment below. + # f_minus_element and f_plus_element are updated whenever we update + # `f_minus_plus_element[i, j] = ...` below. + f_minus_plus_element = f_minus_plus_threaded[Threads.threadid()] + f_minus_element = f_minus_threaded[Threads.threadid()] + f_plus_element = f_plus_threaded[Threads.threadid()] + u_element = view(u_vectors, :, :, element) + + # x direction + # We use flux vector splittings in the directions of the contravariant + # basis vectors. Thus, we do not use a broadcasting operation like + # @. 
f_minus_plus_element = splitting(u_element, 1, equations) + # in the Cartesian case but loop over all nodes. + for j in eachnode(dg), i in eachnode(dg) + # contravariant vectors computed with central D matrix + Ja1 = get_contravariant_vector(1, contravariant_vectors, i, j, element) + f_minus_plus_element[i, j] = splitting(u_element[i, j], Ja1, equations) + end + + for j in eachnode(dg) + mul!(view(du_vectors, :, j, element), D_minus, view(f_plus_element, :, j), + one(eltype(du)), one(eltype(du))) + mul!(view(du_vectors, :, j, element), D_plus, view(f_minus_element, :, j), + one(eltype(du)), one(eltype(du))) + end + + # y direction + for j in eachnode(dg), i in eachnode(dg) + # contravariant vectors computed with central D matrix + Ja2 = get_contravariant_vector(2, contravariant_vectors, i, j, element) + f_minus_plus_element[i, j] = splitting(u_element[i, j], Ja2, equations) + end + + for i in eachnode(dg) + mul!(view(du_vectors, i, :, element), D_minus, view(f_plus_element, i, :), + one(eltype(du)), one(eltype(du))) + mul!(view(du_vectors, i, :, element), D_plus, view(f_minus_element, i, :), + one(eltype(du)), one(eltype(du))) + end + end + + return nothing +end + # Note! The local side numbering for the unstructured quadrilateral element implementation differs # from the structured TreeMesh or StructuredMesh local side numbering: # @@ -114,8 +197,7 @@ function calc_surface_integral!(du, u, mesh::UnstructuredMesh2D, # surface at -x u_node = get_node_vars(u, equations, dg, 1, l, element) # compute internal flux in normal direction on side 4 - outward_direction = get_node_coords(normal_directions, equations, dg, l, 4, - element) + outward_direction = get_surface_normal(normal_directions, l, 4, element) f_node = flux(u_node, outward_direction, equations) f_num = get_node_vars(surface_flux_values, equations, dg, l, 4, element) multiply_add_to_node_vars!(du, inv_weight_left, (f_num - f_node), @@ -124,8 +206,7 @@ function calc_surface_integral!(du, u, mesh::UnstructuredMesh2D, # surface at +x u_node = get_node_vars(u, equations, dg, nnodes(dg), l, element) # compute internal flux in normal direction on side 2 - outward_direction = get_node_coords(normal_directions, equations, dg, l, 2, - element) + outward_direction = get_surface_normal(normal_directions, l, 2, element) f_node = flux(u_node, outward_direction, equations) f_num = get_node_vars(surface_flux_values, equations, dg, l, 2, element) multiply_add_to_node_vars!(du, inv_weight_right, (f_num - f_node), @@ -134,8 +215,7 @@ function calc_surface_integral!(du, u, mesh::UnstructuredMesh2D, # surface at -y u_node = get_node_vars(u, equations, dg, l, 1, element) # compute internal flux in normal direction on side 1 - outward_direction = get_node_coords(normal_directions, equations, dg, l, 1, - element) + outward_direction = get_surface_normal(normal_directions, l, 1, element) f_node = flux(u_node, outward_direction, equations) f_num = get_node_vars(surface_flux_values, equations, dg, l, 1, element) multiply_add_to_node_vars!(du, inv_weight_left, (f_num - f_node), @@ -144,8 +224,7 @@ function calc_surface_integral!(du, u, mesh::UnstructuredMesh2D, # surface at +y u_node = get_node_vars(u, equations, dg, l, nnodes(dg), element) # compute internal flux in normal direction on side 3 - outward_direction = get_node_coords(normal_directions, equations, dg, l, 3, - element) + outward_direction = get_surface_normal(normal_directions, l, 3, element) f_node = flux(u_node, outward_direction, equations) f_num = get_node_vars(surface_flux_values, equations, 
dg, l, 3, element) multiply_add_to_node_vars!(du, inv_weight_right, (f_num - f_node), diff --git a/test/test_tree_2d_fdsbp.jl b/test/test_tree_2d_fdsbp.jl index c0844ee5dba..d477cab0563 100644 --- a/test/test_tree_2d_fdsbp.jl +++ b/test/test_tree_2d_fdsbp.jl @@ -102,6 +102,32 @@ end end end + @trixi_testset "elixir_euler_convergence.jl with Drikakis-Tsangaris splitting" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_convergence.jl"), + l2=[ + 1.708838999643608e-6, + 1.7437997854485807e-6, + 1.7437997854741082e-6, + 5.457223460116349e-6, + ], + linf=[ + 9.796504911285808e-6, + 9.614745899888533e-6, + 9.614745899444443e-6, + 4.02610718399643e-5, + ], + tspan=(0.0, 0.1), flux_splitting=splitting_drikakis_tsangaris) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + @trixi_testset "elixir_euler_kelvin_helmholtz_instability.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_kelvin_helmholtz_instability.jl"), diff --git a/test/test_unstructured_2d.jl b/test/test_unstructured_2d.jl index 87d677e1623..8a62dcaec3c 100644 --- a/test/test_unstructured_2d.jl +++ b/test/test_unstructured_2d.jl @@ -610,6 +610,76 @@ end @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 end end + +@trixi_testset "FDSBP (upwind): elixir_euler_source_terms_upwind.jl" begin + @test_trixi_include(joinpath(pkgdir(Trixi, "examples", "unstructured_2d_fdsbp"), + "elixir_euler_source_terms_upwind.jl"), + l2=[4.085391175504837e-5, + 7.19179253772227e-5, + 7.191792537723135e-5, + 0.00021775241532855398], + linf=[0.0004054489124620808, + 0.0006164432358217731, + 0.0006164432358186644, + 0.001363103391379461], + tspan=(0.0, 0.05), + atol=1.0e-10) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + +@trixi_testset "FDSBP (upwind): elixir_euler_source_terms_upwind.jl with LF splitting" begin + @test_trixi_include(joinpath(pkgdir(Trixi, "examples", "unstructured_2d_fdsbp"), + "elixir_euler_source_terms_upwind.jl"), + l2=[3.8300267071890586e-5, + 5.295846741663533e-5, + 5.295846741663526e-5, + 0.00017564759295593478], + linf=[0.00018810716496542312, + 0.0003794187430412599, + 0.0003794187430412599, + 0.0009632958510650269], + tspan=(0.0, 0.025), + flux_splitting=splitting_lax_friedrichs, + atol=1.0e-10) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + +@trixi_testset "FDSBP (upwind): elixir_euler_free_stream_upwind.jl" begin + @test_trixi_include(joinpath(pkgdir(Trixi, "examples", "unstructured_2d_fdsbp"), + "elixir_euler_free_stream_upwind.jl"), + l2=[3.2114065566681054e-14, + 2.132488788134846e-14, + 2.106144937311659e-14, + 8.609642264224197e-13], + linf=[3.354871935812298e-11, + 7.006478730531285e-12, + 1.148153794261475e-11, + 9.041265514042607e-10], + tspan=(0.0, 0.05), + atol=1.0e-10) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end 
end # Clean up afterwards: delete Trixi.jl output directory From 253c358243ed2a8b6f63c422b53cf09a68188f68 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Wed, 6 Mar 2024 13:25:19 +0100 Subject: [PATCH 126/166] set version to v0.7.1 --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 800c7b4c0fa..53b859a422f 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "] -version = "0.7.1-pre" +version = "0.7.1" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From ca082c2eb1273611cf38e80c6d2dab04e8f8177f Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Wed, 6 Mar 2024 13:25:37 +0100 Subject: [PATCH 127/166] set development version to v0.7.2-pre --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 53b859a422f..e8eb7c788ce 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "] -version = "0.7.1" +version = "0.7.2-pre" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From 4bf61a0fd3abb21b457ea9d9ee2c19c835d018c3 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Wed, 6 Mar 2024 14:25:25 +0100 Subject: [PATCH 128/166] force new FFMPEG.jl version for tests (#1858) --- test/Project.toml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/Project.toml b/test/Project.toml index a376c2805ea..1a042dab44f 100644 --- a/test/Project.toml +++ b/test/Project.toml @@ -2,6 +2,7 @@ Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595" CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0" Downloads = "f43a241f-c20a-4ad4-852c-f6b1247861c6" +FFMPEG = "c87230d0-a227-11e9-1b43-d7ebe4e7570a" ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195" @@ -15,6 +16,7 @@ Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" Aqua = "0.8" CairoMakie = "0.10" Downloads = "1" +FFMPEG = "0.4" ForwardDiff = "0.10.24" LinearAlgebra = "1" MPI = "0.20" From 3bed8285ca2bb3857050bde4fd1408d151cb760d Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Thu, 7 Mar 2024 10:41:56 +0100 Subject: [PATCH 129/166] Check BCs for periodicity for periodic Tree & Structured meshes (#1860) * Check BCs for periodicity for periodic meshes * default case for periodic bcs * fmt * specialize * error ant fmt * isperiodic TreeMesh * avoid if * shorten * shorten * Make meshes non-periodic * fix rti * shorten dispatch --------- Co-authored-by: Hendrik Ranocha --- .../elixir_advection_coupled.jl | 12 ++-- ...lixir_euler_rayleigh_taylor_instability.jl | 2 +- .../elixir_euler_warm_bubble.jl | 3 +- src/meshes/tree_mesh.jl | 3 + .../semidiscretization_hyperbolic.jl | 70 +++++++++++++++++++ ...semidiscretization_hyperbolic_parabolic.jl | 2 + 6 files changed, 86 insertions(+), 6 deletions(-) diff --git a/examples/structured_2d_dgsem/elixir_advection_coupled.jl b/examples/structured_2d_dgsem/elixir_advection_coupled.jl index 43b68f21b03..0002bb8d374 100644 --- a/examples/structured_2d_dgsem/elixir_advection_coupled.jl +++ b/examples/structured_2d_dgsem/elixir_advection_coupled.jl @@ -53,7 +53,8 @@ cells_per_dimension = (8, 8) coordinates_min1 = (-1.0, 0.0) # 
minimum coordinates (min(x), min(y)) coordinates_max1 = (0.0, 1.0) # maximum coordinates (max(x), max(y)) -mesh1 = StructuredMesh(cells_per_dimension, coordinates_min1, coordinates_max1) +mesh1 = StructuredMesh(cells_per_dimension, coordinates_min1, coordinates_max1, + periodicity = false) # Define the coupling functions coupling_function12 = (x, u, equations_other, equations_own) -> u @@ -84,7 +85,8 @@ semi1 = SemidiscretizationHyperbolic(mesh1, equations, initial_condition_converg coordinates_min2 = (0.0, 0.0) # minimum coordinates (min(x), min(y)) coordinates_max2 = (1.0, 1.0) # maximum coordinates (max(x), max(y)) -mesh2 = StructuredMesh(cells_per_dimension, coordinates_min2, coordinates_max2) +mesh2 = StructuredMesh(cells_per_dimension, coordinates_min2, coordinates_max2, + periodicity = false) # Define the coupling functions coupling_function21 = (x, u, equations_other, equations_own) -> u @@ -115,7 +117,8 @@ semi2 = SemidiscretizationHyperbolic(mesh2, equations, initial_condition_converg coordinates_min3 = (-1.0, -1.0) # minimum coordinates (min(x), min(y)) coordinates_max3 = (0.0, 0.0) # maximum coordinates (max(x), max(y)) -mesh3 = StructuredMesh(cells_per_dimension, coordinates_min3, coordinates_max3) +mesh3 = StructuredMesh(cells_per_dimension, coordinates_min3, coordinates_max3, + periodicity = false) # Define the coupling functions coupling_function34 = (x, u, equations_other, equations_own) -> u @@ -146,7 +149,8 @@ semi3 = SemidiscretizationHyperbolic(mesh3, equations, initial_condition_converg coordinates_min4 = (0.0, -1.0) # minimum coordinates (min(x), min(y)) coordinates_max4 = (1.0, 0.0) # maximum coordinates (max(x), max(y)) -mesh4 = StructuredMesh(cells_per_dimension, coordinates_min4, coordinates_max4) +mesh4 = StructuredMesh(cells_per_dimension, coordinates_min4, coordinates_max4, + periodicity = false) # Define the coupling functions coupling_function43 = (x, u, equations_other, equations_own) -> u diff --git a/examples/structured_2d_dgsem/elixir_euler_rayleigh_taylor_instability.jl b/examples/structured_2d_dgsem/elixir_euler_rayleigh_taylor_instability.jl index 6c254e8bd8b..dd0cc339b20 100644 --- a/examples/structured_2d_dgsem/elixir_euler_rayleigh_taylor_instability.jl +++ b/examples/structured_2d_dgsem/elixir_euler_rayleigh_taylor_instability.jl @@ -69,7 +69,7 @@ mapping(xi, eta) = SVector(0.25 * 0.5 * (1.0 + xi), 0.5 * (1.0 + eta)) num_elements_per_dimension = 32 cells_per_dimension = (num_elements_per_dimension, num_elements_per_dimension * 4) -mesh = StructuredMesh(cells_per_dimension, mapping) +mesh = StructuredMesh(cells_per_dimension, mapping, periodicity = false) initial_condition = initial_condition_rayleigh_taylor_instability boundary_conditions = (x_neg = boundary_condition_slip_wall, diff --git a/examples/structured_2d_dgsem/elixir_euler_warm_bubble.jl b/examples/structured_2d_dgsem/elixir_euler_warm_bubble.jl index 05c09d57530..38b9386e94e 100644 --- a/examples/structured_2d_dgsem/elixir_euler_warm_bubble.jl +++ b/examples/structured_2d_dgsem/elixir_euler_warm_bubble.jl @@ -100,7 +100,8 @@ coordinates_min = (0.0, 0.0) coordinates_max = (20_000.0, 10_000.0) cells_per_dimension = (64, 32) -mesh = StructuredMesh(cells_per_dimension, coordinates_min, coordinates_max) +mesh = StructuredMesh(cells_per_dimension, coordinates_min, coordinates_max, + periodicity = (true, false)) semi = SemidiscretizationHyperbolic(mesh, equations, warm_bubble_setup, solver, source_terms = warm_bubble_setup, diff --git a/src/meshes/tree_mesh.jl b/src/meshes/tree_mesh.jl index 
05699d17d16..1092fc54cc1 100644 --- a/src/meshes/tree_mesh.jl +++ b/src/meshes/tree_mesh.jl @@ -228,5 +228,8 @@ function total_volume(mesh::TreeMesh) return mesh.tree.length_level_0^ndims(mesh) end +isperiodic(mesh::TreeMesh) = isperiodic(mesh.tree) +isperiodic(mesh::TreeMesh, dimension) = isperiodic(mesh.tree, dimension) + include("parallel_tree_mesh.jl") end # @muladd diff --git a/src/semidiscretization/semidiscretization_hyperbolic.jl b/src/semidiscretization/semidiscretization_hyperbolic.jl index 7ebd758de37..f61378a7dca 100644 --- a/src/semidiscretization/semidiscretization_hyperbolic.jl +++ b/src/semidiscretization/semidiscretization_hyperbolic.jl @@ -72,6 +72,8 @@ function SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver _boundary_conditions = digest_boundary_conditions(boundary_conditions, mesh, solver, cache) + check_periodicity_mesh_boundary_conditions(mesh, _boundary_conditions) + SemidiscretizationHyperbolic{typeof(mesh), typeof(equations), typeof(initial_condition), typeof(_boundary_conditions), typeof(source_terms), @@ -210,6 +212,74 @@ function digest_boundary_conditions(boundary_conditions::AbstractArray, mesh, so throw(ArgumentError("Please use a (named) tuple instead of an (abstract) array to supply multiple boundary conditions (to improve performance).")) end +# No checks for these meshes yet available +function check_periodicity_mesh_boundary_conditions(mesh::Union{P4estMesh, + UnstructuredMesh2D, + T8codeMesh, + DGMultiMesh}, + boundary_conditions) +end + +# No actions needed for periodic boundary conditions +function check_periodicity_mesh_boundary_conditions(mesh::Union{TreeMesh, + StructuredMesh}, + boundary_conditions::BoundaryConditionPeriodic) +end + +function check_periodicity_mesh_boundary_conditions_x(mesh, x_neg, x_pos) + if isperiodic(mesh, 1) && + (x_neg != BoundaryConditionPeriodic() || + x_pos != BoundaryConditionPeriodic()) + @error "For periodic mesh non-periodic boundary conditions in x-direction are supplied." + end +end + +function check_periodicity_mesh_boundary_conditions_y(mesh, y_neg, y_pos) + if isperiodic(mesh, 2) && + (y_neg != BoundaryConditionPeriodic() || + y_pos != BoundaryConditionPeriodic()) + @error "For periodic mesh non-periodic boundary conditions in y-direction are supplied." + end +end + +function check_periodicity_mesh_boundary_conditions_z(mesh, z_neg, z_pos) + if isperiodic(mesh, 3) && + (z_neg != BoundaryConditionPeriodic() || + z_pos != BoundaryConditionPeriodic()) + @error "For periodic mesh non-periodic boundary conditions in z-direction are supplied." 
+ end +end + +function check_periodicity_mesh_boundary_conditions(mesh::Union{TreeMesh{1}, + StructuredMesh{1}}, + boundary_conditions::Union{NamedTuple, + Tuple}) + check_periodicity_mesh_boundary_conditions_x(mesh, boundary_conditions[1], + boundary_conditions[2]) +end + +function check_periodicity_mesh_boundary_conditions(mesh::Union{TreeMesh{2}, + StructuredMesh{2}}, + boundary_conditions::Union{NamedTuple, + Tuple}) + check_periodicity_mesh_boundary_conditions_x(mesh, boundary_conditions[1], + boundary_conditions[2]) + check_periodicity_mesh_boundary_conditions_y(mesh, boundary_conditions[3], + boundary_conditions[4]) +end + +function check_periodicity_mesh_boundary_conditions(mesh::Union{TreeMesh{3}, + StructuredMesh{3}}, + boundary_conditions::Union{NamedTuple, + Tuple}) + check_periodicity_mesh_boundary_conditions_x(mesh, boundary_conditions[1], + boundary_conditions[2]) + check_periodicity_mesh_boundary_conditions_y(mesh, boundary_conditions[3], + boundary_conditions[4]) + check_periodicity_mesh_boundary_conditions_z(mesh, boundary_conditions[5], + boundary_conditions[6]) +end + function Base.show(io::IO, semi::SemidiscretizationHyperbolic) @nospecialize semi # reduce precompilation time diff --git a/src/semidiscretization/semidiscretization_hyperbolic_parabolic.jl b/src/semidiscretization/semidiscretization_hyperbolic_parabolic.jl index 0f44941390a..57724374acb 100644 --- a/src/semidiscretization/semidiscretization_hyperbolic_parabolic.jl +++ b/src/semidiscretization/semidiscretization_hyperbolic_parabolic.jl @@ -136,6 +136,8 @@ function SemidiscretizationHyperbolicParabolic(mesh, equations, equations_parabo _boundary_conditions_parabolic = digest_boundary_conditions(boundary_conditions_parabolic, mesh, solver, cache) + check_periodicity_mesh_boundary_conditions(mesh, _boundary_conditions) + cache_parabolic = (; create_cache_parabolic(mesh, equations, equations_parabolic, solver, solver_parabolic, RealT, From b8b34ea21cfa22f93daa131c7ef77d8ed8f3f789 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Thu, 7 Mar 2024 10:45:17 +0100 Subject: [PATCH 130/166] set version to v0.7.2 --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index e8eb7c788ce..9a6e8e0d9a2 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "] -version = "0.7.2-pre" +version = "0.7.2" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From b6bc7c8cae92cf6877fca90c5ef100771a7781a8 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Thu, 7 Mar 2024 10:45:30 +0100 Subject: [PATCH 131/166] set development version to v0.7.3-pre --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 9a6e8e0d9a2..6b44af4a3fa 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. 
Winters ", "Jesse Chan "] -version = "0.7.2" +version = "0.7.3-pre" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From b11bc10d8ac68798ae2e53f9db3d5c4f824af5fa Mon Sep 17 00:00:00 2001 From: Benedict <135045760+bgeihe@users.noreply.github.com> Date: Thu, 7 Mar 2024 13:04:19 +0100 Subject: [PATCH 132/166] add warning (#1863) --- src/solvers/dgsem/basis_lobatto_legendre.jl | 1 + 1 file changed, 1 insertion(+) diff --git a/src/solvers/dgsem/basis_lobatto_legendre.jl b/src/solvers/dgsem/basis_lobatto_legendre.jl index 6a92fd1c066..9e21b88dfa1 100644 --- a/src/solvers/dgsem/basis_lobatto_legendre.jl +++ b/src/solvers/dgsem/basis_lobatto_legendre.jl @@ -12,6 +12,7 @@ Create a nodal Lobatto-Legendre basis for polynomials of degree `polydeg`. For the special case `polydeg=0` the DG method reduces to a finite volume method. Therefore, this function sets the center point of the cell as single node. +This exceptional case is currently only supported for TreeMesh! """ struct LobattoLegendreBasis{RealT <: Real, NNODES, VectorT <: AbstractVector{RealT}, From c4bf3df8a4d4e3920b0f774960d129a4af6ee287 Mon Sep 17 00:00:00 2001 From: Benedict <135045760+bgeihe@users.noreply.github.com> Date: Thu, 7 Mar 2024 13:06:20 +0100 Subject: [PATCH 133/166] Update parallelization.md (#1864) MPI support in T8code came with #1803 --- docs/src/parallelization.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/src/parallelization.md b/docs/src/parallelization.md index f599eb5fafe..2114f30fb87 100644 --- a/docs/src/parallelization.md +++ b/docs/src/parallelization.md @@ -69,8 +69,7 @@ installations. Follow the steps described [here](https://github.com/DLR-AMR/T8co [here](https://github.com/trixi-framework/P4est.jl/blob/main/README.md#installation) for the configuration. The paths that point to `libp4est.so` (and potentially to `libsc.so`) need to be the same for P4est.jl and T8code.jl. This could, e.g., be `libp4est.so` that usually can be found -in `lib/` or `local/lib/` in the installation directory of `t8code`. Note that the `T8codeMesh`, however, -does not support MPI yet. +in `lib/` or `local/lib/` in the installation directory of `t8code`. The preferences for [HDF5.jl](https://github.com/JuliaIO/HDF5.jl) always need to be set, even if you do not want to use `HDF5` from Trixi.jl, see also [issue #1079 in HDF5.jl](https://github.com/JuliaIO/HDF5.jl/issues/1079). 
To set the preferences for HDF5.jl, follow the instructions described From 1ca37cf2271806d203a832d3d99bfa2c14d3226f Mon Sep 17 00:00:00 2001 From: Benedict <135045760+bgeihe@users.noreply.github.com> Date: Fri, 8 Mar 2024 07:48:24 +0100 Subject: [PATCH 134/166] set capacity also when using MPI (#1862) Co-authored-by: Hendrik Ranocha --- src/meshes/mesh_io.jl | 1 + 1 file changed, 1 insertion(+) diff --git a/src/meshes/mesh_io.jl b/src/meshes/mesh_io.jl index 337e33e6969..28e6efa8c57 100644 --- a/src/meshes/mesh_io.jl +++ b/src/meshes/mesh_io.jl @@ -74,6 +74,7 @@ function save_mesh_file(mesh::TreeMesh, output_directory, timestep, attributes(file)["mesh_type"] = get_name(mesh) attributes(file)["ndims"] = ndims(mesh) attributes(file)["n_cells"] = n_cells + attributes(file)["capacity"] = mesh.tree.capacity attributes(file)["n_leaf_cells"] = count_leaf_cells(mesh.tree) attributes(file)["minimum_level"] = minimum_level(mesh.tree) attributes(file)["maximum_level"] = maximum_level(mesh.tree) From f235619a49dbc8bd7d84a77269558a64b21a155f Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Fri, 8 Mar 2024 12:20:45 +0100 Subject: [PATCH 135/166] Mention hyphen/dash caveat for boundary symbols (#1866) * Mention hyphen/dash caveat for boundary symbols * typo * elaborate --- docs/src/meshes/p4est_mesh.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/src/meshes/p4est_mesh.md b/docs/src/meshes/p4est_mesh.md index 3b35ffcad6f..a14551b3f46 100644 --- a/docs/src/meshes/p4est_mesh.md +++ b/docs/src/meshes/p4est_mesh.md @@ -256,6 +256,8 @@ By doing so, only nodesets with a label present in `boundary_symbols` are treate Other nodesets that could be used for diagnostics are not treated as external boundaries. Note that there is a leading colon `:` compared to the label in the `.inp` mesh file. This is required to turn the label into a [`Symbol`](https://docs.julialang.org/en/v1/manual/metaprogramming/#Symbols). +**Important**: In Julia, a symbol _cannot_ contain a hyphen/dash `-`, i.e., `:BC-1` is _not_ a valid symbol. +Keep this in mind when importing boundaries, you might have to convert hyphens/dashes `-` to underscores `_` in the `.inp` mesh file, i.e., `BC_1` instead of `BC-1`. A 2D example for this mesh, which is read-in for an unstructured mesh file created with `gmsh`, is presented in `examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl`. 
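As a brief illustration (the nodeset labels here are hypothetical), after renaming `BC-1` and `BC-2` to `BC_1` and `BC_2` in the `.inp` file, the mesh could be read in with constructor arguments as in the example elixir referenced above:

boundary_symbols = [:BC_1, :BC_2]  # note the leading colons and the underscores
mesh = P4estMesh{2}(mesh_file, polydeg = 3, boundary_symbols = boundary_symbols)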
From bd060b6d140b2c4f8f717a89899e867593e141c1 Mon Sep 17 00:00:00 2001 From: Andrew Winters Date: Thu, 14 Mar 2024 15:49:16 +0100 Subject: [PATCH 136/166] Add functionality for `TimeSeries` callback on `UnstructuredMesh2D` (#1855) * add functionality for TimeSeries callback on UnstructuredMesh2D * Update src/callbacks_step/time_series_dg.jl Co-authored-by: Hendrik Ranocha * Apply suggestions from code review Co-authored-by: Daniel Doehring * add strategy to correctly locate a gauge point within a curvilinear element * add sanity check that the Newton solution is correct * run formatter * implement a more general approach that also works on curved element without issue * run formatter * forgot to format the examples * Apply suggestions from code review Co-authored-by: Hendrik Ranocha Co-authored-by: Daniel Doehring * working version of the element finding routine * run formatter * add new elixir for the time series callback * add additional test for the time series callback on an unstructured mesh * add appropriate test * update docstring * add comment about the barycenter computation * add simplifications and comments from code review * adjust variable name to avoid ugly formatting * Apply suggestions from code review Co-authored-by: Hendrik Ranocha * fix variable name * remove Experimental status from the TimeSeriesCallback * move new TimeSeries test into the unit testing * add output_directory creation if not already done. Necessary if this callback is used without the SaveSolution callback * formatting * update test mesh to have one straight-sided element to trigger inverse bi-linear interpolation * update test values * add news item * forgot to update all new test values on the new mesh * update tests and use coverage override to avoid redundancy --------- Co-authored-by: Hendrik Ranocha Co-authored-by: Daniel Doehring --- NEWS.md | 12 +- .../elixir_euler_time_series.jl | 115 ++++++++ src/callbacks_step/time_series.jl | 9 +- src/callbacks_step/time_series_dg.jl | 6 +- src/callbacks_step/time_series_dg2d.jl | 279 +++++++++++++++++- test/test_unit.jl | 1 + test/test_unstructured_2d.jl | 33 +++ 7 files changed, 443 insertions(+), 12 deletions(-) create mode 100644 examples/unstructured_2d_dgsem/elixir_euler_time_series.jl diff --git a/NEWS.md b/NEWS.md index d70504d8c85..5b08d51ab89 100644 --- a/NEWS.md +++ b/NEWS.md @@ -4,13 +4,19 @@ Trixi.jl follows the interpretation of [semantic versioning (semver)](https://ju used in the Julia ecosystem. Notable changes will be documented in this file for human readability. +## Changes in the v0.7 lifecycle + +#### Added +- Implementation of `TimeSeriesCallback` for curvilinear meshes on `UnstructuredMesh2D`. + + ## Changes when updating to v0.7 from v0.6.x #### Added #### Changed -- The default wave speed estimate used within `flux_hll` is now `min_max_speed_davis` +- The default wave speed estimate used within `flux_hll` is now `min_max_speed_davis` instead of `min_max_speed_naive`. #### Deprecated @@ -26,7 +32,7 @@ for human readability. #### Added - AMR for hyperbolic-parabolic equations on 3D `P4estMesh` - `flux_hllc` on non-cartesian meshes for `CompressibleEulerEquations{2,3}D` -- Different boundary conditions for quad/hex meshes in Abaqus format, even if not generated by HOHQMesh, +- Different boundary conditions for quad/hex meshes in Abaqus format, even if not generated by HOHQMesh, can now be digested by Trixi in 2D and 3D. 
- Subcell (positivity) limiting support for nonlinear variables in 2D for `TreeMesh` - Added Lighthill-Whitham-Richards (LWR) traffic model @@ -40,7 +46,7 @@ for human readability. #### Changed - The wave speed estimates for `flux_hll`, `FluxHLL()` are now consistent across equations. - In particular, the functions `min_max_speed_naive`, `min_max_speed_einfeldt` are now + In particular, the functions `min_max_speed_naive`, `min_max_speed_einfeldt` are now conceptually identical across equations. Users, who have been using `flux_hll` for MHD have now to use `flux_hlle` in order to use the Einfeldt wave speed estimate. diff --git a/examples/unstructured_2d_dgsem/elixir_euler_time_series.jl b/examples/unstructured_2d_dgsem/elixir_euler_time_series.jl new file mode 100644 index 00000000000..13233cdadbc --- /dev/null +++ b/examples/unstructured_2d_dgsem/elixir_euler_time_series.jl @@ -0,0 +1,115 @@ +# An elixir that has an alternative convergence test that uses +# the `TimeSeriesCallback` on several gauge points. Many of the +# gauge points are selected as "stress tests" for the element +# identification, e.g., a gauge point that lies on an +# element corner of a curvilinear mesh + +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the compressible Euler equations + +equations = CompressibleEulerEquations2D(1.4) + +# Modify the manufactured solution test to use `L = sqrt(2)` +# in the initial condition and source terms +function initial_condition_convergence_shifted(x, t, + equations::CompressibleEulerEquations2D) + c = 2 + A = 0.1 + L = sqrt(2) + f = 1 / L + ω = 2 * pi * f + ini = c + A * sin(ω * (x[1] + x[2] - t)) + + rho = ini + rho_v1 = ini + rho_v2 = ini + rho_e = ini^2 + + return SVector(rho, rho_v1, rho_v2, rho_e) +end + +@inline function source_terms_convergence_shifted(u, x, t, + equations::CompressibleEulerEquations2D) + # Same settings as in `initial_condition` + c = 2 + A = 0.1 + L = sqrt(2) + f = 1 / L + ω = 2 * pi * f + γ = equations.gamma + + x1, x2 = x + si, co = sincos(ω * (x1 + x2 - t)) + rho = c + A * si + rho_x = ω * A * co + # Note that d/dt rho = -d/dx rho = -d/dy rho. + + tmp = (2 * rho - 1) * (γ - 1) + + du1 = rho_x + du2 = rho_x * (1 + tmp) + du3 = du2 + du4 = 2 * rho_x * (rho + tmp) + + return SVector(du1, du2, du3, du4) +end + +initial_condition = initial_condition_convergence_shifted + +source_term = source_terms_convergence_shifted + +############################################################################### +# Get the DG approximation space + +solver = DGSEM(polydeg = 6, surface_flux = flux_lax_friedrichs) + +############################################################################### +# Get the curved quad mesh from a file (downloads the file if not available locally) + +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/b434e724e3972a9c4ee48d58c80cdcdb/raw/55c916cd8c0294a2d4a836e960dac7247b7c8ccf/mesh_multiple_flips.mesh", + joinpath(@__DIR__, "mesh_multiple_flips.mesh")) + +mesh = UnstructuredMesh2D(mesh_file, periodicity = true) + +############################################################################### +# create the semi discretization object + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + source_terms = source_term) + +############################################################################### +# ODE solvers, callbacks etc. 
+ +tspan = (0.0, 1.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 1000 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +time_series = TimeSeriesCallback(semi, + [(0.75, 0.7), (1.23, 0.302), (0.8, 1.0), + (0.353553390593274, 0.353553390593274), + (0.505, 1.125), (1.37, 0.89), (0.349, 0.7153), + (0.883883476483184, 0.406586401289607), + (sqrt(2), sqrt(2))]; + interval = 10) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + time_series, + alive_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, RDPK3SpFSAL49(); abstol = 1.0e-6, reltol = 1.0e-6, + ode_default_options()..., callback = callbacks); + +summary_callback() # print the timer summary diff --git a/src/callbacks_step/time_series.jl b/src/callbacks_step/time_series.jl index 7baa6b9c5a1..f6d76f0fb15 100644 --- a/src/callbacks_step/time_series.jl +++ b/src/callbacks_step/time_series.jl @@ -23,8 +23,8 @@ After the last time step, the results are stored in an HDF5 file `filename` in d The real data type `RealT` and data type for solution variables `uEltype` default to the respective types used in the solver and the cache. -!!! warning "Experimental implementation" - This is an experimental feature and may change in future releases. +Currently this callback is only implemented for [`TreeMesh`](@ref) in 2D +and [`UnstructuredMesh2D`](@ref). """ mutable struct TimeSeriesCallback{RealT <: Real, uEltype <: Real, SolutionVariables, VariableNames, Cache} @@ -96,6 +96,11 @@ function TimeSeriesCallback(mesh, equations, solver, cache, point_coordinates; throw(ArgumentError("`point_coordinates` must be a matrix of size n_points × ndims")) end + # create the output folder if it does not exist already + if mpi_isroot() && !isdir(output_directory) + mkpath(output_directory) + end + # Transpose point_coordinates to our usual format [ndims, n_points] # Note: They are accepted in a different format to allow direct input from `readdlm` point_coordinates_ = permutedims(point_coordinates) diff --git a/src/callbacks_step/time_series_dg.jl b/src/callbacks_step/time_series_dg.jl index 1b63979d579..ae394afbbfd 100644 --- a/src/callbacks_step/time_series_dg.jl +++ b/src/callbacks_step/time_series_dg.jl @@ -5,8 +5,10 @@ @muladd begin #! format: noindent -# Store time series file for a TreeMesh with a DG solver -function save_time_series_file(time_series_callback, mesh::TreeMesh, equations, dg::DG) +# Store time series file for a DG solver +function save_time_series_file(time_series_callback, + mesh::Union{TreeMesh, UnstructuredMesh2D}, + equations, dg::DG) @unpack (interval, solution_variables, variable_names, output_directory, filename, point_coordinates, point_data, time, step, time_series_cache) = time_series_callback diff --git a/src/callbacks_step/time_series_dg2d.jl b/src/callbacks_step/time_series_dg2d.jl index c15945d6e16..ad7c6851c80 100644 --- a/src/callbacks_step/time_series_dg2d.jl +++ b/src/callbacks_step/time_series_dg2d.jl @@ -6,7 +6,9 @@ #! 
format: noindent # Creates cache for time series callback -function create_cache_time_series(point_coordinates, mesh::TreeMesh{2}, dg, cache) +function create_cache_time_series(point_coordinates, + mesh::Union{TreeMesh{2}, UnstructuredMesh2D}, + dg, cache) # Determine element ids for point coordinates element_ids = get_elements_by_coordinates(point_coordinates, mesh, dg, cache) @@ -68,6 +70,144 @@ function get_elements_by_coordinates!(element_ids, coordinates, mesh::TreeMesh, return element_ids end +# Elements on an `UnstructuredMesh2D` are possibly curved. Assume that each +# element is convex, i.e., all interior angles are less than 180 degrees. +# This routine computes the shortest distance from a given point to each element +# surface in the mesh. These distances then indicate possible candidate elements. +# From these candidates we (essentially) apply a ray casting strategy and identify +# the element in which the point lies by comparing the ray formed by the point to +# the nearest boundary to the rays cast by the candidate element barycenters to the +# boundary. If these rays point in the same direction, then we have identified the +# desired element location. +function get_elements_by_coordinates!(element_ids, coordinates, + mesh::UnstructuredMesh2D, + dg, cache) + if length(element_ids) != size(coordinates, 2) + throw(DimensionMismatch("storage length for element ids does not match the number of coordinates")) + end + + # Reset element ids - 0 indicates "not (yet) found" + element_ids .= 0 + + # Compute and save the barycentric coordinate on each element + bary_centers = zeros(eltype(mesh.corners), 2, mesh.n_elements) + calc_bary_centers!(bary_centers, dg, cache) + + # Iterate over coordinates + distances = zeros(eltype(mesh.corners), mesh.n_elements) + indices = zeros(Int, mesh.n_elements, 2) + for index in 1:length(element_ids) + # Grab the current point for which the element needs found + point = SVector(coordinates[1, index], + coordinates[2, index]) + + # Compute the minimum distance between the `point` and all the element surfaces + # saved into `distances`. The point in `node_coordinates` that gives said minimum + # distance on each element is saved in `indices` + distances, indices = calc_minimum_surface_distance(point, + cache.elements.node_coordinates, + dg, mesh) + + # Get the candidate elements where the `point` might live + candidates = findall(abs.(minimum(distances) .- distances) .< + 500 * eps(eltype(point))) + + # The minimal surface point is on a boundary so it plays no role which candidate + # we use to grab it. So just use the first one + surface_point = SVector(cache.elements.node_coordinates[1, + indices[candidates[1], + 1], + indices[candidates[1], + 2], + candidates[1]], + cache.elements.node_coordinates[2, + indices[candidates[1], + 1], + indices[candidates[1], + 2], + candidates[1]]) + + # Compute the vector pointing from the current `point` toward the surface + P = surface_point - point + + # If the vector `P` is the zero vector then this `point` is at an element corner or + # on a surface. In this case the choice of a candidate element is ambiguous and + # we just use the first candidate. However, solutions might differ at discontinuous + # interfaces such that this choice may influence the result. 
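The ray-casting check described in the comments at the top of this function reduces to a sign test: the query point lies in the candidate element whose barycenter sits on the same side of the nearest surface point as the point itself, i.e. whose barycenter-to-surface vector has a positive dot product with the point-to-surface vector `P`. A small standalone illustration with made-up coordinates for two candidate elements sharing the edge `x = 1`:

```julia
# Toy data: a query point, its nearest surface point on the shared edge,
# and the barycenters of the two candidate elements.
point         = [0.9, 0.5]
surface_point = [1.0, 0.5]
bary_left     = [0.5, 0.5]   # barycenter of the element that contains the point
bary_right    = [1.5, 0.5]

P       = surface_point - point        # point toward nearest surface point
B_left  = surface_point - bary_left    # barycenter toward the same surface point
B_right = surface_point - bary_right

sum(P .* B_left) > 0    # true:  rays point the same way, element found
sum(P .* B_right) > 0   # false: rays point in opposite directions
```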
+ if sum(P .* P) < 500 * eps(eltype(point)) + element_ids[index] = candidates[1] + continue + end + + # Loop through all the element candidates until we find a vector from the barycenter + # to the surface that points in the same direction as the current `point` vector. + # This then gives us the correct element. + for element in 1:length(candidates) + bary_center = SVector(bary_centers[1, candidates[element]], + bary_centers[2, candidates[element]]) + # Vector pointing from the barycenter toward the minimal `surface_point` + B = surface_point - bary_center + if sum(P .* B) > zero(eltype(bary_center)) + element_ids[index] = candidates[element] + break + end + end + end + + return element_ids +end + +# Use the available `node_coordinates` on each element to compute and save the barycenter. +# In essence, the barycenter is like an average where all the x and y node coordinates are +# summed and then we divide by the total number of degrees of freedom on the element, i.e., +# the value of `n^2` in two spatial dimensions. +@inline function calc_bary_centers!(bary_centers, dg, cache) + n = nnodes(dg) + @views for element in eachelement(dg, cache) + bary_centers[1, element] = sum(cache.elements.node_coordinates[1, :, :, + element]) / n^2 + bary_centers[2, element] = sum(cache.elements.node_coordinates[2, :, :, + element]) / n^2 + end + return nothing +end + +# Compute the shortest distance from a `point` to the surface of each element +# using the available `node_coordinates`. Also return the index pair of this +# minimum surface point location. We compute and store in `min_distance` +# the squared norm to avoid computing computationally more expensive square roots. +# Note! Could be made more accurate if the `node_coordinates` were super-sampled +# and reinterpolated onto a higher polynomial degree before this computation. 
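Since only the location of the minimum matters, the routine below compares squared distances; the argmin is the same as for the true distances, so the square roots can be skipped entirely. A tiny standalone check of that equivalence (coordinates invented for illustration):

```julia
nodes = [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]]
point = [0.2, 0.9]
dist2(a, b) = sum(abs2, a - b)
# The argmin over squared distances equals the argmin over true distances.
argmin([dist2(n, point) for n in nodes]) ==
    argmin([sqrt(dist2(n, point)) for n in nodes])   # true
```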
+function calc_minimum_surface_distance(point, node_coordinates, + dg, mesh::UnstructuredMesh2D) + n = nnodes(dg) + min_distance2 = Inf * ones(eltype(mesh.corners), length(mesh)) + indices = zeros(Int, length(mesh), 2) + for k in 1:length(mesh) + # used to ensure that only boundary points are used + on_surface = MVector(false, false) + for j in 1:n + on_surface[2] = (j == 1) || (j == n) + for i in 1:n + on_surface[1] = (i == 1) || (i == n) + if !any(on_surface) + continue + end + node = SVector(node_coordinates[1, i, j, k], + node_coordinates[2, i, j, k]) + distance2 = sum(abs2, node - point) + if distance2 < min_distance2[k] + min_distance2[k] = distance2 + indices[k, 1] = i + indices[k, 2] = j + end + end + end + end + + return min_distance2, indices +end + function get_elements_by_coordinates(coordinates, mesh, dg, cache) element_ids = Vector{Int}(undef, size(coordinates, 2)) get_elements_by_coordinates!(element_ids, coordinates, mesh, dg, cache) @@ -106,8 +246,137 @@ function calc_interpolating_polynomials!(interpolating_polynomials, coordinates, return interpolating_polynomials end -function calc_interpolating_polynomials(coordinates, element_ids, mesh::TreeMesh, dg, - cache) +function calc_interpolating_polynomials!(interpolating_polynomials, coordinates, + element_ids, + mesh::UnstructuredMesh2D, dg::DGSEM, cache) + @unpack nodes = dg.basis + + wbary = barycentric_weights(nodes) + + # Helper array for a straight-sided quadrilateral element + corners = zeros(eltype(mesh.corners), 4, 2) + + for index in 1:length(element_ids) + # Construct point + x = SVector(ntuple(i -> coordinates[i, index], ndims(mesh))) + + # Convert to unit coordinates; procedure differs for straight-sided + # versus curvilinear elements + element = element_ids[index] + if !mesh.element_is_curved[element] + for j in 1:2, i in 1:4 + # Pull the (x,y) values of the element corners from the global corners array + corners[i, j] = mesh.corners[j, mesh.element_node_ids[i, element]] + end + # Compute coordinates in reference system + unit_coordinates = invert_bilinear_interpolation(mesh, x, corners) + + # Sanity check that the computed `unit_coordinates` indeed recover the desired point `x` + x_check = straight_side_quad_map(unit_coordinates[1], unit_coordinates[2], + corners) + if !isapprox(x[1], x_check[1]) || !isapprox(x[2], x_check[2]) + error("failed to compute computational coordinates for the time series point $(x), closet candidate was $(x_check)") + end + else # mesh.element_is_curved[element] + unit_coordinates = invert_transfinite_interpolation(mesh, x, + view(mesh.surface_curves, + :, element)) + + # Sanity check that the computed `unit_coordinates` indeed recover the desired point `x` + x_check = transfinite_quad_map(unit_coordinates[1], unit_coordinates[2], + view(mesh.surface_curves, :, element)) + if !isapprox(x[1], x_check[1]) || !isapprox(x[2], x_check[2]) + error("failed to compute computational coordinates for the time series point $(x), closet candidate was $(x_check)") + end + end + + # Calculate interpolating polynomial for each dimension, making use of tensor product structure + for d in 1:ndims(mesh) + interpolating_polynomials[:, d, index] .= lagrange_interpolating_polynomials(unit_coordinates[d], + nodes, + wbary) + end + end + + return interpolating_polynomials +end + +# Use a Newton iteration to determine the computational coordinates +# (xi, eta) of an (x,y) `point` that is given in physical coordinates +# by inverting the transformation. 
For straight-sided elements this +# amounts to inverting a bi-linear interpolation. For curved +# elements we invert the transfinite interpolation with linear blending. +# The residual function for the Newton iteration is +# r(xi, eta) = X(xi, eta) - point +# and the Jacobian entries are computed accordingly from either +# `straight_side_quad_map_metrics` or `transfinite_quad_map_metrics`. +# We exploit the 2x2 nature of the problem and directly compute the matrix +# inverse to make things faster. The implementations below are inspired by +# an answer on Stack Overflow (https://stackoverflow.com/a/18332009) where +# the author explicitly states that their code is released to the public domain. +@inline function invert_bilinear_interpolation(mesh::UnstructuredMesh2D, point, + element_corners) + # Initial guess for the point (center of the reference element) + xi = zero(eltype(point)) + eta = zero(eltype(point)) + for k in 1:5 # Newton's method should converge quickly + # Compute current x and y coordinate and the Jacobian matrix + # J = (X_xi, X_eta; Y_xi, Y_eta) + x, y = straight_side_quad_map(xi, eta, element_corners) + J11, J12, J21, J22 = straight_side_quad_map_metrics(xi, eta, element_corners) + + # Compute residuals for the Newton teration for the current (x, y) coordinate + r1 = x - point[1] + r2 = y - point[2] + + # Newton update that directly applies the inverse of the 2x2 Jacobian matrix + inv_detJ = inv(J11 * J22 - J12 * J21) + + # Update with explicitly inverted Jacobian + xi = xi - inv_detJ * (J22 * r1 - J12 * r2) + eta = eta - inv_detJ * (-J21 * r1 + J11 * r2) + + # Ensure updated point is in the reference element + xi = min(max(xi, -1), 1) + eta = min(max(eta, -1), 1) + end + + return SVector(xi, eta) +end + +@inline function invert_transfinite_interpolation(mesh::UnstructuredMesh2D, point, + surface_curves::AbstractVector{<:CurvedSurface}) + # Initial guess for the point (center of the reference element) + xi = zero(eltype(point)) + eta = zero(eltype(point)) + for k in 1:5 # Newton's method should converge quickly + # Compute current x and y coordinate and the Jacobian matrix + # J = (X_xi, X_eta; Y_xi, Y_eta) + x, y = transfinite_quad_map(xi, eta, surface_curves) + J11, J12, J21, J22 = transfinite_quad_map_metrics(xi, eta, surface_curves) + + # Compute residuals for the Newton teration for the current (x,y) coordinate + r1 = x - point[1] + r2 = y - point[2] + + # Newton update that directly applies the inverse of the 2x2 Jacobian matrix + inv_detJ = inv(J11 * J22 - J12 * J21) + + # Update with explicitly inverted Jacobian + xi = xi - inv_detJ * (J22 * r1 - J12 * r2) + eta = eta - inv_detJ * (-J21 * r1 + J11 * r2) + + # Ensure updated point is in the reference element + xi = min(max(xi, -1), 1) + eta = min(max(eta, -1), 1) + end + + return SVector(xi, eta) +end + +function calc_interpolating_polynomials(coordinates, element_ids, + mesh::Union{TreeMesh, UnstructuredMesh2D}, + dg, cache) interpolating_polynomials = Array{real(dg), 3}(undef, nnodes(dg), ndims(mesh), length(element_ids)) @@ -121,8 +390,8 @@ end # Record the solution variables at each given point function record_state_at_points!(point_data, u, solution_variables, n_solution_variables, - mesh::TreeMesh{2}, equations, dg::DG, - time_series_cache) + mesh::Union{TreeMesh{2}, UnstructuredMesh2D}, + equations, dg::DG, time_series_cache) @unpack element_ids, interpolating_polynomials = time_series_cache old_length = length(first(point_data)) new_length = old_length + n_solution_variables diff --git a/test/test_unit.jl 
b/test/test_unit.jl index 1907a281718..03a78f6918a 100644 --- a/test/test_unit.jl +++ b/test/test_unit.jl @@ -600,6 +600,7 @@ end end @timed_testset "TimeSeriesCallback" begin + # Test the 2D TreeMesh version of the callback and some warnings @test_nowarn_mod trixi_include(@__MODULE__, joinpath(examples_dir(), "tree_2d_dgsem", "elixir_acoustics_gaussian_source.jl"), diff --git a/test/test_unstructured_2d.jl b/test/test_unstructured_2d.jl index 8a62dcaec3c..6814250dd47 100644 --- a/test/test_unstructured_2d.jl +++ b/test/test_unstructured_2d.jl @@ -198,6 +198,39 @@ end end end +@trixi_testset "elixir_euler_time_series.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_time_series.jl"), + l2=[ + 6.984024099236519e-5, + 6.289022520363763e-5, + 6.550951878107466e-5, + 0.00016222767700879948, + ], + linf=[ + 0.0005367823248620951, + 0.000671293180158461, + 0.0005656680962440319, + 0.0013910024779804075, + ], + tspan=(0.0, 0.2), + # With the default `maxiters = 1` in coverage tests, + # there would be no time series to check against. + coverage_override=(maxiters = 20,)) + # Extra test that the `TimeSeries` callback creates reasonable data + point_data_1 = time_series.affect!.point_data[1] + @test all(isapprox.(point_data_1[1:4], + [1.9546882708551676, 1.9547149531788077, + 1.9547142161310154, 3.821066781119142])) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + @trixi_testset "elixir_acoustics_gauss_wall.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_acoustics_gauss_wall.jl"), l2=[0.029330394861252995, 0.029345079728907965, From 9323c2ae47300ce83976fcdaba8f5801e82e41a5 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Thu, 14 Mar 2024 15:54:43 +0100 Subject: [PATCH 137/166] set version to v0.7.3 --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 6b44af4a3fa..f2f8a10626a 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "] -version = "0.7.3-pre" +version = "0.7.3" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From a528083a1c41d9fd131fc972816881d3276f718e Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Thu, 14 Mar 2024 15:54:55 +0100 Subject: [PATCH 138/166] set development version to v0.7.4-pre --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index f2f8a10626a..97da4aec51b 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. 
Winters ", "Jesse Chan "] -version = "0.7.3" +version = "0.7.4-pre" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From aa9ea20342e3d02445ec2dc53e380c405da3b683 Mon Sep 17 00:00:00 2001 From: Benedict <135045760+bgeihe@users.noreply.github.com> Date: Fri, 15 Mar 2024 10:20:43 +0100 Subject: [PATCH 139/166] reset n_boundaries_per_direction (#1870) --- src/solvers/dgsem_tree/containers_2d.jl | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/solvers/dgsem_tree/containers_2d.jl b/src/solvers/dgsem_tree/containers_2d.jl index 4bfbddead9a..7048739a226 100644 --- a/src/solvers/dgsem_tree/containers_2d.jl +++ b/src/solvers/dgsem_tree/containers_2d.jl @@ -421,6 +421,8 @@ end function init_boundaries!(boundaries, elements, mesh::TreeMesh2D) # Exit early if there are no boundaries to initialize if nboundaries(boundaries) == 0 + # In this case n_boundaries_per_direction still needs to be reset! + boundaries.n_boundaries_per_direction = SVector(0, 0, 0, 0) return nothing end From 38a9a5234cb2e5588fced11c7bac5d9441142014 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Fri, 15 Mar 2024 10:21:54 +0100 Subject: [PATCH 140/166] remove some minor allocations for threaded FDSBP (#1868) * do not inline calc_volume_integral! for FDSBP * avoid allocations in unstructured prolong2interfaces! * avoid allocations in unstructured prolong2boundaries! --- src/solvers/dgsem_unstructured/dg_2d.jl | 40 ++++++++++++---------- src/solvers/fdsbp_unstructured/fdsbp_2d.jl | 10 +++--- 2 files changed, 27 insertions(+), 23 deletions(-) diff --git a/src/solvers/dgsem_unstructured/dg_2d.jl b/src/solvers/dgsem_unstructured/dg_2d.jl index 988e995d6b7..ce602e178d8 100644 --- a/src/solvers/dgsem_unstructured/dg_2d.jl +++ b/src/solvers/dgsem_unstructured/dg_2d.jl @@ -95,49 +95,51 @@ function prolong2interfaces!(cache, u, mesh::UnstructuredMesh2D, equations, surface_integral, dg::DG) @unpack interfaces = cache + @unpack element_ids, element_side_ids = interfaces + interfaces_u = interfaces.u @threaded for interface in eachinterface(dg, cache) - primary_element = interfaces.element_ids[1, interface] - secondary_element = interfaces.element_ids[2, interface] + primary_element = element_ids[1, interface] + secondary_element = element_ids[2, interface] - primary_side = interfaces.element_side_ids[1, interface] - secondary_side = interfaces.element_side_ids[2, interface] + primary_side = element_side_ids[1, interface] + secondary_side = element_side_ids[2, interface] if primary_side == 1 for i in eachnode(dg), v in eachvariable(equations) - interfaces.u[1, v, i, interface] = u[v, i, 1, primary_element] + interfaces_u[1, v, i, interface] = u[v, i, 1, primary_element] end elseif primary_side == 2 for i in eachnode(dg), v in eachvariable(equations) - interfaces.u[1, v, i, interface] = u[v, nnodes(dg), i, primary_element] + interfaces_u[1, v, i, interface] = u[v, nnodes(dg), i, primary_element] end elseif primary_side == 3 for i in eachnode(dg), v in eachvariable(equations) - interfaces.u[1, v, i, interface] = u[v, i, nnodes(dg), primary_element] + interfaces_u[1, v, i, interface] = u[v, i, nnodes(dg), primary_element] end else # primary_side == 4 for i in eachnode(dg), v in eachvariable(equations) - interfaces.u[1, v, i, interface] = u[v, 1, i, primary_element] + interfaces_u[1, v, i, interface] = u[v, 1, i, primary_element] end end if secondary_side == 1 for i in eachnode(dg), v in eachvariable(equations) - interfaces.u[2, v, i, interface] = u[v, i, 1, secondary_element] + interfaces_u[2, v, i, interface] = u[v, i, 1, 
secondary_element] end elseif secondary_side == 2 for i in eachnode(dg), v in eachvariable(equations) - interfaces.u[2, v, i, interface] = u[v, nnodes(dg), i, + interfaces_u[2, v, i, interface] = u[v, nnodes(dg), i, secondary_element] end elseif secondary_side == 3 for i in eachnode(dg), v in eachvariable(equations) - interfaces.u[2, v, i, interface] = u[v, i, nnodes(dg), + interfaces_u[2, v, i, interface] = u[v, i, nnodes(dg), secondary_element] end else # secondary_side == 4 for i in eachnode(dg), v in eachvariable(equations) - interfaces.u[2, v, i, interface] = u[v, 1, i, secondary_element] + interfaces_u[2, v, i, interface] = u[v, 1, i, secondary_element] end end end @@ -278,26 +280,28 @@ function prolong2boundaries!(cache, u, mesh::UnstructuredMesh2D, equations, surface_integral, dg::DG) @unpack boundaries = cache + @unpack element_id, element_side_id = boundaries + boundaries_u = boundaries.u @threaded for boundary in eachboundary(dg, cache) - element = boundaries.element_id[boundary] - side = boundaries.element_side_id[boundary] + element = element_id[boundary] + side = element_side_id[boundary] if side == 1 for l in eachnode(dg), v in eachvariable(equations) - boundaries.u[v, l, boundary] = u[v, l, 1, element] + boundaries_u[v, l, boundary] = u[v, l, 1, element] end elseif side == 2 for l in eachnode(dg), v in eachvariable(equations) - boundaries.u[v, l, boundary] = u[v, nnodes(dg), l, element] + boundaries_u[v, l, boundary] = u[v, nnodes(dg), l, element] end elseif side == 3 for l in eachnode(dg), v in eachvariable(equations) - boundaries.u[v, l, boundary] = u[v, l, nnodes(dg), element] + boundaries_u[v, l, boundary] = u[v, l, nnodes(dg), element] end else # side == 4 for l in eachnode(dg), v in eachvariable(equations) - boundaries.u[v, l, boundary] = u[v, 1, l, element] + boundaries_u[v, l, boundary] = u[v, 1, l, element] end end end diff --git a/src/solvers/fdsbp_unstructured/fdsbp_2d.jl b/src/solvers/fdsbp_unstructured/fdsbp_2d.jl index c35772cdf18..cbe11ac6ac9 100644 --- a/src/solvers/fdsbp_unstructured/fdsbp_2d.jl +++ b/src/solvers/fdsbp_unstructured/fdsbp_2d.jl @@ -28,11 +28,11 @@ end # 2D volume integral contributions for `VolumeIntegralStrongForm` # OBS! This is the standard (not de-aliased) form of the volume integral. # So it is not provably stable for variable coefficients due to the the metric terms. 
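The changes to the two prolong routines above follow one pattern: a field such as `interfaces.u` is bound to a local variable once, before the `@threaded` loop, and the loop body only touches that local. A generic sketch of the same pattern with `Threads.@threads` follows; the container type and function name are invented for illustration, and whether this removes allocations in a particular case depends on how the threading macro's closure captures variables.

```julia
using Base.Threads

struct FaceData
    u::Array{Float64, 3}   # [variable, node, face]
end

function fill_faces!(data::FaceData)
    # Hoist the field access out of the hot loop so the threaded closure
    # works on a plain, concretely typed array instead of the whole struct.
    u_local = data.u
    @threads for face in axes(u_local, 3)
        for node in axes(u_local, 2), v in axes(u_local, 1)
            u_local[v, node, face] = v + node + face
        end
    end
    return nothing
end

fill_faces!(FaceData(zeros(2, 4, 8)))
```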
-@inline function calc_volume_integral!(du, u, - mesh::UnstructuredMesh2D, - nonconservative_terms::False, equations, - volume_integral::VolumeIntegralStrongForm, - dg::FDSBP, cache) +function calc_volume_integral!(du, u, + mesh::UnstructuredMesh2D, + nonconservative_terms::False, equations, + volume_integral::VolumeIntegralStrongForm, + dg::FDSBP, cache) D = dg.basis # SBP derivative operator @unpack f_threaded = cache @unpack contravariant_vectors = cache.elements From 2dfde7faf3cc74f066d86148ae6c99ed9e58fa79 Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Fri, 15 Mar 2024 13:55:10 +0100 Subject: [PATCH 141/166] Docstrings for some methods in basis Lobatto-Legendre (#1874) * Docstrings for some methods in basis LL * double back slash --- .../src/files/scalar_linear_advection_1d.jl | 2 +- src/solvers/dgsem/basis_lobatto_legendre.jl | 77 +++++++++++++++++-- 2 files changed, 72 insertions(+), 7 deletions(-) diff --git a/docs/literate/src/files/scalar_linear_advection_1d.jl b/docs/literate/src/files/scalar_linear_advection_1d.jl index 77ba7b087cc..9b48f29d341 100644 --- a/docs/literate/src/files/scalar_linear_advection_1d.jl +++ b/docs/literate/src/files/scalar_linear_advection_1d.jl @@ -115,7 +115,7 @@ integral = sum(nodes.^3 .* weights) # To approximate the solution, we need to get the polynomial coefficients $\{u_j^{Q_l}\}_{j=0}^N$ # for every element $Q_l$. -# After defining all nodes, we can implement the spatial coordinate $x$ and its initial value $u0$ +# After defining all nodes, we can implement the spatial coordinate $x$ and its initial value $u0 = u(t_0)$ # for every node. x = Matrix{Float64}(undef, length(nodes), n_elements) for element in 1:n_elements diff --git a/src/solvers/dgsem/basis_lobatto_legendre.jl b/src/solvers/dgsem/basis_lobatto_legendre.jl index 9e21b88dfa1..cac1dba9c74 100644 --- a/src/solvers/dgsem/basis_lobatto_legendre.jl +++ b/src/solvers/dgsem/basis_lobatto_legendre.jl @@ -404,7 +404,8 @@ function calc_dsplit(nodes, weights) return dsplit end -# Calculate the polynomial derivative matrix D +# Calculate the polynomial derivative matrix D. +# This implements algorithm 37 "PolynomialDerivativeMatrix" from Kopriva's book. function polynomial_derivative_matrix(nodes) n_nodes = length(nodes) d = zeros(n_nodes, n_nodes) @@ -421,6 +422,7 @@ function polynomial_derivative_matrix(nodes) end # Calculate and interpolation matrix (Vandermonde matrix) between two given sets of nodes +# See algorithm 32 "PolynomialInterpolationMatrix" from Kopriva's book. function polynomial_interpolation_matrix(nodes_in, nodes_out, baryweights_in = barycentric_weights(nodes_in)) n_nodes_in = length(nodes_in) @@ -433,6 +435,7 @@ function polynomial_interpolation_matrix(nodes_in, nodes_out, return vandermonde end +# This implements algorithm 32 "PolynomialInterpolationMatrix" from Kopriva's book. function polynomial_interpolation_matrix!(vandermonde, nodes_in, nodes_out, baryweights_in) @@ -463,7 +466,19 @@ function polynomial_interpolation_matrix!(vandermonde, return vandermonde end -# Calculate the barycentric weights for a given node distribution. +""" + barycentric_weights(nodes) + +Calculate the barycentric weights for a given node distribution, i.e., +```math +w_j = \\frac{1}{ \\prod_{k \\neq j} \\left( x_j - x_k \\right ) } +``` + +For details, see (especially Section 3) +- Jean-Paul Berrut and Lloyd N. Trefethen (2004). + Barycentric Lagrange Interpolation. 
+ [DOI:10.1137/S0036144502417715](https://doi.org/10.1137/S0036144502417715) +""" function barycentric_weights(nodes) n_nodes = length(nodes) weights = ones(n_nodes) @@ -494,12 +509,31 @@ function calc_lhat(x, nodes, weights) return lhat end -# Calculate Lagrange polynomials for a given node distribution. +""" + lagrange_interpolating_polynomials(x, nodes, wbary) + +Calculate Lagrange polynomials for a given node distribution with +associated barycentric weights `wbary` at a given point `x` on the +reference interval ``[-1, 1]``. + +This returns all ``l_j(x)``, i.e., the Lagrange polynomials for each node ``x_j``. +Thus, to obtain the interpolating polynomial ``p(x)`` at ``x``, one has to +multiply the Lagrange polynomials with the nodal values ``u_j`` and sum them up: +``p(x) = \\sum_{j=1}^{n} u_j l_j(x)``. + +For details, see e.g. Section 2 of +- Jean-Paul Berrut and Lloyd N. Trefethen (2004). + Barycentric Lagrange Interpolation. + [DOI:10.1137/S0036144502417715](https://doi.org/10.1137/S0036144502417715) +""" function lagrange_interpolating_polynomials(x, nodes, wbary) n_nodes = length(nodes) polynomials = zeros(n_nodes) for i in 1:n_nodes + # Avoid division by zero when `x` is close to node by using + # the Kronecker-delta property at nodes + # of the Lagrange interpolation polynomials. if isapprox(x, nodes[i], rtol = eps(x)) polynomials[i] = 1 return polynomials @@ -518,6 +552,17 @@ function lagrange_interpolating_polynomials(x, nodes, wbary) return polynomials end +""" + gauss_lobatto_nodes_weights(n_nodes::Integer) + +Computes nodes ``x_j`` and weights ``w_j`` for the (Legendre-)Gauss-Lobatto quadrature. +This implements algorithm 25 "GaussLobattoNodesAndWeights" from the book + +- David A. Kopriva, (2009). + Implementing spectral methods for partial differential equations: + Algorithms for scientists and engineers. + [DOI:10.1007/978-90-481-2261-5](https://doi.org/10.1007/978-90-481-2261-5) +""" # From FLUXO (but really from blue book by Kopriva) function gauss_lobatto_nodes_weights(n_nodes::Integer) # From Kopriva's book @@ -585,7 +630,7 @@ function gauss_lobatto_nodes_weights(n_nodes::Integer) return nodes, weights end -# From FLUXO (but really from blue book by Kopriva) +# From FLUXO (but really from blue book by Kopriva, algorithm 24) function calc_q_and_l(N::Integer, x::Float64) L_Nm2 = 1.0 L_Nm1 = x @@ -609,7 +654,17 @@ function calc_q_and_l(N::Integer, x::Float64) end calc_q_and_l(N::Integer, x::Real) = calc_q_and_l(N, convert(Float64, x)) -# From FLUXO (but really from blue book by Kopriva) +""" + gauss_nodes_weights(n_nodes::Integer) + +Computes nodes ``x_j`` and weights ``w_j`` for the Gauss-Legendre quadrature. +This implements algorithm 23 "LegendreGaussNodesAndWeights" from the book + +- David A. Kopriva, (2009). + Implementing spectral methods for partial differential equations: + Algorithms for scientists and engineers. + [DOI:10.1007/978-90-481-2261-5](https://doi.org/10.1007/978-90-481-2261-5) +""" function gauss_nodes_weights(n_nodes::Integer) # From Kopriva's book n_iterations = 10 @@ -666,7 +721,17 @@ function gauss_nodes_weights(n_nodes::Integer) end end -# From FLUXO (but really from blue book by Kopriva) +""" + legendre_polynomial_and_derivative(N::Int, x::Real) + +Computes the Legendre polynomial of degree `N` and its derivative at `x`. +This implements algorithm 22 "LegendrePolynomialAndDerivative" from the book + +- David A. Kopriva, (2009). + Implementing spectral methods for partial differential equations: + Algorithms for scientists and engineers. 
+ [DOI:10.1007/978-90-481-2261-5](https://doi.org/10.1007/978-90-481-2261-5) +""" function legendre_polynomial_and_derivative(N::Int, x::Real) if N == 0 poly = 1.0 From 1afcb33e2a6f030cbd7d7cb453029c05e2465e69 Mon Sep 17 00:00:00 2001 From: SimonCan Date: Mon, 18 Mar 2024 10:43:39 +0000 Subject: [PATCH 142/166] Removed coupling documentation from this PR, as it should be part of the coupling PR. --- docs/src/coupling.md | 27 --------------------------- 1 file changed, 27 deletions(-) delete mode 100644 docs/src/coupling.md diff --git a/docs/src/coupling.md b/docs/src/coupling.md deleted file mode 100644 index f715bfbdc0e..00000000000 --- a/docs/src/coupling.md +++ /dev/null @@ -1,27 +0,0 @@ -# [Coupling](@id coupling-id) -A complex simulation can consist of different spatial domains in which -different equations are being solved, different numerical methods being used -or the grid structure is different. -One example would be a fluid in a tank and an extended hot plate attached to it. -We would then like to solve the Navier-Stokes equations in the fluid domain -and the heat conduction equations in the plate. -The coupling would happen at the interface through the exchange of thermal energy. - - -## Converter Coupling -We can have the case where the two systems do not share any variables, but -share some of the physics. -Here, the same physics is just represented in a different form and with -different variables. -This is the case for a fluid system on one side and a Vlasov system on the other. -To translate the fields from one description to the other one needs to use -converter functions. - -In the general case we have one system with `m` variables `u_i` and another -system with `n` variables `v_j`. -We then define two coupling functions, one that transforms `u_i` into `v_i` -and one that goes the other way. - -In their minimal form they take the position vector `x` and state vector `u` -and return the transformed variables. -Examples can be seen in `examples/structured_2d_dgsem/elixir_advection_coupled.jl`. From 052f445e6af815b2c4e97982d9234e7c76ba95be Mon Sep 17 00:00:00 2001 From: SimonCan Date: Mon, 18 Mar 2024 10:44:17 +0000 Subject: [PATCH 143/166] Removed reference to coupling documentation, as it is part of a different PR. --- docs/make.jl | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/make.jl b/docs/make.jl index ac7013a5a82..df8ac04be12 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -107,7 +107,6 @@ makedocs( ], "Time integration" => "time_integration.md", "Callbacks" => "callbacks.md", - "Coupling" => "coupling.md" ], "Advanced topics & developers" => [ "Conventions" =>"conventions.md", From f2f682a527c5d9f340e3b96beb461f8968ad759f Mon Sep 17 00:00:00 2001 From: SimonCan Date: Mon, 18 Mar 2024 10:54:07 +0000 Subject: [PATCH 144/166] Removed code partaining the coupling PR. 
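For reference, the converter-coupling idea whose documentation is removed above amounts to a pair of functions that take the position `x` and the state of one system and return the corresponding boundary state of the other system. A minimal sketch, with a purely invented two-variable layout on both sides:

```julia
using StaticArrays

# Hypothetical converters between "system 1" and "system 2"; the scaling of the
# second variable stands in for whatever physics the two descriptions share.
convert_1_to_2(x, u) = SVector(u[1], 0.5 * u[2])
convert_2_to_1(x, v) = SVector(v[1], 2.0 * v[2])

u_boundary = convert_1_to_2(SVector(0.0, 0.5), SVector(1.0, 2.0))  # SVector(1.0, 1.0)
```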
--- .../elixir_advection_coupled.jl | 8 +++--- src/Trixi.jl | 3 --- .../coupling_converters.jl | 25 ------------------- .../semidiscretization_coupled.jl | 24 +++++++----------- 4 files changed, 12 insertions(+), 48 deletions(-) delete mode 100644 src/coupling_converters/coupling_converters.jl diff --git a/examples/structured_2d_dgsem/elixir_advection_coupled.jl b/examples/structured_2d_dgsem/elixir_advection_coupled.jl index 4907128e13d..2a56d23f4c0 100644 --- a/examples/structured_2d_dgsem/elixir_advection_coupled.jl +++ b/examples/structured_2d_dgsem/elixir_advection_coupled.jl @@ -2,9 +2,7 @@ using OrdinaryDiffEq using Trixi ############################################################################### -# Coupled semidiscretization of two linear advection systems using converter functions such that -# the upper half of the domain is coupled periodically, while the lower half is not coupled -# and any incoming wave is completely absorbed. +# Coupled semidiscretization of two linear advection systems, which are connected periodically # # In this elixir, we have a square domain that is divided into a left half and a right half. On each # half of the domain, a completely independent SemidiscretizationHyperbolic is created for the @@ -96,8 +94,8 @@ semi = SemidiscretizationCoupled(semi1, semi2) ############################################################################### # ODE solvers, callbacks etc. -# Create ODE problem with time span from 0.0 to 20.0 -ode = semidiscretize(semi, (0.0, 20.0)); +# Create ODE problem with time span from 0.0 to 2.0 +ode = semidiscretize(semi, (0.0, 2.0)); # At the beginning of the main loop, the SummaryCallback prints a summary of the simulation setup # and resets the timers diff --git a/src/Trixi.jl b/src/Trixi.jl index fe63ee51908..b8110cf5bdd 100644 --- a/src/Trixi.jl +++ b/src/Trixi.jl @@ -200,9 +200,6 @@ export boundary_condition_do_nothing, BoundaryConditionNavierStokesWall, NoSlip, Adiabatic, Isothermal, BoundaryConditionCoupled -export coupling_converter_identity, - coupling_converter_heaviside_2d, coupling_converter_linear_2d - export initial_condition_convergence_test, source_terms_convergence_test export source_terms_harmonic export initial_condition_poisson_nonperiodic, source_terms_poisson_nonperiodic, diff --git a/src/coupling_converters/coupling_converters.jl b/src/coupling_converters/coupling_converters.jl deleted file mode 100644 index 774986a503e..00000000000 --- a/src/coupling_converters/coupling_converters.jl +++ /dev/null @@ -1,25 +0,0 @@ -# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). -# Since these FMAs can increase the performance of many numerical algorithms, -# we need to opt-in explicitly. -# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. -@muladd begin -#! format: noindent - -@doc raw""" - coupling_converter_identity(semi::AbstractSemidiscretization, tspan) - -Identity coupling converter function. - -The coupling is given as a linear function. -```math -c(x) = u(x) -``` -""" -function coupling_converter_identity(equations::AbstractEquations) - return (x, u) -> u -end - -#################################################################################################### -# Include files with actual implementations for different systems of equations. 
- -end # @muladd diff --git a/src/semidiscretization/semidiscretization_coupled.jl b/src/semidiscretization/semidiscretization_coupled.jl index 140a2ac00ab..1fb2ab02dc2 100644 --- a/src/semidiscretization/semidiscretization_coupled.jl +++ b/src/semidiscretization/semidiscretization_coupled.jl @@ -364,18 +364,15 @@ BoundaryConditionCoupled(2, (:j, :i_backwards, :end), Float64) !!! warning "Experimental code" This is an experimental feature and can change any time. """ -mutable struct BoundaryConditionCoupled{NDIMS, NDIMST2M1, uEltype <: Real, Indices, - CouplingFunction} +mutable struct BoundaryConditionCoupled{NDIMS, NDIMST2M1, uEltype <: Real, Indices} # NDIMST2M1 == NDIMS * 2 - 1 # Buffer for boundary values: [variable, nodes_i, nodes_j, cell_i, cell_j] - u_boundary::Array{uEltype, NDIMST2M1} # NDIMS * 2 - 1 - other_semi_index::Int - other_orientation::Int - indices::Indices - coupling_converter::CouplingFunction - - function BoundaryConditionCoupled(other_semi_index, indices, uEltype, - coupling_converter) + u_boundary :: Array{uEltype, NDIMST2M1} # NDIMS * 2 - 1 + other_semi_index :: Int + other_orientation :: Int + indices :: Indices + + function BoundaryConditionCoupled(other_semi_index, indices, uEltype) NDIMS = length(indices) u_boundary = Array{uEltype, NDIMS * 2 - 1}(undef, ntuple(_ -> 0, NDIMS * 2 - 1)) @@ -387,11 +384,8 @@ mutable struct BoundaryConditionCoupled{NDIMS, NDIMST2M1, uEltype <: Real, Indic other_orientation = 3 end - new{NDIMS, NDIMS * 2 - 1, uEltype, typeof(indices), typeof(coupling_converter)}(u_boundary, - other_semi_index, - other_orientation, - indices, - coupling_converter) + new{NDIMS, NDIMS * 2 - 1, uEltype, typeof(indices)}(u_boundary, other_semi_index, + other_orientation, indices) end end From 3500692460d67124a2bca5b60933600c614971f2 Mon Sep 17 00:00:00 2001 From: SimonCan Date: Mon, 26 Jun 2023 16:31:21 +0100 Subject: [PATCH 145/166] Added coupling converters. --- src/Trixi.jl | 2 ++ .../coupling_converters.jl | 13 ++++++++++ .../coupling_converters_2d.jl | 25 +++++++++++++++++++ 3 files changed, 40 insertions(+) create mode 100644 src/coupling_converters/coupling_converters.jl create mode 100644 src/coupling_converters/coupling_converters_2d.jl diff --git a/src/Trixi.jl b/src/Trixi.jl index da7359999c5..d6a92106ad7 100644 --- a/src/Trixi.jl +++ b/src/Trixi.jl @@ -209,6 +209,8 @@ export boundary_condition_do_nothing, BoundaryConditionNavierStokesWall, NoSlip, Adiabatic, Isothermal, BoundaryConditionCoupled +export coupling_converter_heaviside_2d + export initial_condition_convergence_test, source_terms_convergence_test export source_terms_harmonic export initial_condition_poisson_nonperiodic, source_terms_poisson_nonperiodic, diff --git a/src/coupling_converters/coupling_converters.jl b/src/coupling_converters/coupling_converters.jl new file mode 100644 index 00000000000..a82b849f284 --- /dev/null +++ b/src/coupling_converters/coupling_converters.jl @@ -0,0 +1,13 @@ +# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). +# Since these FMAs can increase the performance of many numerical algorithms, +# we need to opt-in explicitly. +# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. +@muladd begin +#! format: noindent + +#################################################################################################### +# Include files with actual implementations for different systems of equations. 
+ +include("coupling_converters_2d.jl") + +end # @muladd diff --git a/src/coupling_converters/coupling_converters_2d.jl b/src/coupling_converters/coupling_converters_2d.jl new file mode 100644 index 00000000000..f18058632c0 --- /dev/null +++ b/src/coupling_converters/coupling_converters_2d.jl @@ -0,0 +1,25 @@ +# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). +# Since these FMAs can increase the performance of many numerical algorithms, +# we need to opt-in explicitly. +# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. +@muladd begin +#! format: noindent + +@doc raw""" + Coupling converter function for a system of two LinearScalarAdvectionEquation2D. + +The coupling is given as a Heaviside step. +```math +c(x) = {c_0, for x \ge x_0 \times s + 0, for x < x_0 \times s} +``` +Here, `s` is the sign of the step function, x_0 the save_position +of the step and c_0 the amplitude. +""" +function coupling_converter_heaviside_2d(x, x_0, c_0, s, + equations_left::LinearScalarAdvectionEquation2D, + equation_right::LinearScalarAdvectionEquation2D) + return c_0 * (s*sign(x[2] - x_0) + 1.0)/2.0 +end + +end # @muladd From 5a3b1a63e1db94003b0ee1cfcb802b3acb0c42c4 Mon Sep 17 00:00:00 2001 From: SimonCan Date: Thu, 29 Jun 2023 14:23:06 +0100 Subject: [PATCH 146/166] Added generic converter_function for structured 2d meshes. --- .../coupling_converters_2d.jl | 20 +++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/src/coupling_converters/coupling_converters_2d.jl b/src/coupling_converters/coupling_converters_2d.jl index f18058632c0..e360ffc4eac 100644 --- a/src/coupling_converters/coupling_converters_2d.jl +++ b/src/coupling_converters/coupling_converters_2d.jl @@ -5,6 +5,7 @@ @muladd begin #! format: noindent + @doc raw""" Coupling converter function for a system of two LinearScalarAdvectionEquation2D. @@ -16,10 +17,21 @@ c(x) = {c_0, for x \ge x_0 \times s Here, `s` is the sign of the step function, x_0 the save_position of the step and c_0 the amplitude. """ -function coupling_converter_heaviside_2d(x, x_0, c_0, s, - equations_left::LinearScalarAdvectionEquation2D, - equation_right::LinearScalarAdvectionEquation2D) - return c_0 * (s*sign(x[2] - x_0) + 1.0)/2.0 +function coupling_converter_heaviside_2d(x_0, c_0, s) + return (x, u) -> c_0 * (s*sign(x[2] - x_0) + 1.0)/2.0 * u +end + + +@doc raw""" + Coupling converter function for a system of two LinearScalarAdvectionEquation2D. + +The coupling is given as a linear function. +```math +c(x) = x * u(x) +``` +""" +function coupling_converter_linear_2d() + return (x, u) -> x[2]*u end end # @muladd From c1a98b52ce56fe1dedbf52e4b45281b31ef62cd1 Mon Sep 17 00:00:00 2001 From: SimonCan Date: Mon, 3 Jul 2023 16:33:47 +0100 Subject: [PATCH 147/166] Added example elixir for coupling converters. 
--- .../elixir_advection_coupled_converter.jl | 127 ++++++++++++++++++ 1 file changed, 127 insertions(+) create mode 100644 examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl diff --git a/examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl b/examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl new file mode 100644 index 00000000000..5546209aa19 --- /dev/null +++ b/examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl @@ -0,0 +1,127 @@ +using OrdinaryDiffEq +using Trixi +using Trixi2Vtk + + +############################################################################### +# Coupled semidiscretization of two linear advection systems, which are connected periodically +# +# In this elixir, we have a square domain that is divided into a left half and a right half. On each +# half of the domain, a completely independent SemidiscretizationHyperbolic is created for the +# linear advection equations. The two systems are coupled in the x-direction and have periodic +# boundaries in the y-direction. For a high-level overview, see also the figure below: +# +# (-1, 1) ( 1, 1) +# ┌────────────────────┬────────────────────┐ +# │ ↑ periodic ↑ │ ↑ periodic ↑ │ +# │ │ │ +# │ │ │ +# │ ========= │ ========= │ +# │ system #1 │ system #2 │ +# │ ========= │ ========= │ +# │ │ │ +# │ │ │ +# │ │ │ +# │ │ │ +# │ coupled -->│<-- coupled │ +# │ │ │ +# │<-- coupled │ coupled -->│ +# │ │ │ +# │ │ │ +# │ ↓ periodic ↓ │ ↓ periodic ↓ │ +# └────────────────────┴────────────────────┘ +# (-1, -1) ( 1, -1) + +advection_velocity = (0.2, -0.7) +equations = LinearScalarAdvectionEquation2D(advection_velocity) + +# Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux +solver = DGSEM(polydeg=3, surface_flux=flux_lax_friedrichs) + +# First mesh is the left half of a [-1,1]^2 square +coordinates_min1 = (-1.0, -1.0) # minimum coordinates (min(x), min(y)) +coordinates_max1 = ( 0.0, 1.0) # maximum coordinates (max(x), max(y)) + +# Define identical resolution as a variable such that it is easier to change from `trixi_include` +cells_per_dimension = (8, 16) + +cells_per_dimension1 = cells_per_dimension + +mesh1 = StructuredMesh(cells_per_dimension1, coordinates_min1, coordinates_max1) + +coupling_function1 = coupling_converter_heaviside_2d(-0.5, 1.0, 1.0, equations) +# coupling_function1 = (x, u) -> (sign(x[2] - 0.0)*0.1 + 1.0)/1.1 * u + +# A semidiscretization collects data structures and functions for the spatial discretization +semi1 = SemidiscretizationHyperbolic(mesh1, equations, initial_condition_convergence_test, solver, + boundary_conditions=( + # Connect left boundary with right boundary of right mesh + x_neg=BoundaryConditionCoupled(2, (:end, :i_forward), Float64, coupling_function1), + # Connect right boundary with left boundary of right mesh + x_pos=BoundaryConditionCoupled(2, (:begin, :i_forward), Float64, coupling_function1), + y_neg=boundary_condition_periodic, + y_pos=boundary_condition_periodic)) + + +# Second mesh is the right half of a [-1,1]^2 square +coordinates_min2 = (0.0, -1.0) # minimum coordinates (min(x), min(y)) +coordinates_max2 = (1.0, 1.0) # maximum coordinates (max(x), max(y)) + +cells_per_dimension2 = cells_per_dimension + +mesh2 = StructuredMesh(cells_per_dimension2, coordinates_min2, coordinates_max2) + +coupling_function2 = coupling_converter_heaviside_2d(-0.5, 1.0, 1.0, equations) +# coupling_function2 = (x, u) -> (sign(x[2] - 0.0)*0.1 + 1.0)/1.1 * u + +semi2 = SemidiscretizationHyperbolic(mesh2, 
equations, initial_condition_convergence_test, solver, + boundary_conditions=( + # Connect left boundary with right boundary of left mesh + x_neg=BoundaryConditionCoupled(1, (:end, :i_forward), Float64, coupling_function2), + # Connect right boundary with left boundary of left mesh + x_pos=BoundaryConditionCoupled(1, (:begin, :i_forward), Float64, coupling_function2), + y_neg=boundary_condition_periodic, + y_pos=boundary_condition_periodic)) + +# Create a semidiscretization that bundles semi1 and semi2 +semi = SemidiscretizationCoupled(semi1, semi2) + +############################################################################### +# ODE solvers, callbacks etc. + +# Create ODE problem with time span from 0.0 to 2.0 +ode = semidiscretize(semi, (0.0, 20.0)); + +# At the beginning of the main loop, the SummaryCallback prints a summary of the simulation setup +# and resets the timers +summary_callback = SummaryCallback() + +# The AnalysisCallback allows to analyse the solution in regular intervals and prints the results +analysis_callback1 = AnalysisCallback(semi1, interval=100) +analysis_callback2 = AnalysisCallback(semi2, interval=100) +analysis_callback = AnalysisCallbackCoupled(semi, analysis_callback1, analysis_callback2) + +# The SaveSolutionCallback allows to save the solution to a file in regular intervals +save_solution = SaveSolutionCallback(interval=1, + solution_variables=cons2prim) + +# The StepsizeCallback handles the re-calculation of the maximum Δt after each time step +stepsize_callback = StepsizeCallback(cfl=1.6) + +# Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver +callbacks = CallbackSet(summary_callback, analysis_callback, save_solution, stepsize_callback) + + +############################################################################### +# run the simulation + +# OrdinaryDiffEq's `solve` method evolves the solution in time and executes the passed callbacks +sol = solve(ode, CarpenterKennedy2N54(williamson_condition=false), + dt=1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep=false, callback=callbacks); + +# Print the timer summary +summary_callback() + +# Convert the snapshot to vtk data. +trixi2vtk("out/solution_*.h5") From de4bd5f17c0d13e097d6d940b2ecf3dbceda4cec Mon Sep 17 00:00:00 2001 From: SimonCan Date: Mon, 3 Jul 2023 17:24:58 +0100 Subject: [PATCH 148/166] Cleaned up converter coupling elixir. --- .../elixir_advection_coupled_converter.jl | 23 +++++++++++-------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl b/examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl index 5546209aa19..8aada6bdb2b 100644 --- a/examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl +++ b/examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl @@ -1,10 +1,10 @@ using OrdinaryDiffEq using Trixi -using Trixi2Vtk - ############################################################################### -# Coupled semidiscretization of two linear advection systems, which are connected periodically +# Coupled semidiscretization of two linear advection systems using converter functions such that +# the upper half of the domain is coupled periodically, while the lower half is not coupled +# and any incoming wave is completely absorbed. # # In this elixir, we have a square domain that is divided into a left half and a right half. 
On each # half of the domain, a completely independent SemidiscretizationHyperbolic is created for the @@ -50,15 +50,19 @@ cells_per_dimension1 = cells_per_dimension mesh1 = StructuredMesh(cells_per_dimension1, coordinates_min1, coordinates_max1) coupling_function1 = coupling_converter_heaviside_2d(-0.5, 1.0, 1.0, equations) +# The user can define their own coupling functions. # coupling_function1 = (x, u) -> (sign(x[2] - 0.0)*0.1 + 1.0)/1.1 * u +boundary_conditions_x_neg1 = BoundaryConditionCoupled(2, (:end, :i_forward), Float64, coupling_function1) +boundary_conditions_x_pos1 = BoundaryConditionCoupled(2, (:begin, :i_forward), Float64, coupling_function1) + # A semidiscretization collects data structures and functions for the spatial discretization semi1 = SemidiscretizationHyperbolic(mesh1, equations, initial_condition_convergence_test, solver, boundary_conditions=( # Connect left boundary with right boundary of right mesh - x_neg=BoundaryConditionCoupled(2, (:end, :i_forward), Float64, coupling_function1), + x_neg=boundary_conditions_x_neg1, # Connect right boundary with left boundary of right mesh - x_pos=BoundaryConditionCoupled(2, (:begin, :i_forward), Float64, coupling_function1), + x_pos=boundary_conditions_x_pos1, y_neg=boundary_condition_periodic, y_pos=boundary_condition_periodic)) @@ -74,12 +78,15 @@ mesh2 = StructuredMesh(cells_per_dimension2, coordinates_min2, coordinates_max2) coupling_function2 = coupling_converter_heaviside_2d(-0.5, 1.0, 1.0, equations) # coupling_function2 = (x, u) -> (sign(x[2] - 0.0)*0.1 + 1.0)/1.1 * u +boundary_conditions_x_neg2 = BoundaryConditionCoupled(1, (:end, :i_forward), Float64, coupling_function2) +boundary_conditions_x_pos2 = BoundaryConditionCoupled(1, (:begin, :i_forward), Float64, coupling_function2) + semi2 = SemidiscretizationHyperbolic(mesh2, equations, initial_condition_convergence_test, solver, boundary_conditions=( # Connect left boundary with right boundary of left mesh - x_neg=BoundaryConditionCoupled(1, (:end, :i_forward), Float64, coupling_function2), + x_neg=boundary_conditions_x_neg2, # Connect right boundary with left boundary of left mesh - x_pos=BoundaryConditionCoupled(1, (:begin, :i_forward), Float64, coupling_function2), + x_pos=boundary_conditions_x_pos2, y_neg=boundary_condition_periodic, y_pos=boundary_condition_periodic)) @@ -123,5 +130,3 @@ sol = solve(ode, CarpenterKennedy2N54(williamson_condition=false), # Print the timer summary summary_callback() -# Convert the snapshot to vtk data. -trixi2vtk("out/solution_*.h5") From 3cb41d36b5873e3ff4317ab207ce820682d09386 Mon Sep 17 00:00:00 2001 From: SimonCan Date: Mon, 3 Jul 2023 17:25:17 +0100 Subject: [PATCH 149/166] Added equations in coupling converters. --- src/coupling_converters/coupling_converters_2d.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/coupling_converters/coupling_converters_2d.jl b/src/coupling_converters/coupling_converters_2d.jl index e360ffc4eac..82ee854d826 100644 --- a/src/coupling_converters/coupling_converters_2d.jl +++ b/src/coupling_converters/coupling_converters_2d.jl @@ -17,7 +17,7 @@ c(x) = {c_0, for x \ge x_0 \times s Here, `s` is the sign of the step function, x_0 the save_position of the step and c_0 the amplitude. """ -function coupling_converter_heaviside_2d(x_0, c_0, s) +function coupling_converter_heaviside_2d(x_0, c_0, s, equations::LinearScalarAdvectionEquation2D) return (x, u) -> c_0 * (s*sign(x[2] - x_0) + 1.0)/2.0 * u end @@ -30,7 +30,7 @@ The coupling is given as a linear function. 
c(x) = x * u(x) ``` """ -function coupling_converter_linear_2d() +function coupling_converter_linear_2d(equations::LinearScalarAdvectionEquation2D) return (x, u) -> x[2]*u end From 64ec97dd7f204cf12e8c01c58416a3be59dbf02e Mon Sep 17 00:00:00 2001 From: SimonCan Date: Tue, 4 Jul 2023 14:49:24 +0100 Subject: [PATCH 150/166] Added identity converter function. --- src/Trixi.jl | 3 ++- .../coupling_converters.jl | 25 +++++++++++++++++++ .../coupling_converters_2d.jl | 1 - 3 files changed, 27 insertions(+), 2 deletions(-) diff --git a/src/Trixi.jl b/src/Trixi.jl index d6a92106ad7..53b722d7767 100644 --- a/src/Trixi.jl +++ b/src/Trixi.jl @@ -209,7 +209,8 @@ export boundary_condition_do_nothing, BoundaryConditionNavierStokesWall, NoSlip, Adiabatic, Isothermal, BoundaryConditionCoupled -export coupling_converter_heaviside_2d +export coupling_converter_identity, + coupling_converter_heaviside_2d, coupling_converter_linear_2d export initial_condition_convergence_test, source_terms_convergence_test export source_terms_harmonic diff --git a/src/coupling_converters/coupling_converters.jl b/src/coupling_converters/coupling_converters.jl index a82b849f284..53ebf900e41 100644 --- a/src/coupling_converters/coupling_converters.jl +++ b/src/coupling_converters/coupling_converters.jl @@ -5,6 +5,31 @@ @muladd begin #! format: noindent +@doc raw""" + coupling_converters + +Define converter functions for two coupled systems. +These should be used together with SemidiscretizationCoupled. +Using converter functions we can couple two systems that do not +share any variables. +This is done by taking the last inner point of system i, apply +a converter function on the state vector u_i and obtain a state +vector u_j for the boundary of system j. +""" + +@doc raw""" + Identity coupling converter function. + +The coupling is given as a linear function. +```math +c(x) = u(x) +``` +""" +function coupling_converter_identity(equations::AbstractEquations) + return (x, u) -> u +end + + #################################################################################################### # Include files with actual implementations for different systems of equations. diff --git a/src/coupling_converters/coupling_converters_2d.jl b/src/coupling_converters/coupling_converters_2d.jl index 82ee854d826..34011f4e1f7 100644 --- a/src/coupling_converters/coupling_converters_2d.jl +++ b/src/coupling_converters/coupling_converters_2d.jl @@ -5,7 +5,6 @@ @muladd begin #! format: noindent - @doc raw""" Coupling converter function for a system of two LinearScalarAdvectionEquation2D. From 21e9fd3f7a558f699216547f63146c82a4435bef Mon Sep 17 00:00:00 2001 From: SimonCan Date: Wed, 5 Jul 2023 15:59:58 +0100 Subject: [PATCH 151/166] Autoformat for converter coupling implementation. --- src/coupling_converters/coupling_converters.jl | 2 -- src/coupling_converters/coupling_converters_2d.jl | 9 ++++----- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/src/coupling_converters/coupling_converters.jl b/src/coupling_converters/coupling_converters.jl index 53ebf900e41..5856cb373ea 100644 --- a/src/coupling_converters/coupling_converters.jl +++ b/src/coupling_converters/coupling_converters.jl @@ -29,10 +29,8 @@ function coupling_converter_identity(equations::AbstractEquations) return (x, u) -> u end - #################################################################################################### # Include files with actual implementations for different systems of equations. 
include("coupling_converters_2d.jl") - end # @muladd diff --git a/src/coupling_converters/coupling_converters_2d.jl b/src/coupling_converters/coupling_converters_2d.jl index 34011f4e1f7..d527fe96894 100644 --- a/src/coupling_converters/coupling_converters_2d.jl +++ b/src/coupling_converters/coupling_converters_2d.jl @@ -16,11 +16,11 @@ c(x) = {c_0, for x \ge x_0 \times s Here, `s` is the sign of the step function, x_0 the save_position of the step and c_0 the amplitude. """ -function coupling_converter_heaviside_2d(x_0, c_0, s, equations::LinearScalarAdvectionEquation2D) - return (x, u) -> c_0 * (s*sign(x[2] - x_0) + 1.0)/2.0 * u +function coupling_converter_heaviside_2d(x_0, c_0, s, + equations::LinearScalarAdvectionEquation2D) + return (x, u) -> c_0 * (s * sign(x[2] - x_0) + 1.0) / 2.0 * u end - @doc raw""" Coupling converter function for a system of two LinearScalarAdvectionEquation2D. @@ -30,7 +30,6 @@ c(x) = x * u(x) ``` """ function coupling_converter_linear_2d(equations::LinearScalarAdvectionEquation2D) - return (x, u) -> x[2]*u + return (x, u) -> x[2] * u end - end # @muladd From 7aa4cfea176d30ffb30709607428973a6ef56329 Mon Sep 17 00:00:00 2001 From: SimonCan Date: Wed, 5 Jul 2023 16:04:50 +0100 Subject: [PATCH 152/166] Added coupled converter elixir. --- .../elixir_advection_coupled_converter.jl | 76 ++++++++++--------- 1 file changed, 40 insertions(+), 36 deletions(-) diff --git a/examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl b/examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl index 8aada6bdb2b..18009ef4f92 100644 --- a/examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl +++ b/examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl @@ -36,11 +36,11 @@ advection_velocity = (0.2, -0.7) equations = LinearScalarAdvectionEquation2D(advection_velocity) # Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux -solver = DGSEM(polydeg=3, surface_flux=flux_lax_friedrichs) +solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) # First mesh is the left half of a [-1,1]^2 square coordinates_min1 = (-1.0, -1.0) # minimum coordinates (min(x), min(y)) -coordinates_max1 = ( 0.0, 1.0) # maximum coordinates (max(x), max(y)) +coordinates_max1 = (0.0, 1.0) # maximum coordinates (max(x), max(y)) # Define identical resolution as a variable such that it is easier to change from `trixi_include` cells_per_dimension = (8, 16) @@ -53,23 +53,25 @@ coupling_function1 = coupling_converter_heaviside_2d(-0.5, 1.0, 1.0, equations) # The user can define their own coupling functions. 
# coupling_function1 = (x, u) -> (sign(x[2] - 0.0)*0.1 + 1.0)/1.1 * u -boundary_conditions_x_neg1 = BoundaryConditionCoupled(2, (:end, :i_forward), Float64, coupling_function1) -boundary_conditions_x_pos1 = BoundaryConditionCoupled(2, (:begin, :i_forward), Float64, coupling_function1) +boundary_conditions_x_neg1 = BoundaryConditionCoupled(2, (:end, :i_forward), Float64, + coupling_function1) +boundary_conditions_x_pos1 = BoundaryConditionCoupled(2, (:begin, :i_forward), Float64, + coupling_function1) # A semidiscretization collects data structures and functions for the spatial discretization -semi1 = SemidiscretizationHyperbolic(mesh1, equations, initial_condition_convergence_test, solver, - boundary_conditions=( - # Connect left boundary with right boundary of right mesh - x_neg=boundary_conditions_x_neg1, - # Connect right boundary with left boundary of right mesh - x_pos=boundary_conditions_x_pos1, - y_neg=boundary_condition_periodic, - y_pos=boundary_condition_periodic)) - +semi1 = SemidiscretizationHyperbolic(mesh1, equations, initial_condition_convergence_test, + solver, + boundary_conditions = ( + # Connect left boundary with right boundary of right mesh + x_neg = boundary_conditions_x_neg1, + # Connect right boundary with left boundary of right mesh + x_pos = boundary_conditions_x_pos1, + y_neg = boundary_condition_periodic, + y_pos = boundary_condition_periodic)) # Second mesh is the right half of a [-1,1]^2 square coordinates_min2 = (0.0, -1.0) # minimum coordinates (min(x), min(y)) -coordinates_max2 = (1.0, 1.0) # maximum coordinates (max(x), max(y)) +coordinates_max2 = (1.0, 1.0) # maximum coordinates (max(x), max(y)) cells_per_dimension2 = cells_per_dimension @@ -78,17 +80,20 @@ mesh2 = StructuredMesh(cells_per_dimension2, coordinates_min2, coordinates_max2) coupling_function2 = coupling_converter_heaviside_2d(-0.5, 1.0, 1.0, equations) # coupling_function2 = (x, u) -> (sign(x[2] - 0.0)*0.1 + 1.0)/1.1 * u -boundary_conditions_x_neg2 = BoundaryConditionCoupled(1, (:end, :i_forward), Float64, coupling_function2) -boundary_conditions_x_pos2 = BoundaryConditionCoupled(1, (:begin, :i_forward), Float64, coupling_function2) - -semi2 = SemidiscretizationHyperbolic(mesh2, equations, initial_condition_convergence_test, solver, - boundary_conditions=( - # Connect left boundary with right boundary of left mesh - x_neg=boundary_conditions_x_neg2, - # Connect right boundary with left boundary of left mesh - x_pos=boundary_conditions_x_pos2, - y_neg=boundary_condition_periodic, - y_pos=boundary_condition_periodic)) +boundary_conditions_x_neg2 = BoundaryConditionCoupled(1, (:end, :i_forward), Float64, + coupling_function2) +boundary_conditions_x_pos2 = BoundaryConditionCoupled(1, (:begin, :i_forward), Float64, + coupling_function2) + +semi2 = SemidiscretizationHyperbolic(mesh2, equations, initial_condition_convergence_test, + solver, + boundary_conditions = ( + # Connect left boundary with right boundary of left mesh + x_neg = boundary_conditions_x_neg2, + # Connect right boundary with left boundary of left mesh + x_pos = boundary_conditions_x_pos2, + y_neg = boundary_condition_periodic, + y_pos = boundary_condition_periodic)) # Create a semidiscretization that bundles semi1 and semi2 semi = SemidiscretizationCoupled(semi1, semi2) @@ -104,29 +109,28 @@ ode = semidiscretize(semi, (0.0, 20.0)); summary_callback = SummaryCallback() # The AnalysisCallback allows to analyse the solution in regular intervals and prints the results -analysis_callback1 = AnalysisCallback(semi1, interval=100) 
-analysis_callback2 = AnalysisCallback(semi2, interval=100)
+analysis_callback1 = AnalysisCallback(semi1, interval = 100)
+analysis_callback2 = AnalysisCallback(semi2, interval = 100)
 analysis_callback = AnalysisCallbackCoupled(semi, analysis_callback1, analysis_callback2)

 # The SaveSolutionCallback allows to save the solution to a file in regular intervals
-save_solution = SaveSolutionCallback(interval=1,
-                                     solution_variables=cons2prim)
+save_solution = SaveSolutionCallback(interval = 1,
+                                     solution_variables = cons2prim)

 # The StepsizeCallback handles the re-calculation of the maximum Δt after each time step
-stepsize_callback = StepsizeCallback(cfl=1.6)
+stepsize_callback = StepsizeCallback(cfl = 1.6)

 # Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver
-callbacks = CallbackSet(summary_callback, analysis_callback, save_solution, stepsize_callback)
-
+callbacks = CallbackSet(summary_callback, analysis_callback, save_solution,
+                        stepsize_callback)

 ###############################################################################
 # run the simulation

 # OrdinaryDiffEq's `solve` method evolves the solution in time and executes the passed callbacks
-sol = solve(ode, CarpenterKennedy2N54(williamson_condition=false),
-            dt=1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
-            save_everystep=false, callback=callbacks);
+sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false),
+            dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
+            save_everystep = false, callback = callbacks);

 # Print the timer summary
 summary_callback()
-

From bb047ed5d9c19205b8b65a47c3061753ea443b2c Mon Sep 17 00:00:00 2001
From: SimonCan
Date: Tue, 18 Jul 2023 16:36:05 +0100
Subject: [PATCH 153/166] Removed redundant doc string.

---
 src/coupling_converters/coupling_converters.jl | 12 ------------
 1 file changed, 12 deletions(-)

diff --git a/src/coupling_converters/coupling_converters.jl b/src/coupling_converters/coupling_converters.jl
index 5856cb373ea..3c2e08e34ac 100644
--- a/src/coupling_converters/coupling_converters.jl
+++ b/src/coupling_converters/coupling_converters.jl
@@ -5,18 +5,6 @@
 @muladd begin
 #! format: noindent

-@doc raw"""
-    coupling_converters
-
-Define converter functions for two coupled systems.
-These should be used together with SemidiscretizationCoupled.
-Using converter functions we can couple two systems that do not
-share any variables.
-This is done by taking the last inner point of system i, apply
-a converter function on the state vector u_i and obtain a state
-vector u_j for the boundary of system j.
-"""
-
 @doc raw"""
     Identity coupling converter function.

From 5fbe20314b7a1546485e61edd7962145a45b69b3 Mon Sep 17 00:00:00 2001
From: SimonCan
Date: Tue, 18 Jul 2023 16:40:06 +0100
Subject: [PATCH 154/166] Added function signature in doc string.

---
 src/coupling_converters/coupling_converters.jl | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/coupling_converters/coupling_converters.jl b/src/coupling_converters/coupling_converters.jl
index 3c2e08e34ac..b4ecb96bf62 100644
--- a/src/coupling_converters/coupling_converters.jl
+++ b/src/coupling_converters/coupling_converters.jl
@@ -6,6 +6,7 @@
 #! format: noindent

 @doc raw"""
+    coupling_converter_identity(semi::AbstractSemidiscretization, tspan)
     Identity coupling converter function.

 The coupling is given as a linear function.
 ```math
 c(x) = u(x)
 ```
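
(For orientation: a minimal usage sketch of the identity converter introduced above. It assumes the state of this branch, i.e. the `equations`-based `coupling_converter_identity` from PATCH 150 and the four-argument `BoundaryConditionCoupled` constructor used in the converter elixir; the mesh index, boundary indices, and element type below are illustrative only.)

```julia
using Trixi

# Advection setup as in the converter elixir (illustrative values).
equations = LinearScalarAdvectionEquation2D((0.2, -0.7))

# The identity converter simply forwards the neighboring state: (x, u) -> u.
coupling_function = coupling_converter_identity(equations)

# The converter is passed as the fourth argument of BoundaryConditionCoupled,
# so the received boundary data is converted before it is used.
boundary_condition = BoundaryConditionCoupled(2, (:end, :i_forward), Float64,
                                              coupling_function)
```
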
From e5cec5b6cf1c61b8580fdc161679fa83a5045f82 Mon Sep 17 00:00:00 2001
From: Simon Candelaresi <10759273+SimonCan@users.noreply.github.com>
Date: Wed, 19 Jul 2023 12:21:56 +0100
Subject: [PATCH 155/166] Update make.jl

Added interface coupling docs to the main menu.
---
 docs/make.jl | 1 +
 1 file changed, 1 insertion(+)

diff --git a/docs/make.jl b/docs/make.jl
index 8427c4049bf..a4e448904ed 100644
--- a/docs/make.jl
+++ b/docs/make.jl
@@ -126,6 +126,7 @@ makedocs(
         "Testing" => "testing.md",
         "Performance" => "performance.md",
         "Parallelization" => "parallelization.md",
+        "Coupling" => "coupling.md"
     ],
     "Troubleshooting and FAQ" => "troubleshooting.md",
     "Reference" => [

From 69953402b8229fa6b677739b8931183e23ee0e84 Mon Sep 17 00:00:00 2001
From: Simon Candelaresi <10759273+SimonCan@users.noreply.github.com>
Date: Wed, 19 Jul 2023 12:36:22 +0100
Subject: [PATCH 156/166] Create coupling.md

---
 docs/src/coupling.md | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 docs/src/coupling.md

diff --git a/docs/src/coupling.md b/docs/src/coupling.md
new file mode 100644
index 00000000000..e0a9d916e88
--- /dev/null
+++ b/docs/src/coupling.md
@@ -0,0 +1 @@
+# [Coupling](@id coupling-id)

From 9c2b0f2aebdcf772db58ba2eb80cf4222d1fd8db Mon Sep 17 00:00:00 2001
From: Simon Candelaresi <10759273+SimonCan@users.noreply.github.com>
Date: Wed, 19 Jul 2023 13:05:08 +0100
Subject: [PATCH 157/166] Update coupling.md

Added some documentation on coupling converters.
---
 docs/src/coupling.md | 32 ++++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)

diff --git a/docs/src/coupling.md b/docs/src/coupling.md
index e0a9d916e88..cce3594e140 100644
--- a/docs/src/coupling.md
+++ b/docs/src/coupling.md
@@ -1 +1,33 @@
 # [Coupling](@id coupling-id)
+A complex simulation can consist of different spatial domains in which
+different equations are being solved, different numerical methods being used
+or the grid structure is different.
+One example would be a fluid in a tank and an extended hot plate attached to it.
+We would then like to solve the Navier-Stokes equations in the fluid domain
+and the heat conduction equations in the plate.
+The coupling would happen at the interface through the exchange of thermal energy.
+
+Another type of coupling is bulk or volume coupling.
+There we have at least two systems that share all or parts of the domain.
+We could, for instance, have a Maxwell system and a fluid system.
+The coupling would then occur through the Lorentz force.
+
+
+## Converter Coupling
+We can have the case where the two systems do not share any variables, but
+share some of the physics.
+Here, the same physics is just represented in a different form and with
+different variables.
+This is the case for a fluid system on one side and a Vlasov system on the other.
+To translate the fields from one description to the other one needs to use
+converter functions.
+
+In the general case we have one system with `m` variables `u_i` and another
+system with `n` variables `v_j`.
+We then define two coupling functions, one that transforms `u_i` into `v_i`
+and one that goes the other way.
+
+In their minimal form they take the position vector `x` and state vector `u`
+and return the transformed variables.
+Examples can be seen in `examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl`
+and in `src/coupling_converters/coupling_converters_2d.jl`.
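
(A sketch of the "minimal form" described in the documentation above: two closures that translate between the states of two coupled systems. The variable counts and transformations are hypothetical and not part of Trixi's API; they only illustrate the `(x, u) -> ...` calling convention.)

```julia
using StaticArrays: SVector

# System 1 carries three variables u = (u_1, u_2, u_3); system 2 carries two
# variables v = (v_1, v_2).  Each converter receives the position vector x and
# the state of the neighboring system and returns the state expected by the
# receiving system.

# u -> v: keep the first variable, lump the remaining two together.
converter_1_to_2 = (x, u) -> SVector(u[1], u[2] + u[3])

# v -> u: split the lumped quantity evenly again; an x-dependent weighting
# (e.g. via x[2], as in the Heaviside converter of this series) would also work.
converter_2_to_1 = (x, v) -> SVector(v[1], 0.5 * v[2], 0.5 * v[2])
```
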
From b4eb394f265c18daa60f09053762edf8ec111605 Mon Sep 17 00:00:00 2001
From: SimonCan
Date: Thu, 20 Jul 2023 12:37:41 +0100
Subject: [PATCH 158/166] Changed coupling converter function.

---
 .../structured_2d_dgsem/elixir_advection_coupled_converter.jl | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl b/examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl
index 18009ef4f92..766fbff8a14 100644
--- a/examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl
+++ b/examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl
@@ -49,9 +49,8 @@ cells_per_dimension1 = cells_per_dimension

 mesh1 = StructuredMesh(cells_per_dimension1, coordinates_min1, coordinates_max1)

-coupling_function1 = coupling_converter_heaviside_2d(-0.5, 1.0, 1.0, equations)
 # The user can define their own coupling functions.
-# coupling_function1 = (x, u) -> (sign(x[2] - 0.0)*0.1 + 1.0)/1.1 * u
+coupling_function1 = (x, u) -> (sign(x[2] - 0.0)*0.1 + 1.0)/1.1 * u

 boundary_conditions_x_neg1 = BoundaryConditionCoupled(2, (:end, :i_forward), Float64,
                                                       coupling_function1)

From a6b8e8cccdca06dcad5832a7a5eaf12540434f54 Mon Sep 17 00:00:00 2001
From: SimonCan
Date: Tue, 25 Jul 2023 14:56:15 +0100
Subject: [PATCH 159/166] Removed volume coupling from documentation to avoid
 confusion.

---
 docs/src/coupling.md | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/docs/src/coupling.md b/docs/src/coupling.md
index cce3594e140..a10b90f7ee8 100644
--- a/docs/src/coupling.md
+++ b/docs/src/coupling.md
@@ -7,11 +7,6 @@ We would then like to solve the Navier-Stokes equations in the fluid domain
 and the heat conduction equations in the plate.
 The coupling would happen at the interface through the exchange of thermal energy.

-Another type of coupling is bulk or volume coupling.
-There we have at least two systems that share all or parts of the domain.
-We could, for instance, have a Maxwell system and a fluid system.
-The coupling would then occur through the Lorentz force.
-

 ## Converter Coupling
 We can have the case where the two systems do not share any variables, but

From ffe98096028218a0f6d3cd5e9aeab34a7931bac0 Mon Sep 17 00:00:00 2001
From: SimonCan
Date: Tue, 25 Jul 2023 14:59:34 +0100
Subject: [PATCH 160/166] Removed redundant converter function for coupling.

---
 .../coupling_converters_2d.jl | 35 ------------------------
 1 file changed, 35 deletions(-)
 delete mode 100644 src/coupling_converters/coupling_converters_2d.jl

diff --git a/src/coupling_converters/coupling_converters_2d.jl b/src/coupling_converters/coupling_converters_2d.jl
deleted file mode 100644
index d527fe96894..00000000000
--- a/src/coupling_converters/coupling_converters_2d.jl
+++ /dev/null
@@ -1,35 +0,0 @@
-# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
-# Since these FMAs can increase the performance of many numerical algorithms,
-# we need to opt-in explicitly.
-# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
-@muladd begin
-#! format: noindent
-
-@doc raw"""
-    Coupling converter function for a system of two LinearScalarAdvectionEquation2D.
-
-The coupling is given as a Heaviside step.
-```math
-c(x) = {c_0, for x \ge x_0 \times s
-        0, for x < x_0 \times s}
-```
-Here, `s` is the sign of the step function, x_0 the save_position
-of the step and c_0 the amplitude.
-""" -function coupling_converter_heaviside_2d(x_0, c_0, s, - equations::LinearScalarAdvectionEquation2D) - return (x, u) -> c_0 * (s * sign(x[2] - x_0) + 1.0) / 2.0 * u -end - -@doc raw""" - Coupling converter function for a system of two LinearScalarAdvectionEquation2D. - -The coupling is given as a linear function. -```math -c(x) = x * u(x) -``` -""" -function coupling_converter_linear_2d(equations::LinearScalarAdvectionEquation2D) - return (x, u) -> x[2] * u -end -end # @muladd From 6f5ff694d4b2374aa6f9149ef19d632b93f1f876 Mon Sep 17 00:00:00 2001 From: SimonCan Date: Tue, 25 Jul 2023 15:00:33 +0100 Subject: [PATCH 161/166] Removed redundant coupling converter file mentioned in some files. --- docs/src/coupling.md | 3 +-- src/coupling_converters/coupling_converters.jl | 1 - 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/docs/src/coupling.md b/docs/src/coupling.md index a10b90f7ee8..10194801cab 100644 --- a/docs/src/coupling.md +++ b/docs/src/coupling.md @@ -24,5 +24,4 @@ and one that goes the other way. In their minimal form they take the position vector `x` and state vector `u` and return the transformed variables. -Examples can be seen in `examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl` -and in `src/coupling_converters/coupling_converters_2d.jl`. +Examples can be seen in `examples/structured_2d_dgsem/elixir_advection_coupled_converter.jl`. diff --git a/src/coupling_converters/coupling_converters.jl b/src/coupling_converters/coupling_converters.jl index b4ecb96bf62..b97801883d6 100644 --- a/src/coupling_converters/coupling_converters.jl +++ b/src/coupling_converters/coupling_converters.jl @@ -21,5 +21,4 @@ end #################################################################################################### # Include files with actual implementations for different systems of equations. -include("coupling_converters_2d.jl") end # @muladd From fa4e5f9292f328c4026a0d8f2b2875b1c70f8b6e Mon Sep 17 00:00:00 2001 From: Simon Candelaresi <10759273+SimonCan@users.noreply.github.com> Date: Tue, 25 Jul 2023 14:57:51 +0100 Subject: [PATCH 162/166] Update src/coupling_converters/coupling_converters.jl Co-authored-by: Hendrik Ranocha --- src/coupling_converters/coupling_converters.jl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/coupling_converters/coupling_converters.jl b/src/coupling_converters/coupling_converters.jl index b97801883d6..774986a503e 100644 --- a/src/coupling_converters/coupling_converters.jl +++ b/src/coupling_converters/coupling_converters.jl @@ -7,7 +7,8 @@ @doc raw""" coupling_converter_identity(semi::AbstractSemidiscretization, tspan) - Identity coupling converter function. + +Identity coupling converter function. The coupling is given as a linear function. ```math From 311dbfe676327151e648f6de9e189a6726642fb5 Mon Sep 17 00:00:00 2001 From: SimonCan Date: Wed, 2 Aug 2023 16:39:22 +0100 Subject: [PATCH 163/166] Added memory allocation for p4est boundary. 
---
 .../semidiscretization_coupled.jl | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/src/semidiscretization/semidiscretization_coupled.jl b/src/semidiscretization/semidiscretization_coupled.jl
index dc21dbe9a1e..36af1190c75 100644
--- a/src/semidiscretization/semidiscretization_coupled.jl
+++ b/src/semidiscretization/semidiscretization_coupled.jl
@@ -467,6 +467,24 @@ end
 # In 2D
 function allocate_coupled_boundary_condition(boundary_condition::BoundaryConditionCoupled{2},
                                              direction, mesh, equations, dg::DGSEM)
+    @autoinfiltrate
+    if direction in (1, 2)
+        cell_size = size(mesh, 2)
+    else
+        cell_size = size(mesh, 1)
+    end
+
+    uEltype = eltype(boundary_condition)
+    boundary_condition.u_boundary = Array{uEltype, 3}(undef, nvariables(equations),
+                                                      nnodes(dg),
+                                                      cell_size)
+end
+
+# In 2D
+function allocate_coupled_boundary_condition(boundary_condition::BoundaryConditionCoupled{2
+                                                                                           },
+                                             direction, mesh::P4estMesh, equations, dg::DGSEM)
+    @autoinfiltrate
     if direction in (1, 2)
         cell_size = size(mesh, 2)
     else

From 978064263c2d0eeff96ecdc98e64dcd41f42d61d Mon Sep 17 00:00:00 2001
From: SimonCan
Date: Tue, 8 Aug 2023 10:58:17 +0100
Subject: [PATCH 164/166] Added new struct members of p4est to the IO.

---
 src/meshes/mesh_io.jl | 27 +++++++++++++++++++++------
 1 file changed, 21 insertions(+), 6 deletions(-)

diff --git a/src/meshes/mesh_io.jl b/src/meshes/mesh_io.jl
index 28e6efa8c57..b59324de386 100644
--- a/src/meshes/mesh_io.jl
+++ b/src/meshes/mesh_io.jl
@@ -177,6 +177,9 @@ function save_mesh_file(mesh::P4estMesh, output_directory, timestep,
         # to increase the runtime performance
         # but HDF5 can only handle plain arrays
         file["boundary_names"] = mesh.boundary_names .|> String
+        file["coordinates_min"] = Vector(mesh.coordinates_min)
+        file["coordinates_max"] = Vector(mesh.coordinates_max)
+        file["trees_per_dimension"] = Vector(mesh.trees_per_dimension)
     end

     return filename
@@ -217,6 +220,9 @@ function save_mesh_file(mesh::P4estMesh, output_directory, timestep, mpi_paralle
         # to increase the runtime performance
        # but HDF5 can only handle plain arrays
         file["boundary_names"] = mesh.boundary_names .|> String
+        file["coordinates_min"] = mesh.coordinates_min
+        file["coordinates_max"] = mesh.coordinates_max
+        file["trees_per_dimension"] = mesh.trees_per_dimension
     end

     return filename
@@ -301,11 +307,15 @@ function load_mesh_serial(mesh_file::AbstractString; n_cells_max, RealT)
                           unsaved_changes = false)
     elseif mesh_type == "P4estMesh"
         p4est_filename, tree_node_coordinates,
-        nodes, boundary_names_ = h5open(mesh_file, "r") do file
+        nodes, boundary_names_, coordinates_min,
+        coordinates_max, trees_per_dimension = h5open(mesh_file, "r") do file
             return read(attributes(file)["p4est_file"]),
                    read(file["tree_node_coordinates"]),
                    read(file["nodes"]),
-                   read(file["boundary_names"])
+                   read(file["boundary_names"]),
+                   read(file["coordinates_min"]),
+                   read(file["coordinates_max"]),
+                   read(file["trees_per_dimension"])
         end

         boundary_names = boundary_names_ .|> Symbol
@@ -317,7 +327,8 @@ function load_mesh_serial(mesh_file::AbstractString; n_cells_max, RealT)
         p4est = load_p4est(p4est_file, Val(ndims))

         mesh = P4estMesh{ndims}(p4est, tree_node_coordinates,
-                                nodes, boundary_names, "", false, true)
+                                nodes, boundary_names, "", false, true,
+                                coordinates_min, coordinates_max, trees_per_dimension)
     else
         error("Unknown mesh type!")
     end
@@ -381,12 +392,16 @@ function load_mesh_parallel(mesh_file::AbstractString; n_cells_max, RealT)
     elseif mesh_type == "P4estMesh"
         if mpi_isroot()
             p4est_filename, tree_node_coordinates,
-            nodes, boundary_names_ = h5open(mesh_file, "r") do file
+            nodes, boundary_names_, coordinates_min,
+            coordinates_max, trees_per_dimension = h5open(mesh_file, "r") do file
                 return read(attributes(file)["p4est_file"]),
                        read(file["tree_node_coordinates"]),
                        read(file["nodes"]),
-                       read(file["boundary_names"])
-            end
+                       read(file["boundary_names"]),
+                       read(file["coordinates_min"]),
+                       read(file["coordinates_max"]),
+                       read(file["trees_per_dimension"])
+            end

             boundary_names = boundary_names_ .|> Symbol

From 738116223804d5a278425bf07348de598fea8bc2 Mon Sep 17 00:00:00 2001
From: iomsn
Date: Tue, 19 Mar 2024 15:08:23 +0000
Subject: [PATCH 165/166] Removed conflicting lines.

---
 .../semidiscretization_coupled.jl | 22 +++----------------
 1 file changed, 3 insertions(+), 19 deletions(-)

diff --git a/src/semidiscretization/semidiscretization_coupled.jl b/src/semidiscretization/semidiscretization_coupled.jl
index ce730ce301c..8ab3eba0ce7 100644
--- a/src/semidiscretization/semidiscretization_coupled.jl
+++ b/src/semidiscretization/semidiscretization_coupled.jl
@@ -495,7 +495,7 @@ function allocate_coupled_boundary_condition(boundary_condition::BoundaryConditi
     uEltype = eltype(boundary_condition)
     boundary_condition.u_boundary = Array{uEltype, 3}(undef, nvariables(equations),
                                                       nnodes(dg),
-                                                      cell_size)
+                                                     cell_size)
 end

 # In 2D for a p4est mesh.
@@ -561,7 +561,6 @@ function copy_to_coupled_boundary!(boundary_condition::BoundaryConditionCoupled{
     mesh_own, equations_own, solver_own, cache_own = mesh_equations_solver_cache(semi)

-<<<<<<< HEAD
     mesh_other, equations_other, solver_other, cache_other = mesh_equations_solver_cache(other_semi_index,
                                                                                           1,
                                                                                           semi_coupled.semis...)
@@ -577,7 +576,8 @@ function copy_to_coupled_boundary!(boundary_condition::BoundaryConditionCoupled{
         cells = axes(mesh_other, 2)
     else # other_orientation == 2
         cells = axes(mesh_other, 1)
-=======
+    end
+
     if mesh isa P4estMesh
         linear_indices = LinearIndices((mesh.trees_per_dimension[1], mesh.trees_per_dimension[2]))
     else
@@ -596,7 +596,6 @@ function copy_to_coupled_boundary!(boundary_condition::BoundaryConditionCoupled{
         else # other_orientation == 2
             cells = axes(mesh, 1)
         end
->>>>>>> f2f682a527c5d9f340e3b96beb461f8968ad759f
     end

     # Copy solution data to the coupled boundary using "delayed indexing" with
@@ -605,10 +604,8 @@ function copy_to_coupled_boundary!(boundary_condition::BoundaryConditionCoupled{
     i_node_start, i_node_step = index_to_start_step_2d(indices[1], node_index_range)
     j_node_start, j_node_step = index_to_start_step_2d(indices[2], node_index_range)

-<<<<<<< HEAD
     i_cell_start, i_cell_step = index_to_start_step_2d(indices[1], axes(mesh_other, 1))
     j_cell_start, j_cell_step = index_to_start_step_2d(indices[2], axes(mesh_other, 2))
-=======
     if mesh isa P4estMesh
         i_cell_start, i_cell_step = index_to_start_step_2d(indices[1], mesh.trees_per_dimension[1])
         j_cell_start, j_cell_step = index_to_start_step_2d(indices[2], mesh.trees_per_dimension[2])
     else
         i_cell_start, i_cell_step = index_to_start_step_2d(indices[1], axes(mesh, 1))
         j_cell_start, j_cell_step = index_to_start_step_2d(indices[2], axes(mesh, 2))
     end
->>>>>>> f2f682a527c5d9f340e3b96beb461f8968ad759f

     i_cell = i_cell_start
     j_cell = j_cell_start
@@ -627,7 +623,6 @@ function copy_to_coupled_boundary!(boundary_condition::BoundaryConditionCoupled{
             j_node = j_node_start
             element_id = linear_indices[i_cell, j_cell]

-<<<<<<< HEAD
             for element_id in eachnode(solver_other)
                 x_other =
get_node_coords(node_coordinates_other, equations_other, solver_other, @@ -640,17 +635,6 @@ function copy_to_coupled_boundary!(boundary_condition::BoundaryConditionCoupled{ for i in eachindex(u_node_converted) u_boundary[i, element_id, cell] = u_node_converted[i] -======= - for i in eachnode(solver) - for v in 1:size(u, 1) - x = cache.elements.node_coordinates[:, i_node, j_node, - linear_indices[i_cell, j_cell]] - converted_u = boundary_condition.coupling_converter(x, - u[:, i_node, j_node, - linear_indices[i_cell, - j_cell]]) - boundary_condition.u_boundary[v, i, cell] = converted_u[v] ->>>>>>> f2f682a527c5d9f340e3b96beb461f8968ad759f end i_node += i_node_step From f67e3223b0e9e6ce3dd44a23faafd6d7ab1591eb Mon Sep 17 00:00:00 2001 From: iomsn Date: Tue, 19 Mar 2024 15:28:48 +0000 Subject: [PATCH 166/166] Revert "Docstrings for some methods in basis Lobatto-Legendre (#1874)" This reverts commit 2dfde7faf3cc74f066d86148ae6c99ed9e58fa79. --- .../src/files/scalar_linear_advection_1d.jl | 2 +- src/solvers/dgsem/basis_lobatto_legendre.jl | 77 ++----------------- 2 files changed, 7 insertions(+), 72 deletions(-) diff --git a/docs/literate/src/files/scalar_linear_advection_1d.jl b/docs/literate/src/files/scalar_linear_advection_1d.jl index 9b48f29d341..77ba7b087cc 100644 --- a/docs/literate/src/files/scalar_linear_advection_1d.jl +++ b/docs/literate/src/files/scalar_linear_advection_1d.jl @@ -115,7 +115,7 @@ integral = sum(nodes.^3 .* weights) # To approximate the solution, we need to get the polynomial coefficients $\{u_j^{Q_l}\}_{j=0}^N$ # for every element $Q_l$. -# After defining all nodes, we can implement the spatial coordinate $x$ and its initial value $u0 = u(t_0)$ +# After defining all nodes, we can implement the spatial coordinate $x$ and its initial value $u0$ # for every node. x = Matrix{Float64}(undef, length(nodes), n_elements) for element in 1:n_elements diff --git a/src/solvers/dgsem/basis_lobatto_legendre.jl b/src/solvers/dgsem/basis_lobatto_legendre.jl index cac1dba9c74..9e21b88dfa1 100644 --- a/src/solvers/dgsem/basis_lobatto_legendre.jl +++ b/src/solvers/dgsem/basis_lobatto_legendre.jl @@ -404,8 +404,7 @@ function calc_dsplit(nodes, weights) return dsplit end -# Calculate the polynomial derivative matrix D. -# This implements algorithm 37 "PolynomialDerivativeMatrix" from Kopriva's book. +# Calculate the polynomial derivative matrix D function polynomial_derivative_matrix(nodes) n_nodes = length(nodes) d = zeros(n_nodes, n_nodes) @@ -422,7 +421,6 @@ function polynomial_derivative_matrix(nodes) end # Calculate and interpolation matrix (Vandermonde matrix) between two given sets of nodes -# See algorithm 32 "PolynomialInterpolationMatrix" from Kopriva's book. function polynomial_interpolation_matrix(nodes_in, nodes_out, baryweights_in = barycentric_weights(nodes_in)) n_nodes_in = length(nodes_in) @@ -435,7 +433,6 @@ function polynomial_interpolation_matrix(nodes_in, nodes_out, return vandermonde end -# This implements algorithm 32 "PolynomialInterpolationMatrix" from Kopriva's book. function polynomial_interpolation_matrix!(vandermonde, nodes_in, nodes_out, baryweights_in) @@ -466,19 +463,7 @@ function polynomial_interpolation_matrix!(vandermonde, return vandermonde end -""" - barycentric_weights(nodes) - -Calculate the barycentric weights for a given node distribution, i.e., -```math -w_j = \\frac{1}{ \\prod_{k \\neq j} \\left( x_j - x_k \\right ) } -``` - -For details, see (especially Section 3) -- Jean-Paul Berrut and Lloyd N. Trefethen (2004). 
- Barycentric Lagrange Interpolation. - [DOI:10.1137/S0036144502417715](https://doi.org/10.1137/S0036144502417715) -""" +# Calculate the barycentric weights for a given node distribution. function barycentric_weights(nodes) n_nodes = length(nodes) weights = ones(n_nodes) @@ -509,31 +494,12 @@ function calc_lhat(x, nodes, weights) return lhat end -""" - lagrange_interpolating_polynomials(x, nodes, wbary) - -Calculate Lagrange polynomials for a given node distribution with -associated barycentric weights `wbary` at a given point `x` on the -reference interval ``[-1, 1]``. - -This returns all ``l_j(x)``, i.e., the Lagrange polynomials for each node ``x_j``. -Thus, to obtain the interpolating polynomial ``p(x)`` at ``x``, one has to -multiply the Lagrange polynomials with the nodal values ``u_j`` and sum them up: -``p(x) = \\sum_{j=1}^{n} u_j l_j(x)``. - -For details, see e.g. Section 2 of -- Jean-Paul Berrut and Lloyd N. Trefethen (2004). - Barycentric Lagrange Interpolation. - [DOI:10.1137/S0036144502417715](https://doi.org/10.1137/S0036144502417715) -""" +# Calculate Lagrange polynomials for a given node distribution. function lagrange_interpolating_polynomials(x, nodes, wbary) n_nodes = length(nodes) polynomials = zeros(n_nodes) for i in 1:n_nodes - # Avoid division by zero when `x` is close to node by using - # the Kronecker-delta property at nodes - # of the Lagrange interpolation polynomials. if isapprox(x, nodes[i], rtol = eps(x)) polynomials[i] = 1 return polynomials @@ -552,17 +518,6 @@ function lagrange_interpolating_polynomials(x, nodes, wbary) return polynomials end -""" - gauss_lobatto_nodes_weights(n_nodes::Integer) - -Computes nodes ``x_j`` and weights ``w_j`` for the (Legendre-)Gauss-Lobatto quadrature. -This implements algorithm 25 "GaussLobattoNodesAndWeights" from the book - -- David A. Kopriva, (2009). - Implementing spectral methods for partial differential equations: - Algorithms for scientists and engineers. - [DOI:10.1007/978-90-481-2261-5](https://doi.org/10.1007/978-90-481-2261-5) -""" # From FLUXO (but really from blue book by Kopriva) function gauss_lobatto_nodes_weights(n_nodes::Integer) # From Kopriva's book @@ -630,7 +585,7 @@ function gauss_lobatto_nodes_weights(n_nodes::Integer) return nodes, weights end -# From FLUXO (but really from blue book by Kopriva, algorithm 24) +# From FLUXO (but really from blue book by Kopriva) function calc_q_and_l(N::Integer, x::Float64) L_Nm2 = 1.0 L_Nm1 = x @@ -654,17 +609,7 @@ function calc_q_and_l(N::Integer, x::Float64) end calc_q_and_l(N::Integer, x::Real) = calc_q_and_l(N, convert(Float64, x)) -""" - gauss_nodes_weights(n_nodes::Integer) - -Computes nodes ``x_j`` and weights ``w_j`` for the Gauss-Legendre quadrature. -This implements algorithm 23 "LegendreGaussNodesAndWeights" from the book - -- David A. Kopriva, (2009). - Implementing spectral methods for partial differential equations: - Algorithms for scientists and engineers. - [DOI:10.1007/978-90-481-2261-5](https://doi.org/10.1007/978-90-481-2261-5) -""" +# From FLUXO (but really from blue book by Kopriva) function gauss_nodes_weights(n_nodes::Integer) # From Kopriva's book n_iterations = 10 @@ -721,17 +666,7 @@ function gauss_nodes_weights(n_nodes::Integer) end end -""" - legendre_polynomial_and_derivative(N::Int, x::Real) - -Computes the Legendre polynomial of degree `N` and its derivative at `x`. -This implements algorithm 22 "LegendrePolynomialAndDerivative" from the book - -- David A. Kopriva, (2009). 
- Implementing spectral methods for partial differential equations: - Algorithms for scientists and engineers. - [DOI:10.1007/978-90-481-2261-5](https://doi.org/10.1007/978-90-481-2261-5) -""" +# From FLUXO (but really from blue book by Kopriva) function legendre_polynomial_and_derivative(N::Int, x::Real) if N == 0 poly = 1.0