diff --git a/Project.toml b/Project.toml index 13026118289..87be80456c3 100644 --- a/Project.toml +++ b/Project.toml @@ -82,7 +82,7 @@ IfElse = "0.1" LinearAlgebra = "1" LinearMaps = "2.7, 3.0" LoopVectorization = "0.12.151" -MPI = "0.20" +MPI = "0.20.6" Makie = "0.19, 0.20" MuladdMacro = "0.2.2" NLsolve = "4.5.1" diff --git a/examples/t8code_2d_dgsem/elixir_advection_amr_solution_independent.jl b/examples/t8code_2d_dgsem/elixir_advection_amr_solution_independent.jl index 618be7f8965..6ef227d882b 100644 --- a/examples/t8code_2d_dgsem/elixir_advection_amr_solution_independent.jl +++ b/examples/t8code_2d_dgsem/elixir_advection_amr_solution_independent.jl @@ -115,6 +115,11 @@ analysis_callback = AnalysisCallback(semi, interval = analysis_interval, alive_callback = AliveCallback(analysis_interval = analysis_interval) +save_solution = SaveSolutionCallback(interval = 100, + save_initial_solution = true, + save_final_solution = true, + solution_variables = cons2prim) + amr_controller = ControllerThreeLevel(semi, TrixiExtension.IndicatorSolutionIndependent(semi), base_level = 4, @@ -124,12 +129,20 @@ amr_controller = ControllerThreeLevel(semi, amr_callback = AMRCallback(semi, amr_controller, interval = 5, adapt_initial_condition = true, - adapt_initial_condition_only_refine = true) + adapt_initial_condition_only_refine = true, + dynamic_load_balancing = false) +# We disable `dynamic_load_balancing` for now, since t8code does not support +# partitioning for coarsening yet. That is, a complete family of elements always +# stays on rank and is not split up due to partitioning. Without this feature +# dynamic AMR simulations are not perfectly deterministic regarding to +# convergent tests. Once this feature is available in t8code load balancing is +# enabled again. 
stepsize_callback = StepsizeCallback(cfl = 1.6) callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, + save_solution, amr_callback, stepsize_callback); ############################################################################### diff --git a/examples/t8code_2d_dgsem/elixir_advection_amr_unstructured_flag.jl b/examples/t8code_2d_dgsem/elixir_advection_amr_unstructured_flag.jl index c9c831d3471..d19b9230c5c 100644 --- a/examples/t8code_2d_dgsem/elixir_advection_amr_unstructured_flag.jl +++ b/examples/t8code_2d_dgsem/elixir_advection_amr_unstructured_flag.jl @@ -54,6 +54,14 @@ analysis_callback = AnalysisCallback(semi, interval = analysis_interval, alive_callback = AliveCallback(analysis_interval = analysis_interval) +save_restart = SaveRestartCallback(interval = 100, + save_final_restart = true) + +save_solution = SaveSolutionCallback(interval = 100, + save_initial_solution = true, + save_final_solution = true, + solution_variables = cons2prim) + amr_controller = ControllerThreeLevel(semi, IndicatorMax(semi, variable = first), base_level = 1, med_level = 2, med_threshold = 0.1, @@ -62,12 +70,19 @@ amr_callback = AMRCallback(semi, amr_controller, interval = 5, adapt_initial_condition = true, adapt_initial_condition_only_refine = true, - dynamic_load_balancing = true) + dynamic_load_balancing = false) +# We disable `dynamic_load_balancing` for now, since t8code does not support +# partitioning for coarsening yet. That is, a complete family of elements always +# stays on rank and is not split up due to partitioning. Without this feature +# dynamic AMR simulations are not perfectly deterministic regarding to +# convergent tests. Once this feature is available in t8code load balancing is +# enabled again. 
stepsize_callback = StepsizeCallback(cfl = 0.7) callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, + save_restart, save_solution, amr_callback, stepsize_callback) ############################################################################### diff --git a/examples/t8code_2d_dgsem/elixir_advection_extended.jl b/examples/t8code_2d_dgsem/elixir_advection_extended.jl new file mode 100644 index 00000000000..034197ce9d8 --- /dev/null +++ b/examples/t8code_2d_dgsem/elixir_advection_extended.jl @@ -0,0 +1,89 @@ + +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the linear advection equation + +advection_velocity = (0.2, -0.7) +equations = LinearScalarAdvectionEquation2D(advection_velocity) + +initial_condition = initial_condition_convergence_test + +# BCs must be passed as Dict +boundary_condition = BoundaryConditionDirichlet(initial_condition) +boundary_conditions = Dict(:x_neg => boundary_condition, + :x_pos => boundary_condition, + :y_neg => boundary_condition, + :y_pos => boundary_condition) + +# Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux +solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) + +# The initial condition is 2-periodic +coordinates_min = (-1.5, 1.3) # minimum coordinates (min(x), min(y)) +coordinates_max = (0.5, 5.3) # maximum coordinates (max(x), max(y)) + +trees_per_dimension = (19, 37) + +# Create curved mesh with 19 x 37 elements +mesh = T8codeMesh(trees_per_dimension, polydeg = 3, + coordinates_min = coordinates_min, coordinates_max = coordinates_max, + periodicity = false) + +# A semidiscretization collects data structures and functions for the spatial discretization +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions) + 
+############################################################################### +# ODE solvers, callbacks etc. + +# Create ODE problem with time span from 0.0 to 1.0 +tspan = (0.0, 1.0) +ode = semidiscretize(semi, tspan); + +# At the beginning of the main loop, the SummaryCallback prints a summary of the simulation setup +# and resets the timers +summary_callback = SummaryCallback() + +# The AnalysisCallback allows to analyse the solution in regular intervals and prints the results +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval, + extra_analysis_integrals = (entropy, energy_total)) + +# The AliveCallback prints short status information in regular intervals +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +# The SaveRestartCallback allows to save a file from which a Trixi.jl simulation can be restarted +save_restart = SaveRestartCallback(interval = 100, + save_final_restart = true) + +# The SaveSolutionCallback allows to save the solution to a file in regular intervals +save_solution = SaveSolutionCallback(interval = 100, + save_initial_solution = true, + save_final_solution = true, + solution_variables = cons2prim) + +# The StepsizeCallback handles the re-calculation of the maximum Δt after each time step +stepsize_callback = StepsizeCallback(cfl = 1.6) + +# Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver +callbacks = CallbackSet(summary_callback, + analysis_callback, alive_callback, + save_restart, save_solution, + stepsize_callback) + +############################################################################### +# run the simulation + +# OrdinaryDiffEq's `solve` method evolves the solution in time and executes the passed callbacks +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); + 
+# Print the timer summary +summary_callback() + +# Finalize `T8codeMesh` to make sure MPI related objects in t8code are +# released before `MPI` finalizes. +!isinteractive() && finalize(mesh) diff --git a/examples/t8code_2d_dgsem/elixir_advection_nonconforming_flag.jl b/examples/t8code_2d_dgsem/elixir_advection_nonconforming_flag.jl index a8808f9ab72..cb14a7c23ae 100644 --- a/examples/t8code_2d_dgsem/elixir_advection_nonconforming_flag.jl +++ b/examples/t8code_2d_dgsem/elixir_advection_nonconforming_flag.jl @@ -69,11 +69,16 @@ summary_callback = SummaryCallback() # The AnalysisCallback allows to analyse the solution in regular intervals and prints the results analysis_callback = AnalysisCallback(semi, interval = 100) +# The SaveSolutionCallback allows to save the solution to a file in regular intervals +save_solution = SaveSolutionCallback(interval = 100, + solution_variables = cons2prim) + # The StepsizeCallback handles the re-calculation of the maximum Δt after each time step stepsize_callback = StepsizeCallback(cfl = 1.6) # Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver -callbacks = CallbackSet(summary_callback, analysis_callback, stepsize_callback) +callbacks = CallbackSet(summary_callback, analysis_callback, save_solution, + stepsize_callback) ############################################################################### # run the simulation diff --git a/examples/t8code_2d_dgsem/elixir_advection_restart.jl b/examples/t8code_2d_dgsem/elixir_advection_restart.jl new file mode 100644 index 00000000000..9438eb4c38f --- /dev/null +++ b/examples/t8code_2d_dgsem/elixir_advection_restart.jl @@ -0,0 +1,47 @@ + +using OrdinaryDiffEq +using Trixi + +############################################################################### +# create a restart file + +elixir_file = "elixir_advection_extended.jl" +restart_file = "restart_000000021.h5" + +trixi_include(@__MODULE__, joinpath(@__DIR__, elixir_file)) + 
+############################################################################### +# adapt the parameters that have changed compared to "elixir_advection_extended.jl" + +# Note: If you get a restart file from somewhere else, you need to provide +# appropriate setups in the elixir loading a restart file + +restart_filename = joinpath("out", restart_file) +mesh = load_mesh(restart_filename) + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions) + +tspan = (load_time(restart_filename), 2.0) +dt = load_dt(restart_filename) +ode = semidiscretize(semi, tspan, restart_filename); + +# Do not overwrite the initial snapshot written by elixir_advection_extended.jl. +save_solution.condition.save_initial_solution = false + +integrator = init(ode, CarpenterKennedy2N54(williamson_condition = false), + dt = dt, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks, maxiters = 100_000); + +# Get the last time index and work with that. +load_timestep!(integrator, restart_filename) + +############################################################################### +# run the simulation + +sol = solve!(integrator) +summary_callback() # print the timer summary + +# Finalize `T8codeMesh` to make sure MPI related objects in t8code are +# released before `MPI` finalizes. 
+!isinteractive() && finalize(mesh) diff --git a/examples/t8code_2d_dgsem/elixir_advection_restart_amr.jl b/examples/t8code_2d_dgsem/elixir_advection_restart_amr.jl new file mode 100644 index 00000000000..722338ccfdb --- /dev/null +++ b/examples/t8code_2d_dgsem/elixir_advection_restart_amr.jl @@ -0,0 +1,66 @@ + +using OrdinaryDiffEq +using Trixi + +############################################################################### +# create a restart file + +elixir_file = "elixir_advection_extended.jl" +restart_file = "restart_000000021.h5" + +trixi_include(@__MODULE__, joinpath(@__DIR__, elixir_file)) + +############################################################################### +# adapt the parameters that have changed compared to "elixir_advection_extended.jl" + +# Note: If you get a restart file from somewhere else, you need to provide +# appropriate setups in the elixir loading a restart file + +restart_filename = joinpath("out", restart_file) +mesh = load_mesh(restart_filename) + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions) + +tspan = (load_time(restart_filename), 2.0) +dt = load_dt(restart_filename) +ode = semidiscretize(semi, tspan, restart_filename); + +# Do not overwrite the initial snapshot written by elixir_advection_extended.jl. +save_solution.condition.save_initial_solution = false + +# Add AMR callback +amr_controller = ControllerThreeLevel(semi, IndicatorMax(semi, variable = first), + base_level = 0, + med_level = 0, med_threshold = 0.8, + max_level = 1, max_threshold = 1.2) +amr_callback = AMRCallback(semi, amr_controller, + interval = 5, + adapt_initial_condition = true, + adapt_initial_condition_only_refine = true, + dynamic_load_balancing = false) +# We disable `dynamic_load_balancing` for now, since t8code does not support +# partitioning for coarsening yet. That is, a complete family of elements always +# stays on rank and is not split up due to partitioning. 
Without this feature +# dynamic AMR simulations are not perfectly deterministic with regard to +# convergence tests. Once this feature is available in t8code load balancing is +# enabled again. + +callbacks_ext = CallbackSet(amr_callback, callbacks.discrete_callbacks...) + +integrator = init(ode, CarpenterKennedy2N54(williamson_condition = false), + dt = dt, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks_ext, maxiters = 100_000); + +# Get the last time index and work with that. +load_timestep!(integrator, restart_filename) + +############################################################################### +# run the simulation + +sol = solve!(integrator) +summary_callback() # print the timer summary + +# Finalize `T8codeMesh` to make sure MPI related objects in t8code are +# released before `MPI` finalizes. +!isinteractive() && finalize(mesh) diff --git a/examples/t8code_2d_dgsem/elixir_advection_unstructured_flag.jl b/examples/t8code_2d_dgsem/elixir_advection_unstructured_flag.jl index f7917fb03b9..f13f0fae05a 100644 --- a/examples/t8code_2d_dgsem/elixir_advection_unstructured_flag.jl +++ b/examples/t8code_2d_dgsem/elixir_advection_unstructured_flag.jl @@ -53,13 +53,18 @@ summary_callback = SummaryCallback() # prints the results. analysis_callback = AnalysisCallback(semi, interval = 100) +# The SaveSolutionCallback allows to save the solution to a file in regular intervals +save_solution = SaveSolutionCallback(interval = 100, + solution_variables = cons2prim) + # The StepsizeCallback handles the re-calculation of the maximum Δt after each # time step. stepsize_callback = StepsizeCallback(cfl = 1.4) # Create a CallbackSet to collect all callbacks such that they can be passed to # the ODE solver.
-callbacks = CallbackSet(summary_callback, analysis_callback, stepsize_callback) +callbacks = CallbackSet(summary_callback, analysis_callback, save_solution, + stepsize_callback) ############################################################################### # Run the simulation. diff --git a/examples/t8code_2d_dgsem/elixir_euler_free_stream.jl b/examples/t8code_2d_dgsem/elixir_euler_free_stream.jl index 34e6d7d8c0c..349678f1637 100644 --- a/examples/t8code_2d_dgsem/elixir_euler_free_stream.jl +++ b/examples/t8code_2d_dgsem/elixir_euler_free_stream.jl @@ -72,10 +72,16 @@ analysis_callback = AnalysisCallback(semi, interval = analysis_interval) alive_callback = AliveCallback(analysis_interval = analysis_interval) +save_solution = SaveSolutionCallback(interval = 100, + save_initial_solution = true, + save_final_solution = true, + solution_variables = cons2prim) + stepsize_callback = StepsizeCallback(cfl = 2.0) callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, + save_solution, stepsize_callback) ############################################################################### diff --git a/examples/t8code_2d_dgsem/elixir_euler_sedov.jl b/examples/t8code_2d_dgsem/elixir_euler_sedov.jl index e1c51c1f96b..e8ec3a7a9e6 100644 --- a/examples/t8code_2d_dgsem/elixir_euler_sedov.jl +++ b/examples/t8code_2d_dgsem/elixir_euler_sedov.jl @@ -79,11 +79,16 @@ analysis_callback = AnalysisCallback(semi, interval = analysis_interval) alive_callback = AliveCallback(analysis_interval = analysis_interval) +save_solution = SaveSolutionCallback(interval = 300, + save_initial_solution = true, + save_final_solution = true) + stepsize_callback = StepsizeCallback(cfl = 0.5) callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, + save_solution, stepsize_callback) ############################################################################### diff --git a/examples/t8code_2d_dgsem/elixir_euler_shockcapturing_ec.jl 
b/examples/t8code_2d_dgsem/elixir_euler_shockcapturing_ec.jl index 92ed169756b..a7d10006593 100644 --- a/examples/t8code_2d_dgsem/elixir_euler_shockcapturing_ec.jl +++ b/examples/t8code_2d_dgsem/elixir_euler_shockcapturing_ec.jl @@ -50,11 +50,17 @@ analysis_callback = AnalysisCallback(semi, interval = analysis_interval) alive_callback = AliveCallback(analysis_interval = analysis_interval) +save_solution = SaveSolutionCallback(interval = 100, + save_initial_solution = true, + save_final_solution = true, + solution_variables = cons2prim) + stepsize_callback = StepsizeCallback(cfl = 1.0) callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, + save_solution, stepsize_callback) ############################################################################### diff --git a/examples/t8code_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl b/examples/t8code_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl index 59e5f996918..9f9ad16dc9b 100644 --- a/examples/t8code_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl +++ b/examples/t8code_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl @@ -73,10 +73,19 @@ analysis_callback = AnalysisCallback(semi, interval = analysis_interval) alive_callback = AliveCallback(analysis_interval = analysis_interval) +save_restart = SaveRestartCallback(interval = 100, + save_final_restart = true) + +save_solution = SaveSolutionCallback(interval = 100, + save_initial_solution = true, + save_final_solution = true, + solution_variables = cons2prim) + stepsize_callback = StepsizeCallback(cfl = 0.8) callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, + save_restart, save_solution, stepsize_callback) ############################################################################### # run the simulation diff --git a/examples/t8code_2d_dgsem/elixir_eulergravity_convergence.jl 
b/examples/t8code_2d_dgsem/elixir_eulergravity_convergence.jl index 3fafbf8a4c1..1476dfa05c7 100644 --- a/examples/t8code_2d_dgsem/elixir_eulergravity_convergence.jl +++ b/examples/t8code_2d_dgsem/elixir_eulergravity_convergence.jl @@ -56,6 +56,14 @@ summary_callback = SummaryCallback() stepsize_callback = StepsizeCallback(cfl = 0.8) +save_solution = SaveSolutionCallback(interval = 10, + save_initial_solution = true, + save_final_solution = true, + solution_variables = cons2prim) + +save_restart = SaveRestartCallback(interval = 100, + save_final_restart = true) + analysis_interval = 100 alive_callback = AliveCallback(analysis_interval = analysis_interval) @@ -63,6 +71,7 @@ analysis_callback = AnalysisCallback(semi_euler, interval = analysis_interval, save_analysis = true) callbacks = CallbackSet(summary_callback, stepsize_callback, + save_restart, save_solution, analysis_callback, alive_callback) ############################################################################### diff --git a/examples/t8code_2d_dgsem/elixir_mhd_rotor.jl b/examples/t8code_2d_dgsem/elixir_mhd_rotor.jl index 82e6d8ca4a6..fa8d27a01b4 100644 --- a/examples/t8code_2d_dgsem/elixir_mhd_rotor.jl +++ b/examples/t8code_2d_dgsem/elixir_mhd_rotor.jl @@ -93,6 +93,11 @@ analysis_callback = AnalysisCallback(semi, interval = analysis_interval) alive_callback = AliveCallback(analysis_interval = analysis_interval) +save_solution = SaveSolutionCallback(interval = 100, + save_initial_solution = true, + save_final_solution = true, + solution_variables = cons2prim) + amr_indicator = IndicatorLöhner(semi, variable = density_pressure) @@ -103,7 +108,14 @@ amr_controller = ControllerThreeLevel(semi, amr_indicator, amr_callback = AMRCallback(semi, amr_controller, interval = 5, adapt_initial_condition = true, - adapt_initial_condition_only_refine = true) + adapt_initial_condition_only_refine = true, + dynamic_load_balancing = false) +# We disable `dynamic_load_balancing` for now, since t8code does not support +# 
partitioning for coarsening yet. That is, a complete family of elements always +# stays on rank and is not split up due to partitioning. Without this feature +# dynamic AMR simulations are not perfectly deterministic regarding to +# convergent tests. Once this feature is available in t8code load balancing is +# enabled again. cfl = 0.5 stepsize_callback = StepsizeCallback(cfl = cfl) @@ -113,6 +125,7 @@ glm_speed_callback = GlmSpeedCallback(glm_scale = 0.5, cfl = cfl) callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, + save_solution, amr_callback, stepsize_callback, glm_speed_callback) diff --git a/examples/t8code_2d_dgsem/elixir_shallowwater_source_terms.jl b/examples/t8code_2d_dgsem/elixir_shallowwater_source_terms.jl index 9ce248ff3b5..0a937664493 100644 --- a/examples/t8code_2d_dgsem/elixir_shallowwater_source_terms.jl +++ b/examples/t8code_2d_dgsem/elixir_shallowwater_source_terms.jl @@ -46,7 +46,11 @@ analysis_callback = AnalysisCallback(semi, interval = analysis_interval) alive_callback = AliveCallback(analysis_interval = analysis_interval) -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback) +save_solution = SaveSolutionCallback(interval = 200, + save_initial_solution = true, + save_final_solution = true) + +callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution) ############################################################################### # run the simulation diff --git a/examples/t8code_3d_dgsem/elixir_advection_amr.jl b/examples/t8code_3d_dgsem/elixir_advection_amr.jl index 83f897dac7c..7db6bbc3366 100644 --- a/examples/t8code_3d_dgsem/elixir_advection_amr.jl +++ b/examples/t8code_3d_dgsem/elixir_advection_amr.jl @@ -40,6 +40,11 @@ analysis_callback = AnalysisCallback(semi, interval = analysis_interval, alive_callback = AliveCallback(analysis_interval = analysis_interval) +save_solution = SaveSolutionCallback(interval = 100, + save_initial_solution = true, + 
save_final_solution = true, + solution_variables = cons2prim) + amr_controller = ControllerThreeLevel(semi, IndicatorMax(semi, variable = first), base_level = 4, med_level = 5, med_threshold = 0.1, @@ -47,13 +52,21 @@ amr_controller = ControllerThreeLevel(semi, IndicatorMax(semi, variable = first) amr_callback = AMRCallback(semi, amr_controller, interval = 5, adapt_initial_condition = true, - adapt_initial_condition_only_refine = true) + adapt_initial_condition_only_refine = true, + dynamic_load_balancing = false) +# We disable `dynamic_load_balancing` for now, since t8code does not support +# partitioning for coarsening yet. That is, a complete family of elements always +# stays on rank and is not split up due to partitioning. Without this feature +# dynamic AMR simulations are not perfectly deterministic regarding to +# convergent tests. Once this feature is available in t8code load balancing is +# enabled again. stepsize_callback = StepsizeCallback(cfl = 1.2) callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, + save_solution, amr_callback, stepsize_callback) diff --git a/examples/t8code_3d_dgsem/elixir_advection_amr_unstructured_curved.jl b/examples/t8code_3d_dgsem/elixir_advection_amr_unstructured_curved.jl index ae95fa6c4df..45daaf0fbb2 100644 --- a/examples/t8code_3d_dgsem/elixir_advection_amr_unstructured_curved.jl +++ b/examples/t8code_3d_dgsem/elixir_advection_amr_unstructured_curved.jl @@ -71,6 +71,14 @@ analysis_callback = AnalysisCallback(semi, interval = analysis_interval, alive_callback = AliveCallback(analysis_interval = analysis_interval) +save_restart = SaveRestartCallback(interval = 100, + save_final_restart = true) + +save_solution = SaveSolutionCallback(interval = 100, + save_initial_solution = true, + save_final_solution = true, + solution_variables = cons2prim) + amr_controller = ControllerThreeLevel(semi, IndicatorMax(semi, variable = first), base_level = 1, med_level = 2, med_threshold = 0.1, @@ -78,13 +86,22 @@ 
amr_controller = ControllerThreeLevel(semi, IndicatorMax(semi, variable = first) amr_callback = AMRCallback(semi, amr_controller, interval = 5, adapt_initial_condition = true, - adapt_initial_condition_only_refine = true) + adapt_initial_condition_only_refine = true, + dynamic_load_balancing = false) +# We disable `dynamic_load_balancing` for now, since t8code does not support +# partitioning for coarsening yet. That is, a complete family of elements always +# stays on rank and is not split up due to partitioning. Without this feature +# dynamic AMR simulations are not perfectly deterministic regarding to +# convergent tests. Once this feature is available in t8code load balancing is +# enabled again. stepsize_callback = StepsizeCallback(cfl = 1.2) callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, + save_restart, + save_solution, amr_callback, stepsize_callback) diff --git a/examples/t8code_3d_dgsem/elixir_advection_basic.jl b/examples/t8code_3d_dgsem/elixir_advection_basic.jl index b6479946971..415a9e78ba2 100644 --- a/examples/t8code_3d_dgsem/elixir_advection_basic.jl +++ b/examples/t8code_3d_dgsem/elixir_advection_basic.jl @@ -40,11 +40,19 @@ summary_callback = SummaryCallback() # The AnalysisCallback allows to analyse the solution in regular intervals and prints the results analysis_callback = AnalysisCallback(semi, interval = 100) +# The SaveRestartCallback allows to save a file from which a Trixi.jl simulation can be restarted +save_restart = SaveRestartCallback(interval = 100, + save_final_restart = true) + +# The SaveSolutionCallback allows to save the solution to a file in regular intervals +save_solution = SaveSolutionCallback(interval = 100, + solution_variables = cons2prim) + # The StepsizeCallback handles the re-calculation of the maximum Δt after each time step stepsize_callback = StepsizeCallback(cfl = 1.2) # Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver -callbacks = 
CallbackSet(summary_callback, analysis_callback, +callbacks = CallbackSet(summary_callback, analysis_callback, save_restart, save_solution, stepsize_callback) ############################################################################### diff --git a/examples/t8code_3d_dgsem/elixir_advection_nonconforming.jl b/examples/t8code_3d_dgsem/elixir_advection_nonconforming.jl index cac6e30aa74..f12c760bb1b 100644 --- a/examples/t8code_3d_dgsem/elixir_advection_nonconforming.jl +++ b/examples/t8code_3d_dgsem/elixir_advection_nonconforming.jl @@ -66,11 +66,15 @@ summary_callback = SummaryCallback() # The AnalysisCallback allows to analyse the solution in regular intervals and prints the results analysis_callback = AnalysisCallback(semi, interval = 100) +# The SaveSolutionCallback allows to save the solution to a file in regular intervals +save_solution = SaveSolutionCallback(interval = 100, + solution_variables = cons2prim) + # The StepsizeCallback handles the re-calculation of the maximum Δt after each time step stepsize_callback = StepsizeCallback(cfl = 1.6) # Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver -callbacks = CallbackSet(summary_callback, analysis_callback, +callbacks = CallbackSet(summary_callback, analysis_callback, save_solution, stepsize_callback) ############################################################################### diff --git a/examples/t8code_3d_dgsem/elixir_advection_restart.jl b/examples/t8code_3d_dgsem/elixir_advection_restart.jl new file mode 100644 index 00000000000..0c158d72741 --- /dev/null +++ b/examples/t8code_3d_dgsem/elixir_advection_restart.jl @@ -0,0 +1,44 @@ +using OrdinaryDiffEq +using Trixi + +############################################################################### +# create a restart file + +trixi_include(@__MODULE__, joinpath(@__DIR__, "elixir_advection_basic.jl"), + trees_per_dimension = (2, 2, 2)) + 
+############################################################################### +# adapt the parameters that have changed compared to "elixir_advection_extended.jl" + +# Note: If you get a restart file from somewhere else, you need to provide +# appropriate setups in the elixir loading a restart file + +restart_filename = joinpath("out", "restart_000000010.h5") +mesh = load_mesh(restart_filename) + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition_convergence_test, + solver) + +tspan = (load_time(restart_filename), 2.0) +dt = load_dt(restart_filename) +ode = semidiscretize(semi, tspan, restart_filename); + +# Do not overwrite the initial snapshot written by elixir_advection_extended.jl. +save_solution.condition.save_initial_solution = false + +integrator = init(ode, CarpenterKennedy2N54(williamson_condition = false), + dt = dt, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks, maxiters = 100_000); + +# Get the last time index and work with that. +load_timestep!(integrator, restart_filename) + +############################################################################### +# run the simulation + +sol = solve!(integrator) +summary_callback() # print the timer summary + +# Finalize `T8codeMesh` to make sure MPI related objects in t8code are +# released before `MPI` finalizes. 
+!isinteractive() && finalize(mesh) diff --git a/examples/t8code_3d_dgsem/elixir_advection_unstructured_curved.jl b/examples/t8code_3d_dgsem/elixir_advection_unstructured_curved.jl index dd8e7f21038..7c332e5b77a 100644 --- a/examples/t8code_3d_dgsem/elixir_advection_unstructured_curved.jl +++ b/examples/t8code_3d_dgsem/elixir_advection_unstructured_curved.jl @@ -71,12 +71,20 @@ analysis_callback = AnalysisCallback(semi, interval = analysis_interval) alive_callback = AliveCallback(analysis_interval = analysis_interval) +# The SaveRestartCallback allows to save a file from which a Trixi.jl simulation can be restarted +save_restart = SaveRestartCallback(interval = 100, + save_final_restart = true) + +# The SaveSolutionCallback allows to save the solution to a file in regular intervals +save_solution = SaveSolutionCallback(interval = 100, + solution_variables = cons2prim) + # The StepsizeCallback handles the re-calculation of the maximum Δt after each time step stepsize_callback = StepsizeCallback(cfl = 1.2) # Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, - stepsize_callback) +callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_restart, + save_solution, stepsize_callback) ############################################################################### # run the simulation diff --git a/examples/t8code_3d_dgsem/elixir_euler_ec.jl b/examples/t8code_3d_dgsem/elixir_euler_ec.jl index ad23e3440d8..2e13f7edd57 100644 --- a/examples/t8code_3d_dgsem/elixir_euler_ec.jl +++ b/examples/t8code_3d_dgsem/elixir_euler_ec.jl @@ -68,11 +68,16 @@ analysis_callback = AnalysisCallback(semi, interval = analysis_interval) alive_callback = AliveCallback(analysis_interval = analysis_interval) +save_solution = SaveSolutionCallback(interval = 100, + save_initial_solution = true, + save_final_solution = true) + stepsize_callback = 
StepsizeCallback(cfl = 1.0) callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, + save_solution, stepsize_callback) ############################################################################### diff --git a/examples/t8code_3d_dgsem/elixir_euler_free_stream.jl b/examples/t8code_3d_dgsem/elixir_euler_free_stream.jl index 11f3ba94e25..daf431a86ef 100644 --- a/examples/t8code_3d_dgsem/elixir_euler_free_stream.jl +++ b/examples/t8code_3d_dgsem/elixir_euler_free_stream.jl @@ -95,10 +95,16 @@ analysis_callback = AnalysisCallback(semi, interval = analysis_interval) alive_callback = AliveCallback(analysis_interval = analysis_interval) +save_solution = SaveSolutionCallback(interval = 100, + save_initial_solution = true, + save_final_solution = true, + solution_variables = cons2prim) + stepsize_callback = StepsizeCallback(cfl = 1.2) callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, + save_solution, stepsize_callback) ############################################################################### diff --git a/examples/t8code_3d_dgsem/elixir_euler_free_stream_extruded.jl b/examples/t8code_3d_dgsem/elixir_euler_free_stream_extruded.jl index ca3722e6254..19cdea7ee3c 100644 --- a/examples/t8code_3d_dgsem/elixir_euler_free_stream_extruded.jl +++ b/examples/t8code_3d_dgsem/elixir_euler_free_stream_extruded.jl @@ -83,10 +83,16 @@ analysis_callback = AnalysisCallback(semi, interval = analysis_interval) alive_callback = AliveCallback(analysis_interval = analysis_interval) +save_solution = SaveSolutionCallback(interval = 100, + save_initial_solution = true, + save_final_solution = true, + solution_variables = cons2prim) + stepsize_callback = StepsizeCallback(cfl = 1.2) callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, + save_solution, stepsize_callback) ############################################################################### diff --git a/examples/t8code_3d_dgsem/elixir_euler_sedov.jl 
b/examples/t8code_3d_dgsem/elixir_euler_sedov.jl index 55369fa14f2..9f42ad24ac7 100644 --- a/examples/t8code_3d_dgsem/elixir_euler_sedov.jl +++ b/examples/t8code_3d_dgsem/elixir_euler_sedov.jl @@ -81,11 +81,16 @@ analysis_callback = AnalysisCallback(semi, interval = analysis_interval) alive_callback = AliveCallback(analysis_interval = analysis_interval) +save_solution = SaveSolutionCallback(interval = 100, + save_initial_solution = true, + save_final_solution = true) + stepsize_callback = StepsizeCallback(cfl = 0.5) callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, + save_solution, stepsize_callback) ############################################################################### diff --git a/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl b/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl index 9a16b104e6a..1346f603608 100644 --- a/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl +++ b/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl @@ -97,10 +97,16 @@ analysis_callback = AnalysisCallback(semi, interval = analysis_interval) alive_callback = AliveCallback(analysis_interval = analysis_interval) +save_solution = SaveSolutionCallback(interval = 100, + save_initial_solution = true, + save_final_solution = true, + solution_variables = cons2prim) + stepsize_callback = StepsizeCallback(cfl = 0.6) callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, + save_solution, stepsize_callback); ############################################################################### diff --git a/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonperiodic.jl b/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonperiodic.jl index 3962ce798cb..612b1640239 100644 --- a/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonperiodic.jl +++ 
b/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonperiodic.jl @@ -47,10 +47,16 @@ analysis_callback = AnalysisCallback(semi, interval = analysis_interval) alive_callback = AliveCallback(analysis_interval = analysis_interval) +save_solution = SaveSolutionCallback(interval = 100, + save_initial_solution = true, + save_final_solution = true, + solution_variables = cons2prim) + stepsize_callback = StepsizeCallback(cfl = 0.6) callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, + save_solution, stepsize_callback) ############################################################################### diff --git a/examples/tree_3d_dgsem/elixir_euler_convergence_pure_fv.jl b/examples/tree_3d_dgsem/elixir_euler_convergence_pure_fv.jl index 4789b46dacc..063053c1255 100644 --- a/examples/tree_3d_dgsem/elixir_euler_convergence_pure_fv.jl +++ b/examples/tree_3d_dgsem/elixir_euler_convergence_pure_fv.jl @@ -53,3 +53,8 @@ sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback save_everystep = false, callback = callbacks); summary_callback() # print the timer summary + +# This elixir is also tested with `T8codeMesh` which needs to be finalized +# explicitly. Call finalize to make sure MPI related objects in t8code are +# released before `MPI` finalizes. +!isinteractive() && finalize(mesh); diff --git a/src/auxiliary/t8code.jl b/src/auxiliary/t8code.jl index 7c1399fc803..ddc0a2f1399 100644 --- a/src/auxiliary/t8code.jl +++ b/src/auxiliary/t8code.jl @@ -14,30 +14,33 @@ function init_t8code() return nothing end - # Initialize the sc library, has to happen before we initialize t8code. 
- let catch_signals = 0, print_backtrace = 0, log_handler = C_NULL - T8code.Libt8.sc_init(mpi_comm(), catch_signals, print_backtrace, log_handler, - T8code.Libt8.SC_LP_ERROR) + # Initialize `libsc`, `p4est`, and `t8code` with log level + # `SC_LP_ERROR` to prevent a lot of output in AMR simulations + # For development, log level `SC_LP_DEBUG` is recommended. + LOG_LEVEL = T8code.Libt8.SC_LP_ERROR + + if T8code.Libt8.sc_is_initialized() == 0 + # Initialize the sc library, has to happen before we initialize t8code. + let catch_signals = 0, print_backtrace = 0, log_handler = C_NULL + T8code.Libt8.sc_init(mpi_comm(), catch_signals, print_backtrace, + log_handler, + LOG_LEVEL) + end end if T8code.Libt8.p4est_is_initialized() == 0 - # Initialize `p4est` with log level ERROR to prevent a lot of output in AMR simulations - T8code.Libt8.p4est_init(C_NULL, T8code.Libt8.SC_LP_ERROR) + T8code.Libt8.p4est_init(C_NULL, LOG_LEVEL) end - # Initialize t8code with log level ERROR to prevent a lot of output in AMR simulations. - t8_init(T8code.Libt8.SC_LP_ERROR) - - if haskey(ENV, "TRIXI_T8CODE_SC_FINALIZE") - # Normally, `sc_finalize` should always be called during shutdown of an - # application. It checks whether there is still un-freed memory by t8code - # and/or T8code.jl and throws an exception if this is the case. For - # production runs this is not mandatory, but is helpful during - # development. Hence, this option is only activated when environment - # variable TRIXI_T8CODE_SC_FINALIZE exists. - @info "T8code.jl: `sc_finalize` will be called during shutdown of Trixi.jl." - MPI.add_finalize_hook!(T8code.Libt8.sc_finalize) + MPI.add_finalize_hook!() do + status = T8code.Libt8.sc_finalize_noabort() + if status != 0 + @warn("Inconsistent state detected after finalizing t8code. Have you finalized all `T8codeMesh` objects and/or properly freed/un-referenced all t8code related objects?") + end end + + # Initialize t8code. 
+ t8_init(LOG_LEVEL) else @warn "Preferences for T8code.jl are not set correctly. Until fixed, using `T8codeMesh` will result in a crash. " * "See also https://trixi-framework.github.io/Trixi.jl/stable/parallelization/#parallel_system_MPI" @@ -50,7 +53,7 @@ function trixi_t8_get_local_element_levels(forest) # Check that forest is a committed, that is valid and usable, forest. @assert t8_forest_is_committed(forest) != 0 - levels = Vector{Int}(undef, t8_forest_get_local_num_elements(forest)) + levels = Vector{UInt8}(undef, t8_forest_get_local_num_elements(forest)) # Get the number of trees that have elements of this process. num_local_trees = t8_forest_get_num_local_trees(forest) @@ -67,7 +70,7 @@ function trixi_t8_get_local_element_levels(forest) for ielement in 0:(num_elements_in_tree - 1) element = t8_forest_get_element_in_tree(forest, itree, ielement) current_index += 1 - levels[current_index] = t8_element_level(eclass_scheme, element) + levels[current_index] = UInt8(t8_element_level(eclass_scheme, element)) end # for end # for diff --git a/src/callbacks_step/amr.jl b/src/callbacks_step/amr.jl index 155e909e292..27fc433c89c 100644 --- a/src/callbacks_step/amr.jl +++ b/src/callbacks_step/amr.jl @@ -790,6 +790,8 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::T8codeMesh, reinitialize_boundaries!(semi.boundary_conditions, cache) end + mesh.unsaved_changes |= has_changed + # Return true if there were any cells coarsened or refined, otherwise false. 
return has_changed end diff --git a/src/callbacks_step/save_restart_dg.jl b/src/callbacks_step/save_restart_dg.jl index 29b3858c135..2c6445215a7 100644 --- a/src/callbacks_step/save_restart_dg.jl +++ b/src/callbacks_step/save_restart_dg.jl @@ -48,7 +48,8 @@ function save_restart_file(u, time, dt, timestep, end function load_restart_file(mesh::Union{SerialTreeMesh, StructuredMesh, - UnstructuredMesh2D, SerialP4estMesh}, + UnstructuredMesh2D, SerialP4estMesh, + SerialT8codeMesh}, equations, dg::DG, cache, restart_file) # allocate memory @@ -88,7 +89,8 @@ function load_restart_file(mesh::Union{SerialTreeMesh, StructuredMesh, end function save_restart_file(u, time, dt, timestep, - mesh::Union{ParallelTreeMesh, ParallelP4estMesh}, equations, + mesh::Union{ParallelTreeMesh, ParallelP4estMesh, + ParallelT8codeMesh}, equations, dg::DG, cache, restart_callback) @unpack output_directory = restart_callback @@ -105,7 +107,8 @@ function save_restart_file(u, time, dt, timestep, end function save_restart_file_parallel(u, time, dt, timestep, - mesh::Union{ParallelTreeMesh, ParallelP4estMesh}, + mesh::Union{ParallelTreeMesh, ParallelP4estMesh, + ParallelT8codeMesh}, equations, dg::DG, cache, filename) @@ -151,7 +154,8 @@ function save_restart_file_parallel(u, time, dt, timestep, end function save_restart_file_on_root(u, time, dt, timestep, - mesh::Union{ParallelTreeMesh, ParallelP4estMesh}, + mesh::Union{ParallelTreeMesh, ParallelP4estMesh, + ParallelT8codeMesh}, equations, dg::DG, cache, filename) @@ -204,7 +208,8 @@ function save_restart_file_on_root(u, time, dt, timestep, return filename end -function load_restart_file(mesh::Union{ParallelTreeMesh, ParallelP4estMesh}, equations, +function load_restart_file(mesh::Union{ParallelTreeMesh, ParallelP4estMesh, + ParallelT8codeMesh}, equations, dg::DG, cache, restart_file) if HDF5.has_parallel() load_restart_file_parallel(mesh, equations, dg, cache, restart_file) @@ -213,7 +218,8 @@ function 
load_restart_file(mesh::Union{ParallelTreeMesh, ParallelP4estMesh}, equ end end -function load_restart_file_parallel(mesh::Union{ParallelTreeMesh, ParallelP4estMesh}, +function load_restart_file_parallel(mesh::Union{ParallelTreeMesh, ParallelP4estMesh, + ParallelT8codeMesh}, equations, dg::DG, cache, restart_file) # Calculate element and node counts by MPI rank @@ -264,7 +270,8 @@ function load_restart_file_parallel(mesh::Union{ParallelTreeMesh, ParallelP4estM return u_ode end -function load_restart_file_on_root(mesh::Union{ParallelTreeMesh, ParallelP4estMesh}, +function load_restart_file_on_root(mesh::Union{ParallelTreeMesh, ParallelP4estMesh, + ParallelT8codeMesh}, equations, dg::DG, cache, restart_file) # Calculate element and node counts by MPI rank diff --git a/src/callbacks_step/save_solution_dg.jl b/src/callbacks_step/save_solution_dg.jl index 57cdb3ef8a2..ee18ee1acbd 100644 --- a/src/callbacks_step/save_solution_dg.jl +++ b/src/callbacks_step/save_solution_dg.jl @@ -91,7 +91,8 @@ function save_solution_file(u, time, dt, timestep, end function save_solution_file(u, time, dt, timestep, - mesh::Union{ParallelTreeMesh, ParallelP4estMesh}, equations, + mesh::Union{ParallelTreeMesh, ParallelP4estMesh, + ParallelT8codeMesh}, equations, dg::DG, cache, solution_callback, element_variables = Dict{Symbol, Any}(), @@ -136,7 +137,8 @@ function save_solution_file(u, time, dt, timestep, end function save_solution_file_parallel(data, time, dt, timestep, n_vars, - mesh::Union{ParallelTreeMesh, ParallelP4estMesh}, + mesh::Union{ParallelTreeMesh, ParallelP4estMesh, + ParallelT8codeMesh}, equations, dg::DG, cache, solution_variables, filename, element_variables = Dict{Symbol, Any}()) @@ -198,7 +200,8 @@ function save_solution_file_parallel(data, time, dt, timestep, n_vars, end function save_solution_file_on_root(data, time, dt, timestep, n_vars, - mesh::Union{ParallelTreeMesh, ParallelP4estMesh}, + mesh::Union{ParallelTreeMesh, ParallelP4estMesh, + ParallelT8codeMesh}, 
equations, dg::DG, cache, solution_variables, filename, element_variables = Dict{Symbol, Any}()) diff --git a/src/meshes/mesh_io.jl b/src/meshes/mesh_io.jl index 127bc420f65..a93401b624f 100644 --- a/src/meshes/mesh_io.jl +++ b/src/meshes/mesh_io.jl @@ -225,11 +225,89 @@ function save_mesh_file(mesh::P4estMesh, output_directory, timestep, mpi_paralle return filename end -# TODO: Implement this function as soon as there is support for this in `t8code`. -function save_mesh_file(mesh::T8codeMesh, output_directory, timestep, mpi_parallel) - error("Mesh file output not supported yet for `T8codeMesh`.") +# This routine works for both, serial and MPI parallel mode. The forest +# information is collected on all ranks and then gathered by the root rank. +# Since only the `levels` array of UInt8 and the global number of elements per +# tree (Int32) is necessary to reconstruct the forest it is not worth the +# effort to have a collective write to the HDF5 file. Instead, `levels` and +# `num_elements_per_tree` gets gathered by the root rank and written to disk. +function save_mesh_file(mesh::T8codeMesh, output_directory, timestep, + mpi_parallel::Union{False, True}) + + # Create output directory (if it does not exist). + mpi_isroot() && mkpath(output_directory) + + # Determine file name based on existence of meaningful time step. + if timestep > 0 + filename = joinpath(output_directory, @sprintf("mesh_%09d.h5", timestep)) + else + filename = joinpath(output_directory, "mesh.h5") + end + + # Retrieve refinement levels of all elements. 
+ local_levels = get_levels(mesh) + if mpi_isparallel() + count = [length(local_levels)] + counts = MPI.Gather(view(count, 1), mpi_root(), mpi_comm()) + + if mpi_isroot() + levels = similar(local_levels, ncellsglobal(mesh)) + MPI.Gatherv!(local_levels, MPI.VBuffer(levels, counts), + mpi_root(), mpi_comm()) + else + MPI.Gatherv!(local_levels, nothing, mpi_root(), mpi_comm()) + end + else + levels = local_levels + end + + # Retrieve the number of elements per tree. Since a tree can be distributed + # among multiple ranks a reduction operation sums them all up. The latter + # is done on the root rank only. + num_global_trees = t8_forest_get_num_global_trees(mesh.forest) + num_elements_per_tree = zeros(t8_gloidx_t, num_global_trees) + num_local_trees = t8_forest_get_num_local_trees(mesh.forest) + for local_tree_id in 0:(num_local_trees - 1) + num_local_elements_in_tree = t8_forest_get_tree_num_elements(mesh.forest, + local_tree_id) + global_tree_id = t8_forest_global_tree_id(mesh.forest, local_tree_id) + num_elements_per_tree[global_tree_id + 1] = num_local_elements_in_tree + end + + if mpi_isparallel() + MPI.Reduce!(num_elements_per_tree, +, mpi_comm()) + end + + # Since the mesh attributes are replicated on all ranks, only save from MPI + # root. + if !mpi_isroot() + return filename + end + + # Retrieve face connectivity info of the coarse mesh. + treeIDs, neighIDs, faces, duals, orientations = get_cmesh_info(mesh) - return joinpath(output_directory, "dummy_mesh.h5") + # Open file (clobber existing content). + h5open(filename, "w") do file + # Add context information as attributes. 
+ attributes(file)["mesh_type"] = get_name(mesh) + attributes(file)["ndims"] = ndims(mesh) + attributes(file)["ntrees"] = ntrees(mesh) + attributes(file)["nelements"] = ncellsglobal(mesh) + + file["tree_node_coordinates"] = mesh.tree_node_coordinates + file["nodes"] = Vector(mesh.nodes) + file["boundary_names"] = mesh.boundary_names .|> String + file["treeIDs"] = treeIDs + file["neighIDs"] = neighIDs + file["faces"] = faces + file["duals"] = duals + file["orientations"] = orientations + file["levels"] = levels + file["num_elements_per_tree"] = num_elements_per_tree + end + + return filename end """ @@ -322,6 +400,31 @@ function load_mesh_serial(mesh_file::AbstractString; n_cells_max, RealT) mesh = P4estMesh{ndims}(p4est, tree_node_coordinates, nodes, boundary_names, mesh_file, false, true) + + elseif mesh_type == "T8codeMesh" + ndims, ntrees, nelements, tree_node_coordinates, + nodes, boundary_names_, treeIDs, neighIDs, faces, duals, orientations, + levels, num_elements_per_tree = h5open(mesh_file, "r") do file + return read(attributes(file)["ndims"]), + read(attributes(file)["ntrees"]), + read(attributes(file)["nelements"]), + read(file["tree_node_coordinates"]), + read(file["nodes"]), + read(file["boundary_names"]), + read(file["treeIDs"]), + read(file["neighIDs"]), + read(file["faces"]), + read(file["duals"]), + read(file["orientations"]), + read(file["levels"]), + read(file["num_elements_per_tree"]) + end + + boundary_names = boundary_names_ .|> Symbol + + mesh = T8codeMesh(ndims, ntrees, nelements, tree_node_coordinates, + nodes, boundary_names, treeIDs, neighIDs, faces, + duals, orientations, levels, num_elements_per_tree) else error("Unknown mesh type!") end @@ -411,6 +514,43 @@ function load_mesh_parallel(mesh_file::AbstractString; n_cells_max, RealT) mesh = P4estMesh{ndims_}(p4est, tree_node_coordinates, nodes, boundary_names, mesh_file, false, true) + + elseif mesh_type == "T8codeMesh" + if mpi_isroot() + ndims, ntrees, nelements, tree_node_coordinates, 
nodes, + boundary_names_, treeIDs, neighIDs, faces, duals, orientations, levels, + num_elements_per_tree = h5open(mesh_file, "r") do file + return read(attributes(file)["ndims"]), + read(attributes(file)["ntrees"]), + read(attributes(file)["nelements"]), + read(file["tree_node_coordinates"]), + read(file["nodes"]), + read(file["boundary_names"]), + read(file["treeIDs"]), + read(file["neighIDs"]), + read(file["faces"]), + read(file["duals"]), + read(file["orientations"]), + read(file["levels"]), + read(file["num_elements_per_tree"]) + end + + boundary_names = boundary_names_ .|> Symbol + + data = (ndims, ntrees, nelements, tree_node_coordinates, nodes, + boundary_names, treeIDs, neighIDs, faces, duals, + orientations, levels, num_elements_per_tree) + MPI.bcast(data, mpi_root(), mpi_comm()) + else + data = MPI.bcast(nothing, mpi_root(), mpi_comm()) + ndims, ntrees, nelements, tree_node_coordinates, nodes, + boundary_names, treeIDs, neighIDs, faces, duals, orientations, levels, + num_elements_per_tree = data + end + + mesh = T8codeMesh(ndims, ntrees, nelements, tree_node_coordinates, + nodes, boundary_names, treeIDs, neighIDs, faces, + duals, orientations, levels, num_elements_per_tree) else error("Unknown mesh type!") end diff --git a/src/meshes/t8code_mesh.jl b/src/meshes/t8code_mesh.jl index 77137722921..a7d7b0313d6 100644 --- a/src/meshes/t8code_mesh.jl +++ b/src/meshes/t8code_mesh.jl @@ -26,6 +26,8 @@ mutable struct T8codeMesh{NDIMS, RealT <: Real, IsParallel, NDIMSP2, NNODES} <: nmpiinterfaces :: Int nmpimortars :: Int + unsaved_changes::Bool + function T8codeMesh{NDIMS}(forest::Ptr{t8_forest}, tree_node_coordinates, nodes, boundary_names, current_filename) where {NDIMS} @@ -38,32 +40,17 @@ mutable struct T8codeMesh{NDIMS, RealT <: Real, IsParallel, NDIMSP2, NNODES} <: mesh.boundary_names = boundary_names mesh.current_filename = current_filename mesh.tree_node_coordinates = tree_node_coordinates + mesh.unsaved_changes = true finalizer(mesh) do mesh - # When 
finalizing `mesh.forest`, `mesh.scheme` and `mesh.cmesh` are + # When finalizing, `forest`, `scheme`, `cmesh`, and `geometry` are # also cleaned up from within `t8code`. The cleanup code for # `cmesh` does some MPI calls for deallocating shared memory # arrays. Due to garbage collection in Julia the order of shutdown - # is not deterministic. The following code might happen after MPI - # is already in finalized state. - # If the environment variable `TRIXI_T8CODE_SC_FINALIZE` is set the - # `finalize_hook` of the MPI module takes care of the cleanup. See - # further down. However, this might cause a pile-up of `mesh` - # objects during long-running sessions. - if !MPI.Finalized() - t8_forest_unref(Ref(mesh.forest)) - end - end - - # This finalizer call is only recommended during development and not for - # production runs, especially long-running sessions since a reference to - # the `mesh` object will be kept throughout the lifetime of the session. - # See comments in `init_t8code()` in file `src/auxiliary/t8code.jl` for - # more information. - if haskey(ENV, "TRIXI_T8CODE_SC_FINALIZE") - MPI.add_finalize_hook!() do - t8_forest_unref(Ref(mesh.forest)) - end + # is not deterministic. Hence, "manual" finalization might be + # necessary in order to avoid MPI-related error output when closing + # the Julia program/session. + t8_forest_unref(Ref(mesh.forest)) end return mesh @@ -101,6 +88,161 @@ function Base.show(io::IO, ::MIME"text/plain", mesh::T8codeMesh) end end +""" + T8codeMesh(ndims, ntrees, nelements, tree_node_coordinates, nodes, + boundary_names, treeIDs, neighIDs, faces, duals, + orientations, levels, num_elements_per_tree) + +Constructor for the `T8codeMesh`. Typically called by the `load_mesh` routine. + +# Arguments +- `ndims`: Dimension of the mesh. +- `ntrees`: Global number of trees. +- `nelements`: Global number of elements. 
+- `tree_node_coordinates`: Node coordinates for each tree: [dimension, i, j, k, tree] +- `nodes`: Array of interpolation nodes. +- `boundary_names`: List of boundary names. +- `treeIDs`: List of tree IDs. The length is the number of conforming interfaces of the coarse mesh. +- `neighIDs`: List of neighboring tree IDs. Same length as `treeIDs`. +- `faces`: List of face IDs. Same length as `treeIDs`. +- `duals`: List of face IDs of the neighboring tree. Same length as `treeIDs`. +- `orientations`: Orientation number of the interface. Same length as `treeIDs`. +- `levels`: List of levels of each element. Has length `nelements`. +- `num_elements_per_tree`: List of global number of elements per tree. Has length `ntrees`. + +Returns a `T8codeMesh` object with a forest reconstructed by the input arguments. +""" +function T8codeMesh(ndims, ntrees, nelements, tree_node_coordinates, nodes, + boundary_names, treeIDs, neighIDs, faces, duals, + orientations, levels, num_elements_per_tree) + # Allocate new cmesh object. + cmesh = t8_cmesh_new() + + # Use linear geometry for now. There is no real Lagrange geometry + # implementation (volume nodes) yet in t8code. + linear_geom = t8_geometry_linear_new() + t8_cmesh_register_geometry(cmesh, linear_geom) + + # Determine element class. + eclass = ndims > 2 ? T8_ECLASS_HEX : T8_ECLASS_QUAD + + # Store element vertices inside the cmesh. 
+ N = length(nodes) + vertices = zeros(3 * 2^ndims) # quads/hexs only + for i in 1:ntrees + t8_cmesh_set_tree_class(cmesh, i - 1, eclass) + + if ndims == 2 + vertices[1] = tree_node_coordinates[1, 1, 1, i] + vertices[2] = tree_node_coordinates[2, 1, 1, i] + + vertices[4] = tree_node_coordinates[1, N, 1, i] + vertices[5] = tree_node_coordinates[2, N, 1, i] + + vertices[7] = tree_node_coordinates[1, 1, N, i] + vertices[8] = tree_node_coordinates[2, 1, N, i] + + vertices[10] = tree_node_coordinates[1, N, N, i] + vertices[11] = tree_node_coordinates[2, N, N, i] + else + vertices[1] = tree_node_coordinates[1, 1, 1, 1, i] + vertices[2] = tree_node_coordinates[2, 1, 1, 1, i] + vertices[3] = tree_node_coordinates[3, 1, 1, 1, i] + + vertices[4] = tree_node_coordinates[1, N, 1, 1, i] + vertices[5] = tree_node_coordinates[2, N, 1, 1, i] + vertices[6] = tree_node_coordinates[3, N, 1, 1, i] + + vertices[7] = tree_node_coordinates[1, 1, N, 1, i] + vertices[8] = tree_node_coordinates[2, 1, N, 1, i] + vertices[9] = tree_node_coordinates[3, 1, N, 1, i] + + vertices[10] = tree_node_coordinates[1, N, N, 1, i] + vertices[11] = tree_node_coordinates[2, N, N, 1, i] + vertices[12] = tree_node_coordinates[3, N, N, 1, i] + + vertices[13] = tree_node_coordinates[1, 1, 1, N, i] + vertices[14] = tree_node_coordinates[2, 1, 1, N, i] + vertices[15] = tree_node_coordinates[3, 1, 1, N, i] + + vertices[16] = tree_node_coordinates[1, N, 1, N, i] + vertices[17] = tree_node_coordinates[2, N, 1, N, i] + vertices[18] = tree_node_coordinates[3, N, 1, N, i] + + vertices[19] = tree_node_coordinates[1, 1, N, N, i] + vertices[20] = tree_node_coordinates[2, 1, N, N, i] + vertices[21] = tree_node_coordinates[3, 1, N, N, i] + + vertices[22] = tree_node_coordinates[1, N, N, N, i] + vertices[23] = tree_node_coordinates[2, N, N, N, i] + vertices[24] = tree_node_coordinates[3, N, N, N, i] + end + + t8_cmesh_set_tree_vertices(cmesh, i - 1, vertices, 2^ndims) + end + + # Connect the coarse mesh elements. 
+ for i in eachindex(treeIDs) + t8_cmesh_set_join(cmesh, treeIDs[i], neighIDs[i], faces[i], duals[i], + orientations[i]) + end + + t8_cmesh_commit(cmesh, mpi_comm()) + + # Init a new forest with just one element per tree. + do_face_ghost = mpi_isparallel() + scheme = t8_scheme_new_default_cxx() + initial_refinement_level = 0 + forest = t8_forest_new_uniform(cmesh, scheme, initial_refinement_level, do_face_ghost, + mpi_comm()) + + cum_sum_num_elements_per_tree = cumsum(num_elements_per_tree) + + global_element_id = 0 # zero-based index + + # Compute the offset within the to-be-reconstructed forest. Depends on the + # MPI rank resp. first global tree id. + if mpi_rank() > 0 && t8_forest_get_local_num_elements(forest) > 0 + last_global_tree_id_of_preceding_rank = t8_forest_global_tree_id(forest, 0) - 1 + global_element_id += cum_sum_num_elements_per_tree[last_global_tree_id_of_preceding_rank + 1] + end + + function adapt_callback(forest, local_tree_id, eclass_scheme, local_element_id, + elements, is_family, + user_data) + + # Check if we are already in the next tree in terms of the `global_element_id`. + global_tree_id = t8_forest_global_tree_id(forest, local_tree_id) + if global_element_id + 1 > cum_sum_num_elements_per_tree[global_tree_id + 1] + return 0 + end + + # Test if we already reached the targeted level. + level = t8_element_level(eclass_scheme, elements[1]) + if level < levels[global_element_id + 1] + return 1 # Go one refinement level deeper. + end + + # Targeted level is reached. + global_element_id += 1 + return 0 + end + + # The adapt callback refines the forest according to the `levels` array. + # For each tree the callback recursively increases the refinement level + # till it matches with the associated section in `levels. 
+ forest = adapt(forest, adapt_callback; recursive = true, balance = false, + partition = false, ghost = false, user_data = C_NULL) + + @assert t8_forest_get_global_num_elements(forest) == nelements + + if mpi_isparallel() + forest = partition(forest) + end + + return T8codeMesh{ndims}(forest, tree_node_coordinates, nodes, boundary_names, "") +end + """ T8codeMesh{NDIMS, RealT}(forest, boundary_names; polydeg = 1, mapping = nothing) @@ -698,10 +840,10 @@ Adapt a `T8codeMesh` according to a user-defined `adapt_callback`. - `ghost = true`: Create a ghost layer for MPI data exchange. - `user_data = C_NULL`: Pointer to some arbitrary user-defined data. """ -function adapt!(mesh::T8codeMesh, adapt_callback; recursive = true, balance = true, - partition = true, ghost = true, user_data = C_NULL) +function adapt(forest::Ptr{t8_forest}, adapt_callback; recursive = true, balance = true, + partition = true, ghost = true, user_data = C_NULL) # Check that forest is a committed, that is valid and usable, forest. - @assert t8_forest_is_committed(mesh.forest) != 0 + @assert t8_forest_is_committed(forest) != 0 # Init new forest. new_forest_ref = Ref{t8_forest_t}() @@ -714,7 +856,7 @@ function adapt!(mesh::T8codeMesh, adapt_callback; recursive = true, balance = tr t8_forest_set_user_data(new_forest, pointer_from_objref(Ref(adapt_callback_passthrough(adapt_callback, user_data)))) - t8_forest_set_adapt(new_forest, mesh.forest, + t8_forest_set_adapt(new_forest, forest, @t8_adapt_callback(adapt_callback_wrapper), recursive) if balance @@ -725,7 +867,9 @@ function adapt!(mesh::T8codeMesh, adapt_callback; recursive = true, balance = tr t8_forest_set_partition(new_forest, set_from, set_for_coarsening) end - t8_forest_set_ghost(new_forest, ghost, T8_GHOST_FACES) # Note: MPI support not available yet so it is a dummy call. + if ghost + t8_forest_set_ghost(new_forest, ghost, T8_GHOST_FACES) + end # Julias's GC leads to random segfaults here. Temporarily switch it off. 
GC.enable(false) @@ -737,9 +881,12 @@ function adapt!(mesh::T8codeMesh, adapt_callback; recursive = true, balance = tr GC.enable(true) end - mesh.forest = new_forest + return new_forest +end - return nothing +function adapt!(mesh::T8codeMesh, adapt_callback; kwargs...) + # Call `t8_forest_ref(Ref(mesh.forest))` to keep it. + mesh.forest = adapt(mesh.forest, adapt_callback; kwargs...) end """ @@ -763,27 +910,30 @@ function balance!(mesh::T8codeMesh) return nothing end -""" - Trixi.partition!(mesh::T8codeMesh) - -Partition a `T8codeMesh` in order to redistribute elements evenly among MPI ranks. - -# Arguments -- `mesh::T8codeMesh`: Initialized mesh object. -""" -function partition!(mesh::T8codeMesh) +function partition(forest::Ptr{t8_forest}) new_forest_ref = Ref{t8_forest_t}() t8_forest_init(new_forest_ref) new_forest = new_forest_ref[] - let set_from = mesh.forest, do_ghost = 1, allow_for_coarsening = 1 + let set_from = forest, do_ghost = 1, allow_for_coarsening = 1 t8_forest_set_partition(new_forest, set_from, allow_for_coarsening) t8_forest_set_ghost(new_forest, do_ghost, T8_GHOST_FACES) t8_forest_commit(new_forest) end - mesh.forest = new_forest + return new_forest +end + +""" + Trixi.partition!(mesh::T8codeMesh) + +Partition a `T8codeMesh` in order to redistribute elements evenly among MPI ranks. +# Arguments +- `mesh::T8codeMesh`: Initialized mesh object. 
+""" +function partition!(mesh::T8codeMesh) + mesh.forest = partition(mesh.forest) return nothing end @@ -797,10 +947,14 @@ function get_global_first_element_ids(mesh::T8codeMesh) end function count_interfaces(mesh::T8codeMesh) - @assert t8_forest_is_committed(mesh.forest) != 0 + return count_interfaces(mesh.forest, ndims(mesh)) +end - num_local_elements = t8_forest_get_local_num_elements(mesh.forest) - num_local_trees = t8_forest_get_num_local_trees(mesh.forest) +function count_interfaces(forest::Ptr{t8_forest}, ndims) + @assert t8_forest_is_committed(forest) != 0 + + num_local_elements = t8_forest_get_local_num_elements(forest) + num_local_trees = t8_forest_get_num_local_trees(forest) current_index = t8_locidx_t(0) @@ -811,32 +965,32 @@ function count_interfaces(mesh::T8codeMesh) local_num_mpi_conform = 0 local_num_mpi_mortars = 0 - visited_global_mortar_ids = Set{UInt64}([]) + visited_global_mortar_ids = Set{UInt128}([]) - max_level = t8_forest_get_maxlevel(mesh.forest) #UInt64 - max_tree_num_elements = UInt64(2^ndims(mesh))^max_level + max_level = t8_forest_get_maxlevel(forest) + max_tree_num_elements = UInt128(2^ndims)^max_level if mpi_isparallel() - ghost_num_trees = t8_forest_ghost_num_trees(mesh.forest) + ghost_num_trees = t8_forest_ghost_num_trees(forest) ghost_tree_element_offsets = [num_local_elements + - t8_forest_ghost_get_tree_element_offset(mesh.forest, + t8_forest_ghost_get_tree_element_offset(forest, itree) for itree in 0:(ghost_num_trees - 1)] - ghost_global_treeids = [t8_forest_ghost_get_global_treeid(mesh.forest, itree) + ghost_global_treeids = [t8_forest_ghost_get_global_treeid(forest, itree) for itree in 0:(ghost_num_trees - 1)] end for itree in 0:(num_local_trees - 1) - tree_class = t8_forest_get_tree_class(mesh.forest, itree) - eclass_scheme = t8_forest_get_eclass_scheme(mesh.forest, tree_class) + tree_class = t8_forest_get_tree_class(forest, itree) + eclass_scheme = t8_forest_get_eclass_scheme(forest, tree_class) - num_elements_in_tree = 
t8_forest_get_tree_num_elements(mesh.forest, itree) + num_elements_in_tree = t8_forest_get_tree_num_elements(forest, itree) - global_itree = t8_forest_global_tree_id(mesh.forest, itree) + global_itree = t8_forest_global_tree_id(forest, itree) for ielement in 0:(num_elements_in_tree - 1) - element = t8_forest_get_element_in_tree(mesh.forest, itree, ielement) + element = t8_forest_get_element_in_tree(forest, itree, ielement) level = t8_element_level(eclass_scheme, element) @@ -856,7 +1010,7 @@ function count_interfaces(mesh::T8codeMesh) forest_is_balanced = Cint(1) - t8_forest_leaf_face_neighbors(mesh.forest, itree, element, + t8_forest_leaf_face_neighbors(forest, itree, element, pneighbor_leaves_ref, iface, dual_faces_ref, num_neighbors_ref, pelement_indices_ref, pneigh_scheme_ref, @@ -890,7 +1044,7 @@ function count_interfaces(mesh::T8codeMesh) elseif level < neighbor_level local_num_mpi_mortars += 1 - global_mortar_id = 2 * ndims(mesh) * current_linear_id + iface + global_mortar_id = 2 * ndims * current_linear_id + iface else # level > neighbor_level neighbor_global_ghost_itree = ghost_global_treeids[findlast(ghost_tree_element_offsets .<= @@ -900,7 +1054,7 @@ function count_interfaces(mesh::T8codeMesh) t8_element_get_linear_id(neighbor_scheme, neighbor_leaves[1], max_level) - global_mortar_id = 2 * ndims(mesh) * neighbor_linear_id + + global_mortar_id = 2 * ndims * neighbor_linear_id + dual_faces[1] if !(global_mortar_id in visited_global_mortar_ids) @@ -909,11 +1063,12 @@ function count_interfaces(mesh::T8codeMesh) end end end - end - t8_free(dual_faces_ref[]) - t8_free(pneighbor_leaves_ref[]) - t8_free(pelement_indices_ref[]) + t8_element_destroy(neighbor_scheme, num_neighbors, neighbor_leaves) + t8_free(dual_faces_ref[]) + t8_free(pneighbor_leaves_ref[]) + t8_free(pelement_indices_ref[]) + end end # for current_index += 1 @@ -982,13 +1137,15 @@ function fill_mesh_info!(mesh::T8codeMesh, interfaces, mortars, boundaries, ] # Helper variables to compute unique 
global MPI interface/mortar ids. - max_level = t8_forest_get_maxlevel(mesh.forest) #UInt64 - max_tree_num_elements = UInt64(2^ndims(mesh))^max_level + max_level = t8_forest_get_maxlevel(mesh.forest) + max_tree_num_elements = UInt128(2^ndims(mesh))^max_level # These two variables help to ensure that we count MPI mortars from smaller # elements point of view only once. - visited_global_mortar_ids = Set{UInt64}([]) - global_mortar_id_to_local = Dict{UInt64, Int}([]) + visited_global_mortar_ids = Set{UInt128}([]) + global_mortar_id_to_local = Dict{UInt128, Int}([]) + + cmesh = t8_forest_get_cmesh(mesh.forest) # Loop over all local trees. for itree in 0:(num_local_trees - 1) @@ -1013,20 +1170,6 @@ function fill_mesh_info!(mesh::T8codeMesh, interfaces, mortars, boundaries, # Loop over all faces of the current local element. for iface in 0:(num_faces - 1) - # Compute the `orientation` of the touching faces. - if t8_element_is_root_boundary(eclass_scheme, element, iface) == 1 - cmesh = t8_forest_get_cmesh(mesh.forest) - itree_in_cmesh = t8_forest_ltreeid_to_cmesh_ltreeid(mesh.forest, itree) - iface_in_tree = t8_element_tree_face(eclass_scheme, element, iface) - orientation_ref = Ref{Cint}() - - t8_cmesh_get_face_neighbor(cmesh, itree_in_cmesh, iface_in_tree, C_NULL, - orientation_ref) - orientation = orientation_ref[] - else - orientation = zero(Cint) - end - pelement_indices_ref = Ref{Ptr{t8_locidx_t}}() pneighbor_leaves_ref = Ref{Ptr{Ptr{t8_element}}}() pneigh_scheme_ref = Ref{Ptr{t8_eclass_scheme}}() @@ -1093,6 +1236,21 @@ function fill_mesh_info!(mesh::T8codeMesh, interfaces, mortars, boundaries, else neighbor_level = t8_element_level(neighbor_scheme, neighbor_leaves[1]) + # Compute the `orientation` of the touching faces. 
+ if t8_element_is_root_boundary(eclass_scheme, element, iface) == 1 + itree_in_cmesh = t8_forest_ltreeid_to_cmesh_ltreeid(mesh.forest, + itree) + iface_in_tree = t8_element_tree_face(eclass_scheme, element, iface) + orientation_ref = Ref{Cint}() + + t8_cmesh_get_face_neighbor(cmesh, itree_in_cmesh, iface_in_tree, + C_NULL, + orientation_ref) + orientation = orientation_ref[] + else + orientation = zero(Cint) + end + # Local interface or mortar. if all(neighbor_ielements .< num_local_elements) @@ -1250,11 +1408,12 @@ function fill_mesh_info!(mesh::T8codeMesh, interfaces, mortars, boundaries, end end end - end - t8_free(dual_faces_ref[]) - t8_free(pneighbor_leaves_ref[]) - t8_free(pelement_indices_ref[]) + t8_element_destroy(neighbor_scheme, num_neighbors, neighbor_leaves) + t8_free(dual_faces_ref[]) + t8_free(pneighbor_leaves_ref[]) + t8_free(pelement_indices_ref[]) + end # num_neighbors end # for iface current_index += 1 @@ -1264,6 +1423,66 @@ function fill_mesh_info!(mesh::T8codeMesh, interfaces, mortars, boundaries, return nothing end +function get_levels(mesh::T8codeMesh) + return trixi_t8_get_local_element_levels(mesh.forest) +end + +function get_cmesh_info(mesh::T8codeMesh) + @assert t8_forest_is_committed(mesh.forest) == 1 + cmesh = t8_forest_get_cmesh(mesh.forest) + return get_cmesh_info(cmesh, ndims(mesh)) +end + +# Note, `cmesh` is not partitioned as of now. +# Every MPI rank has a full copy of the `cmesh`. +function get_cmesh_info(cmesh::Ptr{t8_cmesh}, ndims) + num_trees = t8_cmesh_get_num_trees(cmesh) + num_faces = 2 * ndims + + num_interfaces = 0 + + dual_face_ref = Ref{Cint}() + orientation_ref = Ref{Cint}() + + # Count connected faces. + for itree in 0:(num_trees - 1) + for iface in 0:(num_faces - 1) + neigh_itree = t8_cmesh_get_face_neighbor(cmesh, itree, iface, dual_face_ref, + C_NULL) + if itree < neigh_itree || itree == neigh_itree && iface < dual_face_ref[] + num_interfaces += 1 + end + end + end + + # Allocate arrays. 
+ treeIDs = zeros(Int, num_interfaces) + neighIDs = zeros(Int, num_interfaces) + orientations = zeros(Int8, num_interfaces) + faces = zeros(Int8, num_interfaces) + duals = zeros(Int8, num_interfaces) + + itf_index = 0 # interface index + + for itree in 0:(num_trees - 1) + for iface in 0:(num_faces - 1) + neigh_itree = t8_cmesh_get_face_neighbor(cmesh, itree, iface, dual_face_ref, + orientation_ref) + + if itree < neigh_itree || itree == neigh_itree && iface < dual_face_ref[] + itf_index += 1 + treeIDs[itf_index] = itree + neighIDs[itf_index] = neigh_itree + orientations[itf_index] = orientation_ref[] + faces[itf_index] = iface + duals[itf_index] = dual_face_ref[] + end + end + end + + return treeIDs, neighIDs, faces, duals, orientations +end + #! format: off @deprecate T8codeMesh{2}(conn::Ptr{p4est_connectivity}; kwargs...) T8codeMesh(conn::Ptr{p4est_connectivity}; kwargs...) @deprecate T8codeMesh{3}(conn::Ptr{p8est_connectivity}; kwargs...) T8codeMesh(conn::Ptr{p8est_connectivity}; kwargs...) diff --git a/src/solvers/dgsem_t8code/containers_parallel.jl b/src/solvers/dgsem_t8code/containers_parallel.jl index 0cb3f5887a0..731f6aea7e2 100644 --- a/src/solvers/dgsem_t8code/containers_parallel.jl +++ b/src/solvers/dgsem_t8code/containers_parallel.jl @@ -21,8 +21,9 @@ function reinitialize_containers!(mesh::ParallelT8codeMesh, equations, dg::DGSEM mpi_interfaces = mpi_interfaces, # Temporary arrays for updating `mpi_cache`. 
- global_mortar_ids = fill(UInt64(0), nmpimortars(mpi_mortars)), - global_interface_ids = fill(UInt64(0), nmpiinterfaces(mpi_interfaces)), + global_mortar_ids = fill(UInt128(0), nmpimortars(mpi_mortars)), + global_interface_ids = fill(UInt128(0), + nmpiinterfaces(mpi_interfaces)), neighbor_ranks_mortar = Vector{Vector{Int}}(undef, nmpimortars(mpi_mortars)), neighbor_ranks_interface = fill(-1, nmpiinterfaces(mpi_interfaces))) diff --git a/src/solvers/dgsem_t8code/dg_parallel.jl b/src/solvers/dgsem_t8code/dg_parallel.jl index 26830261353..729d6ec0392 100644 --- a/src/solvers/dgsem_t8code/dg_parallel.jl +++ b/src/solvers/dgsem_t8code/dg_parallel.jl @@ -25,8 +25,8 @@ function create_cache(mesh::ParallelT8codeMesh, equations::AbstractEquations, dg mpi_mesh_info = (mpi_mortars = mpi_mortars, mpi_interfaces = mpi_interfaces, - global_mortar_ids = fill(UInt64(0), nmpimortars(mpi_mortars)), - global_interface_ids = fill(UInt64(0), + global_mortar_ids = fill(UInt128(0), nmpimortars(mpi_mortars)), + global_interface_ids = fill(UInt128(0), nmpiinterfaces(mpi_interfaces)), neighbor_ranks_mortar = Vector{Vector{Int}}(undef, nmpimortars(mpi_mortars)), @@ -75,7 +75,6 @@ function init_mpi_cache!(mpi_cache::P4estMPICache, mesh::ParallelT8codeMesh, nvars, nnodes, uEltype) - n_elements_global = Int(t8_forest_get_global_num_elements(mesh.forest)) n_elements_local = Int(t8_forest_get_local_num_elements(mesh.forest)) diff --git a/test/Project.toml b/test/Project.toml index f8bcff947c0..64c20ea4c37 100644 --- a/test/Project.toml +++ b/test/Project.toml @@ -29,7 +29,7 @@ ExplicitImports = "1.0.1" FFMPEG = "0.4" ForwardDiff = "0.10.24" LinearAlgebra = "1" -MPI = "0.20" +MPI = "0.20.6" NLsolve = "4.5.1" OrdinaryDiffEq = "6.49.1" Plots = "1.19" diff --git a/test/test_mpi_t8code_2d.jl b/test/test_mpi_t8code_2d.jl index dcbbf626e21..af558c22aed 100644 --- a/test/test_mpi_t8code_2d.jl +++ b/test/test_mpi_t8code_2d.jl @@ -80,9 +80,9 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", 
"t8code_2d_dgsem") @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_amr_solution_independent.jl"), # Expected errors are exactly the same as with TreeMesh! - l2=[4.933027431215839e-5], - linf=[0.00048678461161243136], - coverage_override=(maxiters = 6,)) + l2=[4.949660644033807e-5], + linf=[0.0004867846262313763], + coverage_override=(maxiters = 6,), atol=1e-9) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -112,6 +112,24 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "t8code_2d_dgsem") end end + @trixi_testset "elixir_advection_restart.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_restart.jl"), + l2=[4.507575525876275e-6], + linf=[6.21489667023134e-5], + # With the default `maxiters = 1` in coverage tests, + # there would be no time steps after the restart. + coverage_override=(maxiters = 100_000,)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + @trixi_testset "elixir_euler_source_terms_nonconforming_unstructured_flag.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_source_terms_nonconforming_unstructured_flag.jl"), diff --git a/test/test_mpi_t8code_3d.jl b/test/test_mpi_t8code_3d.jl index c4ca592eaf9..cc8af63f03e 100644 --- a/test/test_mpi_t8code_3d.jl +++ b/test/test_mpi_t8code_3d.jl @@ -87,6 +87,24 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "t8code_3d_dgsem") end end + @trixi_testset "elixir_advection_restart.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_restart.jl"), + l2=[0.002590388934758452], + linf=[0.01840757696885409], + # With the default `maxiters = 1` in coverage tests, + # there would be no time steps after the restart. 
+ coverage_override=(maxiters = 100_000,)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + # Compressible Euler @trixi_testset "elixir_euler_source_terms_nonconforming_unstructured_curved.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, diff --git a/test/test_t8code_2d.jl b/test/test_t8code_2d.jl index f769e7e8096..1e50fba1449 100644 --- a/test/test_t8code_2d.jl +++ b/test/test_t8code_2d.jl @@ -15,21 +15,6 @@ mkdir(outdir) @testset "T8codeMesh2D" begin #! format: noindent -@trixi_testset "test save_mesh_file" begin - @test_throws Exception begin - # Save mesh file support will be added in the future. The following - # lines of code are here for satisfying code coverage. - - # Create dummy mesh. - mesh = T8codeMesh((1, 1), polydeg = 1, - mapping = Trixi.coordinates2mapping((-1.0, -1.0), (1.0, 1.0)), - initial_refinement_level = 1) - - # This call throws an error. - Trixi.save_mesh_file(mesh, "dummy") - end -end - @trixi_testset "test load mesh from path" begin mktempdir() do path @test_throws "Unknown file extension: .unknown_ext" begin @@ -39,7 +24,13 @@ end end @trixi_testset "test check_for_negative_volumes" begin - # test is currently broken as t8code applies a correction on the fly + # This test actually checks if a "negative volume" error is thrown. + # Since t8code currently applies a correction on-the-fly this test + # is kinda broken. The correction feature in t8code, however, is planned + # to be removed in near to midterm future. Thus, this test is kept. It will + # fail once the internal correction is removed and can then be restored + # to its original form. + # @test_throws "Discovered negative volumes" begin @test begin # Unstructured mesh with six cells which have left-handed node ordering. 
@@ -156,6 +147,42 @@ end end end +@trixi_testset "elixir_advection_restart.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_restart.jl"), + l2=[4.507575525876275e-6], + linf=[6.21489667023134e-5], + # With the default `maxiters = 1` in coverage tests, + # there would be no time steps after the restart. + coverage_override=(maxiters = 100_000,)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + +@trixi_testset "elixir_advection_restart_amr.jl" begin + # This test is identical to the one in `test_p4est_2d.jl`. + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_restart_amr.jl"), + l2=[2.869137983727866e-6], + linf=[3.8353423270964804e-5], + # With the default `maxiters = 1` in coverage tests, + # there would be no time steps after the restart. + coverage_override=(maxiters = 25,)) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + @trixi_testset "elixir_euler_source_terms_nonconforming_unstructured_flag.jl" begin # This test is identical to the one in `test_p4est_2d.jl`. @test_trixi_include(joinpath(EXAMPLES_DIR, diff --git a/test/test_t8code_3d.jl b/test/test_t8code_3d.jl index cb301bd6c9f..e07cba1f576 100644 --- a/test/test_t8code_3d.jl +++ b/test/test_t8code_3d.jl @@ -111,6 +111,24 @@ mkdir(outdir) end end + # This test is identical to the one in `test_p4est_3d.jl`. 
+ @trixi_testset "elixir_advection_restart.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_restart.jl"), + l2=[0.002590388934758452], + linf=[0.01840757696885409], + # With the default `maxiters = 1` in coverage tests, + # there would be no time steps after the restart. + coverage_override=(maxiters = 100_000,)) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + # This test is identical to the one in `test_p4est_3d.jl`. @trixi_testset "elixir_euler_source_terms_nonconforming_unstructured_curved.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR,