From 5d906f826875fb4ee8976a9a6a28536df947b3fc Mon Sep 17 00:00:00 2001
From: Johannes Markert
Date: Tue, 19 Dec 2023 16:35:05 +0100
Subject: [PATCH] Extend T8codeMesh backend to support MPI interface data
 structures.

---
 .../t8code_2d_dgsem/elixir_advection_basic.jl |  17 +-
 src/auxiliary/mpi.jl                          |  15 +
 src/auxiliary/t8code.jl                       | 355 +++++++++++++-----
 src/callbacks_step/amr.jl                     |  25 +-
 src/callbacks_step/amr_dg2d.jl                |   5 -
 src/callbacks_step/amr_dg3d.jl                |   5 -
 src/callbacks_step/analysis_dg2d_parallel.jl  |   4 +-
 src/callbacks_step/analysis_dg3d_parallel.jl  |   4 +-
 src/callbacks_step/stepsize_dg2d.jl           |  33 ++
 src/callbacks_step/stepsize_dg3d.jl           |  33 ++
 src/meshes/t8code_mesh.jl                     |  36 +-
 .../dgsem_p4est/containers_parallel.jl        |   4 +-
 src/solvers/dgsem_p4est/dg_2d_parallel.jl     |  14 +-
 src/solvers/dgsem_p4est/dg_3d_parallel.jl     |  16 +-
 src/solvers/dgsem_p4est/dg_parallel.jl        |   6 +-
 src/solvers/dgsem_t8code/containers.jl        |  11 +-
 src/solvers/dgsem_t8code/containers_2d.jl     |   4 +-
 src/solvers/dgsem_t8code/containers_3d.jl     |   4 +-
 .../dgsem_t8code/containers_parallel.jl       |  70 ++++
 src/solvers/dgsem_t8code/dg.jl                |   7 +-
 src/solvers/dgsem_t8code/dg_parallel.jl       | 131 +++++++
 src/solvers/dgsem_tree/dg_2d_parallel.jl      |   2 +-
 test/test_mpi_t8code_2d.jl                    |  88 +++++
 test/test_mpi_t8code_3d.jl                    | 127 +++++++
 24 files changed, 860 insertions(+), 156 deletions(-)
 create mode 100644 src/solvers/dgsem_t8code/containers_parallel.jl
 create mode 100644 src/solvers/dgsem_t8code/dg_parallel.jl
 create mode 100644 test/test_mpi_t8code_2d.jl
 create mode 100644 test/test_mpi_t8code_3d.jl

diff --git a/examples/t8code_2d_dgsem/elixir_advection_basic.jl b/examples/t8code_2d_dgsem/elixir_advection_basic.jl
index 26ced0970fe..0c7f27f244b 100644
--- a/examples/t8code_2d_dgsem/elixir_advection_basic.jl
+++ b/examples/t8code_2d_dgsem/elixir_advection_basic.jl
@@ -22,8 +22,23 @@ mesh = T8codeMesh(trees_per_dimension, polydeg = 3,
                   coordinates_min = coordinates_min, coordinates_max = coordinates_max,
                   initial_refinement_level = 1)
 
+function my_initial_condition(x, t,
+                              equation::LinearScalarAdvectionEquation2D)
+    # Store translated coordinate for easy use of exact solution
+    x_trans = x - equation.advection_velocity * t
+
+    c = 1.0
+    A = 0.5
+    L = 2
+    f = 1 / L
+    omega = 2 * pi * f
+    scalar = c + A * sin(omega * sum(x_trans))
+
+    return SVector(scalar)
+end
+
 # A semidiscretization collects data structures and functions for the spatial discretization
-semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition_convergence_test,
+semi = SemidiscretizationHyperbolic(mesh, equations, my_initial_condition,
                                     solver)
 
 ###############################################################################
diff --git a/src/auxiliary/mpi.jl b/src/auxiliary/mpi.jl
index c85c23670b0..d21351a36d4 100644
--- a/src/auxiliary/mpi.jl
+++ b/src/auxiliary/mpi.jl
@@ -60,6 +60,21 @@ end
     return nothing
 end
 
+@inline function mpi_println_serial(args...)
+    if mpi_rank() > 0
+        MPI.recv(mpi_rank()-1, 42, mpi_comm())
+    end
+
+    println("rank = $(mpi_rank()) | ", args...)
+ flush(stdout) + + if mpi_rank() < mpi_nranks() - 1 + MPI.send(undef, mpi_rank()+1, 42, mpi_comm()) + end + + return nothing +end + """ ode_norm(u, t) diff --git a/src/auxiliary/t8code.jl b/src/auxiliary/t8code.jl index 99a8b8b6bb2..bd0dd27d309 100644 --- a/src/auxiliary/t8code.jl +++ b/src/auxiliary/t8code.jl @@ -54,16 +54,33 @@ function t8_free(ptr) T8code.Libt8.sc_free(t8_get_package_id(), ptr) end -function trixi_t8_count_interfaces(forest) - # Check that forest is a committed, that is valid and usable, forest. - @assert t8_forest_is_committed(forest) != 0 +function t8_forest_ghost_get_remotes(forest) + num_remotes_ref = Ref{Cint}() + remotes_ptr = @ccall T8code.Libt8.libt8.t8_forest_ghost_get_remotes(forest::t8_forest_t, num_remotes_ref::Ptr{Cint})::Ptr{Cint} + remotes = unsafe_wrap(Array, remotes_ptr, num_remotes_ref[]) +end - # Get the number of local elements of forest. - num_local_elements = t8_forest_get_local_num_elements(forest) - # Get the number of ghost elements of forest. - num_ghost_elements = t8_forest_get_num_ghosts(forest) - # Get the number of trees that have elements of this process. - num_local_trees = t8_forest_get_num_local_trees(forest) +function t8_forest_ghost_remote_first_elem(forest, remote) + @ccall T8code.Libt8.libt8.t8_forest_ghost_remote_first_elem(forest::t8_forest_t, remote::Cint)::t8_locidx_t +end + +function t8_forest_ghost_num_trees(forest) + @ccall T8code.Libt8.libt8.t8_forest_ghost_num_trees(forest::t8_forest_t)::t8_locidx_t +end + +function t8_forest_ghost_get_tree_element_offset(forest, lghost_tree) + @ccall T8code.Libt8.libt8.t8_forest_ghost_get_tree_element_offset(forest::t8_forest_t, lghost_tree::t8_locidx_t)::t8_locidx_t +end + +function t8_forest_ghost_get_global_treeid(forest, lghost_tree) + @ccall T8code.Libt8.libt8.t8_forest_ghost_get_global_treeid(forest::t8_forest_t, lghost_tree::t8_locidx_t)::t8_gloidx_t +end + +function trixi_t8_count_interfaces(mesh) + @assert t8_forest_is_committed(mesh.forest) != 0 + + num_local_elements = t8_forest_get_local_num_elements(mesh.forest) + num_local_trees = t8_forest_get_num_local_trees(mesh.forest) current_index = t8_locidx_t(0) @@ -71,20 +88,42 @@ function trixi_t8_count_interfaces(forest) local_num_mortars = 0 local_num_boundary = 0 + local_num_mpi_conform = 0 + local_num_mpi_mortars = 0 + + visited_global_mortar_ids = Set{UInt64}([]) + + max_level = t8_forest_get_maxlevel(mesh.forest) #UInt64 + max_tree_num_elements = UInt64(2^ndims(mesh))^max_level + + if mpi_isparallel() + remotes = t8_forest_ghost_get_remotes(mesh.forest) + ghost_remote_first_elem = [num_local_elements + t8_forest_ghost_remote_first_elem(mesh.forest, remote) for remote in remotes] + + ghost_num_trees = t8_forest_ghost_num_trees(mesh.forest) + + ghost_tree_element_offsets = [num_local_elements + t8_forest_ghost_get_tree_element_offset(mesh.forest, itree) for itree in 0:ghost_num_trees-1] + ghost_global_treeids = [t8_forest_ghost_get_global_treeid(mesh.forest, itree) for itree in 0:ghost_num_trees-1] + end + for itree in 0:(num_local_trees - 1) - tree_class = t8_forest_get_tree_class(forest, itree) - eclass_scheme = t8_forest_get_eclass_scheme(forest, tree_class) + tree_class = t8_forest_get_tree_class(mesh.forest, itree) + eclass_scheme = t8_forest_get_eclass_scheme(mesh.forest, tree_class) - # Get the number of elements of this tree. 
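[Annotation] The ghost bookkeeping above is what later lets us map a face neighbor's element index back to its owner: t8code appends ghost elements after the `num_local_elements` local ones, so a `findlast` over these offset arrays recovers the owning rank (and, analogously, the global tree of a ghost). A minimal sketch with made-up numbers, not part of the patch:

    # Hypothetical: 100 local elements; remote ranks 2 and 5 own ghost elements
    # starting at ghost offsets 0 and 7, respectively.
    remotes = [2, 5]
    ghost_remote_first_elem = [100, 107]  # num_local_elements .+ first ghost element
    neighbor_ielement = 109               # neighbor index pointing into the ghost layer
    owner_rank = remotes[findlast(ghost_remote_first_elem .<= neighbor_ielement)]  # == 5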
- num_elements_in_tree = t8_forest_get_tree_num_elements(forest, itree) + num_elements_in_tree = t8_forest_get_tree_num_elements(mesh.forest, itree) + + global_itree = t8_forest_global_tree_id(mesh.forest, itree) for ielement in 0:(num_elements_in_tree - 1) - element = t8_forest_get_element_in_tree(forest, itree, ielement) + element = t8_forest_get_element_in_tree(mesh.forest, itree, ielement) level = t8_element_level(eclass_scheme, element) num_faces = t8_element_num_faces(eclass_scheme, element) + # Note: This works only for forests of one element class. + current_linear_id = global_itree * max_tree_num_elements + t8_element_get_linear_id(eclass_scheme, element, max_level) + for iface in 0:(num_faces - 1) pelement_indices_ref = Ref{Ptr{t8_locidx_t}}() pneighbor_leafs_ref = Ref{Ptr{Ptr{t8_element}}}() @@ -95,29 +134,53 @@ function trixi_t8_count_interfaces(forest) forest_is_balanced = Cint(1) - t8_forest_leaf_face_neighbors(forest, itree, element, + t8_forest_leaf_face_neighbors(mesh.forest, itree, element, pneighbor_leafs_ref, iface, dual_faces_ref, num_neighbors_ref, pelement_indices_ref, pneigh_scheme_ref, forest_is_balanced) num_neighbors = num_neighbors_ref[] + dual_faces = unsafe_wrap(Array, dual_faces_ref[], num_neighbors) neighbor_ielements = unsafe_wrap(Array, pelement_indices_ref[], num_neighbors) neighbor_leafs = unsafe_wrap(Array, pneighbor_leafs_ref[], num_neighbors) neighbor_scheme = pneigh_scheme_ref[] - if num_neighbors > 0 - neighbor_level = t8_element_level(neighbor_scheme, neighbor_leafs[1]) - - # Conforming interface: The second condition ensures we only visit the interface once. - if level == neighbor_level && current_index <= neighbor_ielements[1] + if num_neighbors == 0 + local_num_boundary += 1 + else + neighbor_level = t8_element_level(neighbor_scheme, neighbor_leafs[1]) + + if all(neighbor_ielements .< num_local_elements) + # Conforming interface: The second condition ensures we + # only visit the interface once. + if level == neighbor_level && current_index <= neighbor_ielements[1] local_num_conform += 1 - elseif level < neighbor_level + elseif level < neighbor_level local_num_mortars += 1 - end - else - local_num_boundary += 1 + # else `level > neigbor_level` is ignored. + end + else + if level == neighbor_level + local_num_mpi_conform += 1 + elseif level < neighbor_level + local_num_mpi_mortars += 1 + + global_mortar_id = 2 * ndims(mesh) * current_linear_id + iface + + else # level > neighbor_level + + neighbor_global_ghost_itree = ghost_global_treeids[findlast(ghost_tree_element_offsets .<= neighbor_ielements[1])] + neighbor_linear_id = neighbor_global_ghost_itree * max_tree_num_elements + t8_element_get_linear_id(neighbor_scheme, neighbor_leafs[1], max_level) #UInt64 + global_mortar_id = 2 * ndims(mesh) * neighbor_linear_id + dual_faces[1] + + if !(global_mortar_id in visited_global_mortar_ids) + push!(visited_global_mortar_ids, global_mortar_id) + local_num_mpi_mortars += 1 + end + end + end end t8_free(dual_faces_ref[]) @@ -131,47 +194,78 @@ function trixi_t8_count_interfaces(forest) return (interfaces = local_num_conform, mortars = local_num_mortars, - boundaries = local_num_boundary) + boundaries = local_num_boundary, + mpi_interfaces = local_num_mpi_conform, + mpi_mortars = local_num_mpi_mortars) end -function trixi_t8_fill_mesh_info(forest, elements, interfaces, mortars, boundaries, - boundary_names) - # Check that forest is a committed, that is valid and usable, forest. 
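[Annotation] Both the counting pass above and the filling pass below hinge on globally unique ids: a tree can hold at most `(2^NDIMS)^max_level` elements, so the global tree id and the per-tree linear id combine into a globally unique element id, and the `2 * ndims(mesh)` faces of that element then yield globally unique interface/mortar ids that all ranks agree on. A worked sketch with small, made-up numbers:

    NDIMS = 2
    max_level = 5                                      # hypothetical maximum refinement level
    max_tree_num_elements = UInt64(2^NDIMS)^max_level  # 4^5 == 1024
    global_itree = UInt64(3)
    element_linear_id = UInt64(42)                     # as from t8_element_get_linear_id
    current_linear_id = global_itree * max_tree_num_elements + element_linear_id  # 3114
    iface = 1
    global_mortar_id = 2 * NDIMS * current_linear_id + iface                      # 12457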
- @assert t8_forest_is_committed(forest) != 0 +function trixi_t8_fill_mesh_info(mesh, elements, interfaces, mortars, boundaries, + boundary_names; mpi_mesh_info = undef) - # Get the number of local elements of forest. - num_local_elements = t8_forest_get_local_num_elements(forest) - # Get the number of ghost elements of forest. - num_ghost_elements = t8_forest_get_num_ghosts(forest) - # Get the number of trees that have elements of this process. - num_local_trees = t8_forest_get_num_local_trees(forest) + @assert t8_forest_is_committed(mesh.forest) != 0 + + num_local_elements = t8_forest_get_local_num_elements(mesh.forest) + num_local_trees = t8_forest_get_num_local_trees(mesh.forest) current_index = t8_locidx_t(0) + max_level = t8_forest_get_maxlevel(mesh.forest) #UInt64 + max_tree_num_elements = UInt64(2^ndims(mesh))^max_level + + if mpi_isparallel() + remotes = t8_forest_ghost_get_remotes(mesh.forest) + ghost_remote_first_elem = [num_local_elements + t8_forest_ghost_remote_first_elem(mesh.forest, remote) for remote in remotes] + + ghost_num_trees = t8_forest_ghost_num_trees(mesh.forest) + + ghost_tree_element_offsets = [num_local_elements + t8_forest_ghost_get_tree_element_offset(mesh.forest, itree) for itree in 0:ghost_num_trees-1] + ghost_global_treeids = [t8_forest_ghost_get_global_treeid(mesh.forest, itree) for itree in 0:ghost_num_trees-1] + end + local_num_conform = 0 local_num_mortars = 0 local_num_boundary = 0 + local_num_mpi_conform = 0 + local_num_mpi_mortars = 0 + + # Works for quads and hexs. + map_iface_to_ichild_to_position = [ + # 0 1 2 3 4 5 6 7 ichild/iface + [ 1, 0, 2, 0, 3, 0, 4, 0 ], # 0 + [ 0, 1, 0, 2, 0, 3, 0, 4 ], # 1 + [ 1, 2, 0, 0, 3, 4, 0, 0 ], # 2 + [ 0, 0, 1, 2, 0, 0, 3, 4 ], # 3 + [ 1, 2, 3, 4, 0, 0, 0, 0 ], # 4 + [ 0, 0, 0, 0, 1, 2, 3, 4 ], # 5 + ] + + visited_global_mortar_ids = Set{UInt64}([]) + global_mortar_id_to_local = Dict{UInt64,Int}([]) + for itree in 0:(num_local_trees - 1) - tree_class = t8_forest_get_tree_class(forest, itree) - eclass_scheme = t8_forest_get_eclass_scheme(forest, tree_class) + tree_class = t8_forest_get_tree_class(mesh.forest, itree) + eclass_scheme = t8_forest_get_eclass_scheme(mesh.forest, tree_class) - # Get the number of elements of this tree. - num_elements_in_tree = t8_forest_get_tree_num_elements(forest, itree) + num_elements_in_tree = t8_forest_get_tree_num_elements(mesh.forest, itree) + + global_itree = t8_forest_global_tree_id(mesh.forest, itree) for ielement in 0:(num_elements_in_tree - 1) - element = t8_forest_get_element_in_tree(forest, itree, ielement) + element = t8_forest_get_element_in_tree(mesh.forest, itree, ielement) level = t8_element_level(eclass_scheme, element) num_faces = t8_element_num_faces(eclass_scheme, element) - for iface in 0:(num_faces - 1) + # Note: This works only for forests of one element class. + current_linear_id = global_itree * max_tree_num_elements + t8_element_get_linear_id(eclass_scheme, element, max_level) + for iface in 0:(num_faces - 1) # Compute the `orientation` of the touching faces. 
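[Annotation] The `map_iface_to_ichild_to_position` table above encodes, for each face of the large element, which mortar position a small neighbor occupies based on its (Morton) child id; an entry of 0 means that child does not touch the face. A quick plausibility check for quads, for illustration only:

    # Face 0 (the -x face) of a refined quad is covered by children 0 and 2,
    # which take mortar positions 1 and 2; children 1 and 3 map to 0.
    row_iface0 = [1, 0, 2, 0, 3, 0, 4, 0]
    ichild = 2
    position = row_iface0[ichild + 1]  # == 2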
if t8_element_is_root_boundary(eclass_scheme, element, iface) == 1 - cmesh = t8_forest_get_cmesh(forest) - itree_in_cmesh = t8_forest_ltreeid_to_cmesh_ltreeid(forest, itree) + cmesh = t8_forest_get_cmesh(mesh.forest) + itree_in_cmesh = t8_forest_ltreeid_to_cmesh_ltreeid(mesh.forest, itree) iface_in_tree = t8_element_tree_face(eclass_scheme, element, iface) orientation_ref = Ref{Cint}() @@ -191,7 +285,7 @@ function trixi_t8_fill_mesh_info(forest, elements, interfaces, mortars, boundari forest_is_balanced = Cint(1) - t8_forest_leaf_face_neighbors(forest, itree, element, + t8_forest_leaf_face_neighbors(mesh.forest, itree, element, pneighbor_leafs_ref, iface, dual_faces_ref, num_neighbors_ref, pelement_indices_ref, pneigh_scheme_ref, @@ -204,70 +298,152 @@ function trixi_t8_fill_mesh_info(forest, elements, interfaces, mortars, boundari neighbor_leafs = unsafe_wrap(Array, pneighbor_leafs_ref[], num_neighbors) neighbor_scheme = pneigh_scheme_ref[] - if num_neighbors > 0 + if num_neighbors == 0 # Domain boundary. + local_num_boundary += 1 + boundary_id = local_num_boundary + + boundaries.neighbor_ids[boundary_id] = current_index + 1 + + init_boundary_node_indices!(boundaries, iface, boundary_id) + + # One-based indexing. + boundaries.name[boundary_id] = boundary_names[iface + 1, itree + 1] + + else # Interfaces/mortars. + neighbor_level = t8_element_level(neighbor_scheme, neighbor_leafs[1]) - # Conforming interface: The second condition ensures we only visit the interface once. - if level == neighbor_level && current_index <= neighbor_ielements[1] - local_num_conform += 1 + # Local interfaces/mortars. + if all(neighbor_ielements .< num_local_elements) + # Conforming interface: The second condition ensures we only visit the interface once. + if level == neighbor_level && current_index <= neighbor_ielements[1] + local_num_conform += 1 - faces = (iface, dual_faces[1]) - interface_id = local_num_conform + interfaces.neighbor_ids[1, local_num_conform] = current_index + 1 + interfaces.neighbor_ids[2, local_num_conform] = neighbor_ielements[1] + 1 - # Write data to interfaces container. - interfaces.neighbor_ids[1, interface_id] = current_index + 1 - interfaces.neighbor_ids[2, interface_id] = neighbor_ielements[1] + 1 + init_interface_node_indices!(interfaces, (iface, dual_faces[1]), orientation, + local_num_conform) - # Save interfaces.node_indices dimension specific in containers_3d.jl. - init_interface_node_indices!(interfaces, faces, orientation, - interface_id) - # Non-conforming interface. - elseif level < neighbor_level - local_num_mortars += 1 + # Non-conforming interface. + elseif level < neighbor_level + local_num_mortars += 1 - faces = (dual_faces[1], iface) + # Last entry is the large element. + mortars.neighbor_ids[end, local_num_mortars] = current_index + 1 - mortar_id = local_num_mortars + trixi_t8_init_mortar_neighbor_ids!(mortars, iface, dual_faces[1], + orientation, neighbor_ielements, + local_num_mortars) - # Last entry is the large element. - mortars.neighbor_ids[end, mortar_id] = current_index + 1 + init_mortar_node_indices!(mortars, (dual_faces[1], iface), orientation, local_num_mortars) - # Fill in the `mortars.neighbor_ids` array and reorder if necessary. - trixi_t8_init_mortar_neighbor_ids!(mortars, faces[2], faces[1], - orientation, neighbor_ielements, - mortar_id) + # else: `level > neighbor_level` is skipped since we visit the mortar interface only once. + end - # Fill in the `mortars.node_indices` array. 
- init_mortar_node_indices!(mortars, faces, orientation, mortar_id) + else # MPI interfaces/mortars. + # Conforming MPI interface. + if level == neighbor_level + local_num_mpi_conform += 1 - # else: "level > neighbor_level" is skipped since we visit the mortar interface only once. - end + neighbor_global_ghost_itree = ghost_global_treeids[findlast(ghost_tree_element_offsets .<= neighbor_ielements[1])] - # Domain boundary. - else - local_num_boundary += 1 - boundary_id = local_num_boundary + neighbor_linear_id = neighbor_global_ghost_itree * max_tree_num_elements + t8_element_get_linear_id(neighbor_scheme, neighbor_leafs[1], max_level) #UInt64 - boundaries.neighbor_ids[boundary_id] = current_index + 1 + if current_linear_id < neighbor_linear_id + local_side = 1 + smaller_iface = iface + smaller_linear_id = current_linear_id + faces = (iface, dual_faces[1]) + else + local_side = 2 + smaller_iface = dual_faces[1] + smaller_linear_id = neighbor_linear_id + faces = (dual_faces[1],iface) + end - init_boundary_node_indices!(boundaries, iface, boundary_id) + global_interface_id = 2 * ndims(mesh) * smaller_linear_id + smaller_iface - # One-based indexing. - boundaries.name[boundary_id] = boundary_names[iface + 1, itree + 1] + mpi_mesh_info.mpi_interfaces.local_neighbor_ids[local_num_mpi_conform] = current_index + 1 + mpi_mesh_info.mpi_interfaces.local_sides[local_num_mpi_conform] = local_side + + init_mpi_interface_node_indices!(mpi_mesh_info.mpi_interfaces, faces, local_side, orientation, + local_num_mpi_conform) + + neighbor_rank = remotes[findlast(ghost_remote_first_elem .<= neighbor_ielements[1])] + mpi_mesh_info.neighbor_ranks_interface[local_num_mpi_conform] = neighbor_rank + + mpi_mesh_info.global_interface_ids[local_num_mpi_conform] = global_interface_id + + # MPI Mortar. + elseif level < neighbor_level + local_num_mpi_mortars += 1 + + global_mortar_id = 2 * ndims(mesh) * current_linear_id + iface + + neighbor_ids = neighbor_ielements .+ 1 + + local_neighbor_positions = findall(neighbor_ids .<= num_local_elements) + local_neighbor_ids = [neighbor_ids[i] for i in local_neighbor_positions] + local_neighbor_positions = [ + map_iface_to_ichild_to_position[dual_faces[1]+1][t8_element_child_id(neighbor_scheme, neighbor_leafs[i])+1] for i in local_neighbor_positions] + + # Last entry is the large element. 
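[Annotation] For conforming MPI interfaces, both ranks must derive the same `global_interface_id` and agree on which side is side 1: the convention above is that the element with the smaller global linear id owns the interface and contributes its face index. A sketch with hypothetical ids:

    NDIMS = 2
    current_linear_id, neighbor_linear_id = UInt64(1000), UInt64(2500)
    iface, dual_face = 3, 2
    if current_linear_id < neighbor_linear_id
        local_side, smaller_iface, smaller_linear_id = 1, iface, current_linear_id
    else
        local_side, smaller_iface, smaller_linear_id = 2, dual_face, neighbor_linear_id
    end
    global_interface_id = 2 * NDIMS * smaller_linear_id + smaller_iface  # == 4003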
+ push!(local_neighbor_ids, current_index + 1) + push!(local_neighbor_positions, 2^(ndims(mesh) - 1) + 1) + + mpi_mesh_info.mpi_mortars.local_neighbor_ids[local_num_mpi_mortars] = local_neighbor_ids + mpi_mesh_info.mpi_mortars.local_neighbor_positions[local_num_mpi_mortars] = local_neighbor_positions + + init_mortar_node_indices!(mpi_mesh_info.mpi_mortars, (dual_faces[1], iface), orientation, local_num_mpi_mortars) + + neighbor_ranks = [remotes[findlast(ghost_remote_first_elem .<= ineighbor_ghost)] for ineighbor_ghost in filter(x -> x >= num_local_elements, neighbor_ielements)] + mpi_mesh_info.neighbor_ranks_mortar[local_num_mpi_mortars] = neighbor_ranks + + mpi_mesh_info.global_mortar_ids[local_num_mpi_mortars] = global_mortar_id + + # MPI Mortar: larger element is ghost + else + + neighbor_global_ghost_itree = ghost_global_treeids[findlast(ghost_tree_element_offsets .<= neighbor_ielements[1])] + neighbor_linear_id = neighbor_global_ghost_itree * max_tree_num_elements + t8_element_get_linear_id(neighbor_scheme, neighbor_leafs[1], max_level) #UInt64 + global_mortar_id = 2 * ndims(mesh) * neighbor_linear_id + dual_faces[1] + + if global_mortar_id in visited_global_mortar_ids + local_mpi_mortar_id = global_mortar_id_to_local[global_mortar_id] + + push!(mpi_mesh_info.mpi_mortars.local_neighbor_ids[local_mpi_mortar_id], current_index+1) + push!(mpi_mesh_info.mpi_mortars.local_neighbor_positions[local_mpi_mortar_id], map_iface_to_ichild_to_position[iface+1][t8_element_child_id(eclass_scheme, element)+1]) + else + local_num_mpi_mortars += 1 + local_mpi_mortar_id = local_num_mpi_mortars + push!(visited_global_mortar_ids,global_mortar_id) + global_mortar_id_to_local[global_mortar_id] = local_mpi_mortar_id + + mpi_mesh_info.mpi_mortars.local_neighbor_ids[local_mpi_mortar_id] = [current_index+1] + mpi_mesh_info.mpi_mortars.local_neighbor_positions[local_mpi_mortar_id] = [map_iface_to_ichild_to_position[iface+1][t8_element_child_id(eclass_scheme, element)+1]] + init_mortar_node_indices!(mpi_mesh_info.mpi_mortars, (iface, dual_faces[1]), orientation, local_mpi_mortar_id) + + neighbor_ranks = [remotes[findlast(ghost_remote_first_elem .<= neighbor_ielements[1])]] + mpi_mesh_info.neighbor_ranks_mortar[local_mpi_mortar_id] = neighbor_ranks + + mpi_mesh_info.global_mortar_ids[local_mpi_mortar_id] = global_mortar_id + end + end + end end t8_free(dual_faces_ref[]) t8_free(pneighbor_leafs_ref[]) t8_free(pelement_indices_ref[]) - end # for iface = ... + end # for iface current_index += 1 - end # for - end # for + end # for ielement + end # for itree + + return nothing - return (interfaces = local_num_conform, - mortars = local_num_mortars, - boundaries = local_num_boundary) end function trixi_t8_get_local_element_levels(forest) @@ -349,15 +525,16 @@ function trixi_t8_adapt_new(old_forest, indicators) t8_forest_init(new_forest_ref) new_forest = new_forest_ref[] - let set_from = C_NULL, recursive = 0, set_for_coarsening = 0, no_repartition = 0, + let set_from = C_NULL, recursive = 0, set_for_coarsening = 1, no_repartition = 1, do_ghost = 1 t8_forest_set_user_data(new_forest, pointer(indicators)) t8_forest_set_adapt(new_forest, old_forest, @t8_adapt_callback(adapt_callback), recursive) t8_forest_set_balance(new_forest, set_from, no_repartition) - t8_forest_set_partition(new_forest, set_from, set_for_coarsening) - t8_forest_set_ghost(new_forest, do_ghost, T8_GHOST_FACES) # Note: MPI support not available yet so it is a dummy call. 
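[Annotation] When the large element of a mortar is a ghost, several of this rank's small elements visit the same mortar, so the code above registers the mortar on first contact and only appends neighbor data on later visits. The deduplication boils down to the following standalone sketch (hypothetical visit order):

    function register!(visited, id_to_local, global_mortar_id)
        # Create a local mortar id on first visit, reuse it afterwards.
        if !(global_mortar_id in visited)
            push!(visited, global_mortar_id)
            id_to_local[global_mortar_id] = length(id_to_local) + 1
        end
        return id_to_local[global_mortar_id]
    end

    visited = Set{UInt64}()
    id_to_local = Dict{UInt64, Int}()
    foreach(id -> register!(visited, id_to_local, id), UInt64[17, 23, 17, 17])
    length(id_to_local)  # == 2 distinct MPI mortars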
+ # t8_forest_set_partition(new_forest, set_from, set_for_coarsening) + t8_forest_set_ghost(new_forest, do_ghost, T8_GHOST_FACES) + t8_forest_commit(new_forest) end @@ -419,5 +596,7 @@ function trixi_t8_adapt!(mesh, indicators) mesh.forest = forest_cached + t8_forest_write_vtk(mesh.forest, "forest") + return differences end diff --git a/src/callbacks_step/amr.jl b/src/callbacks_step/amr.jl index 5854c8617c3..b83a9773ddb 100644 --- a/src/callbacks_step/amr.jl +++ b/src/callbacks_step/amr.jl @@ -726,7 +726,7 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::P4estMesh, return has_changed end -function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::SerialT8codeMesh, +function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::T8codeMesh, equations, dg::DG, cache, semi, t, iter; only_refine = false, only_coarsen = false, @@ -754,21 +754,22 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::SerialT8codeMe @trixi_timeit timer() "adapt" begin difference = @trixi_timeit timer() "mesh" trixi_t8_adapt!(mesh, indicators) - @trixi_timeit timer() "solver" adapt!(u_ode, adaptor, mesh, equations, dg, - cache, difference) - end + # Store whether there were any cells coarsened or refined and perform load balancing. + has_changed = any(difference .!= 0) - # Store whether there were any cells coarsened or refined and perform load balancing. - has_changed = any(difference .!= 0) + # Check if mesh changed on other processes + if mpi_isparallel() + has_changed = MPI.Allreduce!(Ref(has_changed), |, mpi_comm())[] + end - # TODO: T8codeMesh for MPI not implemented yet. - # Check if mesh changed on other processes - # if mpi_isparallel() - # has_changed = MPI.Allreduce!(Ref(has_changed), |, mpi_comm())[] - # end + if has_changed + @trixi_timeit timer() "solver" adapt!(u_ode, adaptor, mesh, equations, dg, + cache, difference) + end + end if has_changed - # TODO: T8codeMesh for MPI not implemented yet. + # # TODO: T8codeMesh for rebalance/partition not implemented yet. # if mpi_isparallel() && amr_callback.dynamic_load_balancing # @trixi_timeit timer() "dynamic load balancing" begin # global_first_quadrant = unsafe_wrap(Array, mesh.p4est.global_first_quadrant, mpi_nranks() + 1) diff --git a/src/callbacks_step/amr_dg2d.jl b/src/callbacks_step/amr_dg2d.jl index 7cb9fee8f6e..a6907c698ac 100644 --- a/src/callbacks_step/amr_dg2d.jl +++ b/src/callbacks_step/amr_dg2d.jl @@ -383,11 +383,6 @@ end function adapt!(u_ode::AbstractVector, adaptor, mesh::T8codeMesh{2}, equations, dg::DGSEM, cache, difference) - # Return early if there is nothing to do. - if !any(difference .!= 0) - return nothing - end - # Number of (local) cells/elements. old_nelems = nelements(dg, cache) new_nelems = ncells(mesh) diff --git a/src/callbacks_step/amr_dg3d.jl b/src/callbacks_step/amr_dg3d.jl index de5030e2ad4..db0fef42727 100644 --- a/src/callbacks_step/amr_dg3d.jl +++ b/src/callbacks_step/amr_dg3d.jl @@ -314,11 +314,6 @@ end function adapt!(u_ode::AbstractVector, adaptor, mesh::T8codeMesh{3}, equations, dg::DGSEM, cache, difference) - # Return early if there is nothing to do. - if !any(difference .!= 0) - return nothing - end - # Number of (local) cells/elements. 
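[Annotation] The early-returns removed from `adapt!` in the `amr_dg2d.jl`/`amr_dg3d.jl` hunks here belong in the AMR callback instead: resizing the MPI-coupled containers is collective, so every rank must learn whether *any* rank adapted before deciding to call `adapt!`. A standalone sketch of that reduction (hypothetical flag values):

    using MPI
    MPI.Init()
    comm = MPI.COMM_WORLD
    local_changed = MPI.Comm_rank(comm) == 0  # hypothetical: only rank 0 adapted
    global_changed = MPI.Allreduce(local_changed, |, comm)
    # Every rank now sees `true` and takes the adapt branch together.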
old_nelems = nelements(dg, cache) new_nelems = ncells(mesh) diff --git a/src/callbacks_step/analysis_dg2d_parallel.jl b/src/callbacks_step/analysis_dg2d_parallel.jl index a04bf732604..8d6ea99b6a1 100644 --- a/src/callbacks_step/analysis_dg2d_parallel.jl +++ b/src/callbacks_step/analysis_dg2d_parallel.jl @@ -91,7 +91,7 @@ function calc_error_norms_per_element(func, u, t, analyzer, end function calc_error_norms(func, u, t, analyzer, - mesh::ParallelP4estMesh{2}, equations, + mesh::Union{ParallelP4estMesh{2},ParallelT8codeMesh{2}}, equations, initial_condition, dg::DGSEM, cache, cache_analysis) @unpack vandermonde, weights = analyzer @unpack node_coordinates, inverse_jacobian = cache.elements @@ -171,7 +171,7 @@ function integrate_via_indices(func::Func, u, end function integrate_via_indices(func::Func, u, - mesh::ParallelP4estMesh{2}, equations, + mesh::Union{ParallelP4estMesh{2},ParallelT8codeMesh{2}}, equations, dg::DGSEM, cache, args...; normalize = true) where {Func} @unpack weights = dg.basis diff --git a/src/callbacks_step/analysis_dg3d_parallel.jl b/src/callbacks_step/analysis_dg3d_parallel.jl index d8756d91c9d..fec1acda7fc 100644 --- a/src/callbacks_step/analysis_dg3d_parallel.jl +++ b/src/callbacks_step/analysis_dg3d_parallel.jl @@ -6,7 +6,7 @@ #! format: noindent function calc_error_norms(func, u, t, analyzer, - mesh::ParallelP4estMesh{3}, equations, + mesh::Union{ParallelP4estMesh{3},ParallelT8codeMesh{3}}, equations, initial_condition, dg::DGSEM, cache, cache_analysis) @unpack vandermonde, weights = analyzer @unpack node_coordinates, inverse_jacobian = cache.elements @@ -64,7 +64,7 @@ function calc_error_norms(func, u, t, analyzer, end function integrate_via_indices(func::Func, u, - mesh::ParallelP4estMesh{3}, equations, + mesh::Union{ParallelP4estMesh{3},ParallelT8codeMesh{3}}, equations, dg::DGSEM, cache, args...; normalize = true) where {Func} @unpack weights = dg.basis diff --git a/src/callbacks_step/stepsize_dg2d.jl b/src/callbacks_step/stepsize_dg2d.jl index 673c3ba6aa6..21ba8ab8a07 100644 --- a/src/callbacks_step/stepsize_dg2d.jl +++ b/src/callbacks_step/stepsize_dg2d.jl @@ -159,6 +159,22 @@ function max_dt(u, t, mesh::ParallelP4estMesh{2}, return dt end +function max_dt(u, t, mesh::ParallelT8codeMesh{2}, + constant_speed::False, equations, dg::DG, cache) + # call the method accepting a general `mesh::P4estMesh{2}` + # TODO: MPI, we should improve this; maybe we should dispatch on `u` + # and create some MPI array type, overloading broadcasting and mapreduce etc. + # Then, this specific array type should also work well with DiffEq etc. + dt = invoke(max_dt, + Tuple{typeof(u), typeof(t), T8codeMesh{2}, + typeof(constant_speed), typeof(equations), typeof(dg), + typeof(cache)}, + u, t, mesh, constant_speed, equations, dg, cache) + dt = MPI.Allreduce!(Ref(dt), min, mpi_comm())[] + + return dt +end + function max_dt(u, t, mesh::ParallelP4estMesh{2}, constant_speed::True, equations, dg::DG, cache) # call the method accepting a general `mesh::P4estMesh{2}` @@ -174,4 +190,21 @@ function max_dt(u, t, mesh::ParallelP4estMesh{2}, return dt end + +function max_dt(u, t, mesh::ParallelT8codeMesh{2}, + constant_speed::True, equations, dg::DG, cache) + # call the method accepting a general `mesh::P4estMesh{2}` + # TODO: MPI, we should improve this; maybe we should dispatch on `u` + # and create some MPI array type, overloading broadcasting and mapreduce etc. + # Then, this specific array type should also work well with DiffEq etc. 
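[Annotation] The `max_dt` additions in this and the following hunks all follow the same pattern: delegate to the serial `T8codeMesh` method via `invoke`, then take the global minimum over all ranks. A self-contained sketch of the `invoke` idiom (toy function, not Trixi API):

    compute(x::Integer) = 2x                                   # generic method
    compute(x::Int) = invoke(compute, Tuple{Integer}, x) + 1   # reuse it, then post-process
    compute(3)  # == 7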
+ dt = invoke(max_dt, + Tuple{typeof(u), typeof(t), T8codeMesh{2}, + typeof(constant_speed), typeof(equations), typeof(dg), + typeof(cache)}, + u, t, mesh, constant_speed, equations, dg, cache) + dt = MPI.Allreduce!(Ref(dt), min, mpi_comm())[] + + return dt +end + end # @muladd diff --git a/src/callbacks_step/stepsize_dg3d.jl b/src/callbacks_step/stepsize_dg3d.jl index 822ab2f87ec..9690bcf723b 100644 --- a/src/callbacks_step/stepsize_dg3d.jl +++ b/src/callbacks_step/stepsize_dg3d.jl @@ -150,4 +150,37 @@ function max_dt(u, t, mesh::ParallelP4estMesh{3}, return dt end + +function max_dt(u, t, mesh::ParallelT8codeMesh{3}, + constant_speed::False, equations, dg::DG, cache) + # call the method accepting a general `mesh::P4estMesh{3}` + # TODO: MPI, we should improve this; maybe we should dispatch on `u` + # and create some MPI array type, overloading broadcasting and mapreduce etc. + # Then, this specific array type should also work well with DiffEq etc. + dt = invoke(max_dt, + Tuple{typeof(u), typeof(t), T8codeMesh{3}, + typeof(constant_speed), typeof(equations), typeof(dg), + typeof(cache)}, + u, t, mesh, constant_speed, equations, dg, cache) + dt = MPI.Allreduce!(Ref(dt), min, mpi_comm())[] + + return dt +end + +function max_dt(u, t, mesh::ParallelT8codeMesh{3}, + constant_speed::True, equations, dg::DG, cache) + # call the method accepting a general `mesh::P4estMesh{3}` + # TODO: MPI, we should improve this; maybe we should dispatch on `u` + # and create some MPI array type, overloading broadcasting and mapreduce etc. + # Then, this specific array type should also work well with DiffEq etc. + dt = invoke(max_dt, + Tuple{typeof(u), typeof(t), T8codeMesh{3}, + typeof(constant_speed), typeof(equations), typeof(dg), + typeof(cache)}, + u, t, mesh, constant_speed, equations, dg, cache) + dt = MPI.Allreduce!(Ref(dt), min, mpi_comm())[] + + return dt +end + end # @muladd diff --git a/src/meshes/t8code_mesh.jl b/src/meshes/t8code_mesh.jl index e9794bbe050..23dc4241f92 100644 --- a/src/meshes/t8code_mesh.jl +++ b/src/meshes/t8code_mesh.jl @@ -32,7 +32,7 @@ mutable struct T8codeMesh{NDIMS, RealT <: Real, IsParallel, NDIMSP2, NNODES} <: boundary_names, current_filename) where {NDIMS} - is_parallel = mpi_isparallel() + is_parallel = mpi_isparallel() ? 
True() : False() mesh = new{NDIMS, Float64, typeof(is_parallel), NDIMS + 2, length(nodes)}(forest, is_parallel) @@ -81,7 +81,7 @@ const ParallelT8codeMesh{NDIMS} = T8codeMesh{NDIMS, <:Real, <:True} @inline Base.ndims(::T8codeMesh{NDIMS}) where {NDIMS} = NDIMS @inline Base.real(::T8codeMesh{NDIMS, RealT}) where {NDIMS, RealT} = RealT -@inline ntrees(mesh::T8codeMesh) = Int(t8_forest_get_num_local_trees(mesh.forest)) +@inline ntrees(mesh::T8codeMesh) = size(mesh.tree_node_coordinates)[end] @inline ncells(mesh::T8codeMesh) = Int(t8_forest_get_local_num_elements(mesh.forest)) @inline ninterfaces(mesh::T8codeMesh) = mesh.ninterfaces @inline nmortars(mesh::T8codeMesh) = mesh.nmortars @@ -188,21 +188,22 @@ function T8codeMesh(trees_per_dimension; polydeg, T8code.Libt8.p8est_connectivity_destroy(conn) end + do_face_ghost = mpi_isparallel() scheme = t8_scheme_new_default_cxx() - forest = t8_forest_new_uniform(cmesh, scheme, initial_refinement_level, 0, mpi_comm()) + forest = t8_forest_new_uniform(cmesh, scheme, initial_refinement_level, do_face_ghost, mpi_comm()) basis = LobattoLegendreBasis(RealT, polydeg) nodes = basis.nodes + num_trees = t8_cmesh_get_num_trees(cmesh) + tree_node_coordinates = Array{RealT, NDIMS + 2}(undef, NDIMS, ntuple(_ -> length(nodes), NDIMS)..., - prod(trees_per_dimension)) + num_trees) # Get cell length in reference mesh: Omega_ref = [-1,1]^2. dx = [2 / n for n in trees_per_dimension] - num_local_trees = t8_cmesh_get_num_local_trees(cmesh) - # Non-periodic boundaries. boundary_names = fill(Symbol("---"), 2 * NDIMS, prod(trees_per_dimension)) @@ -212,12 +213,12 @@ function T8codeMesh(trees_per_dimension; polydeg, mapping_ = mapping end - for itree in 1:num_local_trees + for itree in 1:num_trees veptr = t8_cmesh_get_tree_vertices(cmesh, itree - 1) verts = unsafe_wrap(Array, veptr, (3, 1 << NDIMS)) if NDIMS == 2 - # Calculate node coordinates of reference mesh. + # Calculate node coordinates of reference mesh for 2D. cell_x_offset = (verts[1, 1] - 0.5 * (trees_per_dimension[1] - 1)) * dx[1] cell_y_offset = (verts[2, 1] - 0.5 * (trees_per_dimension[2] - 1)) * dx[2] @@ -228,6 +229,7 @@ function T8codeMesh(trees_per_dimension; polydeg, dx[2] * nodes[j] / 2) end elseif NDIMS == 3 + # Calculate node coordinates of reference mesh for 2D. cell_x_offset = (verts[1, 1] - 0.5 * (trees_per_dimension[1] - 1)) * dx[1] cell_y_offset = (verts[2, 1] - 0.5 * (trees_per_dimension[2] - 1)) * dx[2] cell_z_offset = (verts[3, 1] - 0.5 * (trees_per_dimension[3] - 1)) * dx[3] @@ -289,17 +291,19 @@ conforming mesh from a `t8_cmesh` data structure. 
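[Annotation] `ParallelT8codeMesh` works because `is_parallel` is now stored as a *type* (`True()`/`False()`) rather than a `Bool`, so parallel code paths resolve via dispatch at compile time. A minimal standalone sketch of this trait pattern (toy types; Trixi ships its own `True`/`False`):

    struct True end
    struct False end
    struct ToyMesh{IsParallel}
        is_parallel::IsParallel
    end
    const ParallelToyMesh = ToyMesh{True}
    backend(::ParallelToyMesh) = :parallel
    backend(::ToyMesh) = :serial
    backend(ToyMesh(True()))   # :parallel
    backend(ToyMesh(False()))  # :serial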
function T8codeMesh{NDIMS}(cmesh::Ptr{t8_cmesh}; mapping = nothing, polydeg = 1, RealT = Float64, initial_refinement_level = 0) where {NDIMS} + + do_face_ghost = mpi_isparallel() scheme = t8_scheme_new_default_cxx() - forest = t8_forest_new_uniform(cmesh, scheme, initial_refinement_level, 0, mpi_comm()) + forest = t8_forest_new_uniform(cmesh, scheme, initial_refinement_level, do_face_ghost, mpi_comm()) basis = LobattoLegendreBasis(RealT, polydeg) nodes = basis.nodes - num_local_trees = t8_cmesh_get_num_local_trees(cmesh) + num_trees = t8_cmesh_get_num_trees(cmesh) tree_node_coordinates = Array{RealT, NDIMS + 2}(undef, NDIMS, ntuple(_ -> length(nodes), NDIMS)..., - num_local_trees) + num_trees) nodes_in = [-1.0, 1.0] matrix = polynomial_interpolation_matrix(nodes_in, nodes) @@ -308,7 +312,7 @@ function T8codeMesh{NDIMS}(cmesh::Ptr{t8_cmesh}; data_in = Array{RealT, 3}(undef, 2, 2, 2) tmp1 = zeros(RealT, 2, length(nodes), length(nodes_in)) - for itree in 0:(num_local_trees - 1) + for itree in 0:(num_trees - 1) veptr = t8_cmesh_get_tree_vertices(cmesh, itree) verts = unsafe_wrap(Array, veptr, (3, 1 << NDIMS)) @@ -339,7 +343,7 @@ function T8codeMesh{NDIMS}(cmesh::Ptr{t8_cmesh}; data_in = Array{RealT, 4}(undef, 3, 2, 2, 2) tmp1 = zeros(RealT, 3, length(nodes), length(nodes_in), length(nodes_in)) - for itree in 0:(num_local_trees - 1) + for itree in 0:(num_trees - 1) veptr = t8_cmesh_get_tree_vertices(cmesh, itree) verts = unsafe_wrap(Array, veptr, (3, 1 << NDIMS)) @@ -367,7 +371,7 @@ function T8codeMesh{NDIMS}(cmesh::Ptr{t8_cmesh}; map_node_coordinates!(tree_node_coordinates, mapping) # There's no simple and generic way to distinguish boundaries. Name all of them :all. - boundary_names = fill(:all, 2 * NDIMS, num_local_trees) + boundary_names = fill(:all, 2 * NDIMS, num_trees) return T8codeMesh{NDIMS}(forest, tree_node_coordinates, nodes, boundary_names, "") @@ -466,3 +470,7 @@ end function partition!(mesh::T8codeMesh; allow_coarsening = true, weight_fn = C_NULL) return nothing end + +function update_ghost_layer!(mesh::ParallelT8codeMesh) + return nothing +end diff --git a/src/solvers/dgsem_p4est/containers_parallel.jl b/src/solvers/dgsem_p4est/containers_parallel.jl index 7c7bd868457..ba9e44a6264 100644 --- a/src/solvers/dgsem_p4est/containers_parallel.jl +++ b/src/solvers/dgsem_p4est/containers_parallel.jl @@ -43,7 +43,7 @@ function Base.resize!(mpi_interfaces::P4estMPIInterfaceContainer, capacity) end # Create MPI interface container and initialize interface data -function init_mpi_interfaces(mesh::ParallelP4estMesh, equations, basis, elements) +function init_mpi_interfaces(mesh::Union{ParallelP4estMesh,ParallelT8codeMesh}, equations, basis, elements) NDIMS = ndims(elements) uEltype = eltype(elements) @@ -133,7 +133,7 @@ function Base.resize!(mpi_mortars::P4estMPIMortarContainer, capacity) end # Create MPI mortar container and initialize MPI mortar data -function init_mpi_mortars(mesh::ParallelP4estMesh, equations, basis, elements) +function init_mpi_mortars(mesh::Union{ParallelP4estMesh,ParallelT8codeMesh}, equations, basis, elements) NDIMS = ndims(mesh) RealT = real(mesh) uEltype = eltype(elements) diff --git a/src/solvers/dgsem_p4est/dg_2d_parallel.jl b/src/solvers/dgsem_p4est/dg_2d_parallel.jl index a8887351c46..c37d4c755b3 100644 --- a/src/solvers/dgsem_p4est/dg_2d_parallel.jl +++ b/src/solvers/dgsem_p4est/dg_2d_parallel.jl @@ -6,7 +6,7 @@ #! 
format: noindent function prolong2mpiinterfaces!(cache, u, - mesh::ParallelP4estMesh{2}, + mesh::Union{ParallelP4estMesh{2},ParallelT8codeMesh{2}}, equations, surface_integral, dg::DG) @unpack mpi_interfaces = cache index_range = eachnode(dg) @@ -43,7 +43,7 @@ function prolong2mpiinterfaces!(cache, u, end function calc_mpi_interface_flux!(surface_flux_values, - mesh::ParallelP4estMesh{2}, + mesh::Union{ParallelP4estMesh{2},ParallelT8codeMesh{2}}, nonconservative_terms, equations, surface_integral, dg::DG, cache) @unpack local_neighbor_ids, node_indices, local_sides = cache.mpi_interfaces @@ -106,7 +106,7 @@ end # Inlined version of the interface flux computation for conservation laws @inline function calc_mpi_interface_flux!(surface_flux_values, - mesh::P4estMesh{2}, + mesh::Union{ParallelP4estMesh{2},ParallelT8codeMesh{2}}, nonconservative_terms::False, equations, surface_integral, dg::DG, cache, interface_index, normal_direction, @@ -131,7 +131,7 @@ end end function prolong2mpimortars!(cache, u, - mesh::ParallelP4estMesh{2}, equations, + mesh::Union{ParallelP4estMesh{2},ParallelT8codeMesh{2}}, equations, mortar_l2::LobattoLegendreMortarL2, surface_integral, dg::DGSEM) @unpack node_indices = cache.mpi_mortars @@ -199,7 +199,7 @@ function prolong2mpimortars!(cache, u, end function calc_mpi_mortar_flux!(surface_flux_values, - mesh::ParallelP4estMesh{2}, + mesh::Union{ParallelP4estMesh{2},ParallelT8codeMesh{2}}, nonconservative_terms, equations, mortar_l2::LobattoLegendreMortarL2, surface_integral, dg::DG, cache) @@ -253,7 +253,7 @@ end # Inlined version of the mortar flux computation on small elements for conservation laws @inline function calc_mpi_mortar_flux!(fstar, - mesh::ParallelP4estMesh{2}, + mesh::Union{ParallelP4estMesh{2},ParallelT8codeMesh{2}}, nonconservative_terms::False, equations, surface_integral, dg::DG, cache, mortar_index, position_index, normal_direction, @@ -271,7 +271,7 @@ end end @inline function mpi_mortar_fluxes_to_elements!(surface_flux_values, - mesh::ParallelP4estMesh{2}, equations, + mesh::Union{ParallelP4estMesh{2},ParallelT8codeMesh{2}}, equations, mortar_l2::LobattoLegendreMortarL2, dg::DGSEM, cache, mortar, fstar, u_buffer) diff --git a/src/solvers/dgsem_p4est/dg_3d_parallel.jl b/src/solvers/dgsem_p4est/dg_3d_parallel.jl index 13bf2a1a2eb..eec744e5d47 100644 --- a/src/solvers/dgsem_p4est/dg_3d_parallel.jl +++ b/src/solvers/dgsem_p4est/dg_3d_parallel.jl @@ -6,7 +6,7 @@ #! 
format: noindent function rhs!(du, u, t, - mesh::ParallelP4estMesh{3}, equations, + mesh::Union{ParallelP4estMesh{3},ParallelT8codeMesh{3}}, equations, initial_condition, boundary_conditions, source_terms::Source, dg::DG, cache) where {Source} # Start to receive MPI data @@ -113,7 +113,7 @@ function rhs!(du, u, t, end function prolong2mpiinterfaces!(cache, u, - mesh::ParallelP4estMesh{3}, + mesh::Union{ParallelP4estMesh{3},ParallelT8codeMesh{3}}, equations, surface_integral, dg::DG) @unpack mpi_interfaces = cache index_range = eachnode(dg) @@ -160,7 +160,7 @@ function prolong2mpiinterfaces!(cache, u, end function calc_mpi_interface_flux!(surface_flux_values, - mesh::ParallelP4estMesh{3}, + mesh::Union{ParallelP4estMesh{3},ParallelT8codeMesh{3}}, nonconservative_terms, equations, surface_integral, dg::DG, cache) @unpack local_neighbor_ids, node_indices, local_sides = cache.mpi_interfaces @@ -237,7 +237,7 @@ end # Inlined version of the interface flux computation for conservation laws @inline function calc_mpi_interface_flux!(surface_flux_values, - mesh::P4estMesh{3}, + mesh::Union{ParallelP4estMesh{3},ParallelT8codeMesh{3}}, nonconservative_terms::False, equations, surface_integral, dg::DG, cache, interface_index, normal_direction, @@ -265,7 +265,7 @@ end end function prolong2mpimortars!(cache, u, - mesh::ParallelP4estMesh{3}, equations, + mesh::Union{ParallelP4estMesh{3},ParallelT8codeMesh{3}}, equations, mortar_l2::LobattoLegendreMortarL2, surface_integral, dg::DGSEM) @unpack node_indices = cache.mpi_mortars @@ -374,7 +374,7 @@ function prolong2mpimortars!(cache, u, end function calc_mpi_mortar_flux!(surface_flux_values, - mesh::ParallelP4estMesh{3}, + mesh::Union{ParallelP4estMesh{3},ParallelT8codeMesh{3}}, nonconservative_terms, equations, mortar_l2::LobattoLegendreMortarL2, surface_integral, dg::DG, cache) @@ -437,7 +437,7 @@ end # Inlined version of the mortar flux computation on small elements for conservation laws @inline function calc_mpi_mortar_flux!(fstar, - mesh::ParallelP4estMesh{3}, + mesh::Union{ParallelP4estMesh{3},ParallelT8codeMesh{3}}, nonconservative_terms::False, equations, surface_integral, dg::DG, cache, mortar_index, position_index, normal_direction, @@ -456,7 +456,7 @@ end end @inline function mpi_mortar_fluxes_to_elements!(surface_flux_values, - mesh::ParallelP4estMesh{3}, equations, + mesh::Union{ParallelP4estMesh{3},ParallelT8codeMesh{3}}, equations, mortar_l2::LobattoLegendreMortarL2, dg::DGSEM, cache, mortar, fstar, u_buffer, fstar_tmp) diff --git a/src/solvers/dgsem_p4est/dg_parallel.jl b/src/solvers/dgsem_p4est/dg_parallel.jl index 712ede2bfce..e191427a79d 100644 --- a/src/solvers/dgsem_p4est/dg_parallel.jl +++ b/src/solvers/dgsem_p4est/dg_parallel.jl @@ -166,7 +166,7 @@ end # at `index_base`+1 in the MPI buffer. `data_size` is the data size associated with each small # position (i.e. position 1 or 2). The data corresponding to the large side (i.e. position 3) has # size `2 * data_size`. -@inline function buffer_mortar_indices(mesh::ParallelP4estMesh{2}, index_base, +@inline function buffer_mortar_indices(mesh::Union{ParallelP4estMesh{2},ParallelT8codeMesh{2}}, index_base, data_size) return ( # first, last for local element in position 1 (small element) @@ -185,7 +185,7 @@ end # at `index_base`+1 in the MPI buffer. `data_size` is the data size associated with each small # position (i.e. position 1 to 4). The data corresponding to the large side (i.e. position 5) has # size `4 * data_size`. 
-@inline function buffer_mortar_indices(mesh::ParallelP4estMesh{3}, index_base, +@inline function buffer_mortar_indices(mesh::Union{ParallelP4estMesh{3},ParallelT8codeMesh{3}}, index_base, data_size) return ( # first, last for local element in position 1 (small element) @@ -491,7 +491,7 @@ end # Exchange normal directions of small elements of the MPI mortars. They are needed on all involved # MPI ranks to calculate the mortar fluxes. -function exchange_normal_directions!(mpi_mortars, mpi_cache, mesh::ParallelP4estMesh, +function exchange_normal_directions!(mpi_mortars, mpi_cache, mesh::Union{ParallelP4estMesh,ParallelT8codeMesh}, n_nodes) RealT = real(mesh) n_dims = ndims(mesh) diff --git a/src/solvers/dgsem_t8code/containers.jl b/src/solvers/dgsem_t8code/containers.jl index 093feb2985a..c6810e61847 100644 --- a/src/solvers/dgsem_t8code/containers.jl +++ b/src/solvers/dgsem_t8code/containers.jl @@ -18,19 +18,22 @@ function reinitialize_containers!(mesh::T8codeMesh, equations, dg::DGSEM, cache) @unpack boundaries = cache resize!(boundaries, mesh.nboundaries) - trixi_t8_fill_mesh_info(mesh.forest, elements, interfaces, mortars, boundaries, + trixi_t8_fill_mesh_info(mesh, elements, interfaces, mortars, boundaries, mesh.boundary_names) return nothing end function count_required_surfaces!(mesh::T8codeMesh) - counts = trixi_t8_count_interfaces(mesh.forest) + counts = trixi_t8_count_interfaces(mesh) mesh.nmortars = counts.mortars mesh.ninterfaces = counts.interfaces mesh.nboundaries = counts.boundaries + mesh.nmpimortars = counts.mpi_mortars + mesh.nmpiinterfaces = counts.mpi_interfaces + return counts end @@ -38,7 +41,9 @@ end function count_required_surfaces(mesh::T8codeMesh) return (interfaces = mesh.ninterfaces, mortars = mesh.nmortars, - boundaries = mesh.nboundaries) + boundaries = mesh.nboundaries, + mpi_interfaces = mesh.nmpiinterfaces, + mpi_mortars = mesh.nmpimortars) end # Compatibility to `dgsem_p4est/containers.jl`. 
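[Annotation] The `buffer_mortar_indices` methods just widened above define the MPI buffer layout per mortar: one `data_size` slot for each small position, followed by a double-width (2D) or quadruple-width (3D) slot for the large side. The resulting 2D index ranges, sketched with a hypothetical `data_size`:

    data_size = 12  # hypothetical: nvariables * nnodes per face
    index_base = 0
    ranges = ((index_base + 1, index_base + data_size),                      # small position 1
              (index_base + data_size + 1, index_base + 2 * data_size),     # small position 2
              (index_base + 2 * data_size + 1, index_base + 4 * data_size)) # large side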
diff --git a/src/solvers/dgsem_t8code/containers_2d.jl b/src/solvers/dgsem_t8code/containers_2d.jl index 79bb28d6122..a30f0def50b 100644 --- a/src/solvers/dgsem_t8code/containers_2d.jl +++ b/src/solvers/dgsem_t8code/containers_2d.jl @@ -26,6 +26,7 @@ function calc_node_coordinates!(node_coordinates, tree_class = t8_forest_get_tree_class(mesh.forest, itree) eclass_scheme = t8_forest_get_eclass_scheme(mesh.forest, tree_class) num_elements_in_tree = t8_forest_get_tree_num_elements(mesh.forest, itree) + global_itree = t8_forest_global_tree_id(mesh.forest, itree) for ielement in 0:(num_elements_in_tree - 1) element = t8_forest_get_element_in_tree(mesh.forest, itree, ielement) @@ -52,7 +53,7 @@ function calc_node_coordinates!(node_coordinates, multiply_dimensionwise!(view(node_coordinates, :, :, :, current_index += 1), matrix1, matrix2, view(mesh.tree_node_coordinates, :, :, :, - itree + 1), + global_itree + 1), tmp1) end end @@ -71,4 +72,5 @@ function trixi_t8_init_mortar_neighbor_ids!(mortars::P4estMortarContainer{2}, my mortars.neighbor_ids[2, mortar_id] = neighbor_ielements[1] + 1 end end + end # @muladd diff --git a/src/solvers/dgsem_t8code/containers_3d.jl b/src/solvers/dgsem_t8code/containers_3d.jl index e2fe996313c..756543d00ee 100644 --- a/src/solvers/dgsem_t8code/containers_3d.jl +++ b/src/solvers/dgsem_t8code/containers_3d.jl @@ -28,6 +28,7 @@ function calc_node_coordinates!(node_coordinates, tree_class = t8_forest_get_tree_class(mesh.forest, itree) eclass_scheme = t8_forest_get_eclass_scheme(mesh.forest, tree_class) num_elements_in_tree = t8_forest_get_tree_num_elements(mesh.forest, itree) + global_itree = t8_forest_global_tree_id(mesh.forest, itree) for ielement in 0:(num_elements_in_tree - 1) element = t8_forest_get_element_in_tree(mesh.forest, itree, ielement) @@ -60,7 +61,7 @@ function calc_node_coordinates!(node_coordinates, current_index += 1), matrix1, matrix2, matrix3, view(mesh.tree_node_coordinates, :, :, :, :, - itree + 1), + global_itree + 1), tmp1) end end @@ -229,4 +230,5 @@ function trixi_t8_init_mortar_neighbor_ids!(mortars::P4estMortarContainer{3}, my end end end + end # @muladd diff --git a/src/solvers/dgsem_t8code/containers_parallel.jl b/src/solvers/dgsem_t8code/containers_parallel.jl new file mode 100644 index 00000000000..6a7dd68c619 --- /dev/null +++ b/src/solvers/dgsem_t8code/containers_parallel.jl @@ -0,0 +1,70 @@ +function reinitialize_containers!(mesh::ParallelT8codeMesh, equations, dg::DGSEM, cache) + @unpack elements = cache + resize!(elements, ncells(mesh)) + init_elements!(elements, mesh, dg.basis) + + count_required_surfaces!(mesh) + required = count_required_surfaces(mesh) + + @unpack interfaces = cache + resize!(interfaces, required.interfaces) + + @unpack boundaries = cache + resize!(boundaries, required.boundaries) + + @unpack mortars = cache + resize!(mortars, required.mortars) + + @unpack mpi_interfaces = cache + resize!(mpi_interfaces, required.mpi_interfaces) + + @unpack mpi_mortars = cache + resize!(mpi_mortars, required.mpi_mortars) + + mpi_mesh_info = ( + mpi_mortars = mpi_mortars, + mpi_interfaces = mpi_interfaces, + + # Temporary arrays for updating `mpi_cache`. 
+ global_mortar_ids = fill(UInt64(0), nmpimortars(mpi_mortars)), + global_interface_ids = fill(UInt64(0), nmpiinterfaces(mpi_interfaces)), + neighbor_ranks_mortar = Vector{Vector{Int}}(undef, nmpimortars(mpi_mortars)), + neighbor_ranks_interface = fill(-1, nmpiinterfaces(mpi_interfaces)), + ) + + trixi_t8_fill_mesh_info(mesh, elements, interfaces, mortars, boundaries, + mesh.boundary_names; mpi_mesh_info = mpi_mesh_info) + + @unpack mpi_cache = cache + init_mpi_cache!(mpi_cache, mesh, mpi_mesh_info, nvariables(equations), nnodes(dg), eltype(elements)) + + empty!(mpi_mesh_info.global_mortar_ids) + empty!(mpi_mesh_info.global_interface_ids) + empty!(mpi_mesh_info.neighbor_ranks_mortar) + empty!(mpi_mesh_info.neighbor_ranks_interface) + + # Re-initialize and distribute normal directions of MPI mortars; requires + # MPI communication, so the MPI cache must be re-initialized beforehand. + init_normal_directions!(mpi_mortars, dg.basis, elements) + exchange_normal_directions!(mpi_mortars, mpi_cache, mesh, nnodes(dg)) + + return nothing +end + +# Compatibility to `dgsem_p4est/containers.jl`. +function init_mpi_interfaces!(interfaces, mesh::ParallelT8codeMesh) + # Do nothing. + return nothing +end + +# Compatibility to `dgsem_p4est/containers.jl`. +function init_mpi_mortars!(mortars, mesh::ParallelT8codeMesh) + # Do nothing. + return nothing +end + +# Compatibility to `dgsem_p4est/containers_parallel.jl`. +function init_mpi_mortars!(mpi_mortars, mesh::ParallelT8codeMesh, basis, elements) + # Do nothing. + return nothing +end diff --git a/src/solvers/dgsem_t8code/dg.jl b/src/solvers/dgsem_t8code/dg.jl index 6e9660c917d..d30d44a3238 100644 --- a/src/solvers/dgsem_t8code/dg.jl +++ b/src/solvers/dgsem_t8code/dg.jl @@ -13,7 +13,7 @@ function create_cache(mesh::T8codeMesh, equations::AbstractEquations, dg::DG, :: boundaries = init_boundaries(mesh, equations, dg.basis, elements) mortars = init_mortars(mesh, equations, dg.basis, elements) - trixi_t8_fill_mesh_info(mesh.forest, elements, interfaces, mortars, boundaries, + trixi_t8_fill_mesh_info(mesh, elements, interfaces, mortars, boundaries, mesh.boundary_names) cache = (; elements, interfaces, boundaries, mortars) @@ -29,4 +29,9 @@ end include("containers.jl") include("containers_2d.jl") include("containers_3d.jl") + +include("containers_parallel.jl") + +include("dg_parallel.jl") + end # @muladd diff --git a/src/solvers/dgsem_t8code/dg_parallel.jl b/src/solvers/dgsem_t8code/dg_parallel.jl new file mode 100644 index 00000000000..b0bd89e8e2f --- /dev/null +++ b/src/solvers/dgsem_t8code/dg_parallel.jl @@ -0,0 +1,131 @@ +@muladd begin +#! format: noindent + +# This method is called when a SemidiscretizationHyperbolic is constructed. +# It constructs the basic `cache` used throughout the simulation to compute +# the RHS etc. +function create_cache(mesh::ParallelT8codeMesh, equations::AbstractEquations, dg::DG, ::Any, + ::Type{uEltype}) where {uEltype <: Real} + # Make sure to balance and partition the forest before creating any + # containers in case someone has tampered with the p4est after creating the + # mesh. 
+ balance!(mesh) + partition!(mesh) + + count_required_surfaces!(mesh) + + elements = init_elements(mesh, equations, dg.basis, uEltype) + mortars = init_mortars(mesh, equations, dg.basis, elements) + interfaces = init_interfaces(mesh, equations, dg.basis, elements) + boundaries = init_boundaries(mesh, equations, dg.basis, elements) + + mpi_mortars = init_mpi_mortars(mesh, equations, dg.basis, elements) + mpi_interfaces = init_mpi_interfaces(mesh, equations, dg.basis, elements) + + mpi_mesh_info = ( + mpi_mortars = mpi_mortars, + mpi_interfaces = mpi_interfaces, + global_mortar_ids = fill(UInt64(0), nmpimortars(mpi_mortars)), + global_interface_ids = fill(UInt64(0), nmpiinterfaces(mpi_interfaces)), + neighbor_ranks_mortar = Vector{Vector{Int}}(undef, nmpimortars(mpi_mortars)), + neighbor_ranks_interface = fill(-1, nmpiinterfaces(mpi_interfaces)), + ) + + trixi_t8_fill_mesh_info(mesh, elements, interfaces, mortars, boundaries, + mesh.boundary_names; mpi_mesh_info = mpi_mesh_info) + + mpi_cache = init_mpi_cache(mesh, mpi_mesh_info, nvariables(equations), nnodes(dg), uEltype) + + empty!(mpi_mesh_info.global_mortar_ids) + empty!(mpi_mesh_info.global_interface_ids) + empty!(mpi_mesh_info.neighbor_ranks_mortar) + empty!(mpi_mesh_info.neighbor_ranks_interface) + + init_normal_directions!(mpi_mortars, dg.basis, elements) + exchange_normal_directions!(mpi_mortars, mpi_cache, mesh, nnodes(dg)) + + cache = (; elements, interfaces, mpi_interfaces, boundaries, mortars, mpi_mortars, + mpi_cache) + + # Add specialized parts of the cache required to compute the volume integral etc. + cache = (; cache..., + create_cache(mesh, equations, dg.volume_integral, dg, uEltype)...) + cache = (; cache..., create_cache(mesh, equations, dg.mortar, uEltype)...) + + return cache +end + +function init_mpi_cache(mesh::ParallelT8codeMesh, mpi_mesh_info, nvars, nnodes, uEltype) + mpi_cache = P4estMPICache(uEltype) + init_mpi_cache!(mpi_cache, mesh, mpi_mesh_info, nvars, nnodes, uEltype) + return mpi_cache +end + +function init_mpi_cache!(mpi_cache::P4estMPICache, mesh::ParallelT8codeMesh, mpi_mesh_info, nvars, nnodes, uEltype) + mpi_neighbor_ranks, mpi_neighbor_interfaces, mpi_neighbor_mortars = init_mpi_neighbor_connectivity(mpi_mesh_info, mesh) + + mpi_send_buffers, mpi_recv_buffers, mpi_send_requests, mpi_recv_requests = init_mpi_data_structures(mpi_neighbor_interfaces, + mpi_neighbor_mortars, + ndims(mesh), + nvars, + nnodes, + uEltype) + + n_elements_global = Int(t8_forest_get_global_num_elements(mesh.forest)) + n_elements_local = Int(t8_forest_get_local_num_elements(mesh.forest)) + + n_elements_by_rank = Vector{Int}(undef, mpi_nranks()) + n_elements_by_rank[mpi_rank() + 1] = n_elements_local + + MPI.Allgather!(MPI.UBuffer(n_elements_by_rank, 1), mpi_comm()) + + n_elements_by_rank = OffsetArray(n_elements_by_rank, 0:(mpi_nranks() - 1)) + + # Account for 1-based indexing in Julia. + first_element_global_id = sum(n_elements_by_rank[0:(mpi_rank()-1)]) + 1 + + @assert n_elements_global==sum(n_elements_by_rank) "error in total number of elements" + + @pack! 
mpi_cache = mpi_neighbor_ranks, mpi_neighbor_interfaces, + mpi_neighbor_mortars, + mpi_send_buffers, mpi_recv_buffers, + mpi_send_requests, mpi_recv_requests, + n_elements_by_rank, n_elements_global, + first_element_global_id + + return mpi_cache +end + +function init_mpi_neighbor_connectivity(mpi_mesh_info, mesh::ParallelT8codeMesh) + + @unpack mpi_interfaces, mpi_mortars, global_interface_ids, neighbor_ranks_interface, global_mortar_ids, neighbor_ranks_mortar = mpi_mesh_info + + mpi_neighbor_ranks = vcat(neighbor_ranks_interface, neighbor_ranks_mortar...) |> sort |> unique + + p = sortperm(global_interface_ids) + + neighbor_ranks_interface .= neighbor_ranks_interface[p] + interface_ids = collect(1:nmpiinterfaces(mpi_interfaces))[p] + + p = sortperm(global_mortar_ids) + neighbor_ranks_mortar .= neighbor_ranks_mortar[p] + mortar_ids = collect(1:nmpimortars(mpi_mortars))[p] + + # For each neighbor rank, init connectivity data structures + mpi_neighbor_interfaces = Vector{Vector{Int}}(undef, length(mpi_neighbor_ranks)) + mpi_neighbor_mortars = Vector{Vector{Int}}(undef, length(mpi_neighbor_ranks)) + for (index, d) in enumerate(mpi_neighbor_ranks) + mpi_neighbor_interfaces[index] = interface_ids[findall(==(d), + neighbor_ranks_interface)] + mpi_neighbor_mortars[index] = mortar_ids[findall(x -> (d in x), + neighbor_ranks_mortar)] + end + + # Check that all interfaces were counted exactly once + @assert mapreduce(length, +, mpi_neighbor_interfaces; init = 0) == + nmpiinterfaces(mpi_interfaces) + + return mpi_neighbor_ranks, mpi_neighbor_interfaces, mpi_neighbor_mortars +end + +end # @muladd diff --git a/src/solvers/dgsem_tree/dg_2d_parallel.jl b/src/solvers/dgsem_tree/dg_2d_parallel.jl index 8095dae123a..5505293eafe 100644 --- a/src/solvers/dgsem_tree/dg_2d_parallel.jl +++ b/src/solvers/dgsem_tree/dg_2d_parallel.jl @@ -446,7 +446,7 @@ function init_mpi_neighbor_connectivity(elements, mpi_interfaces, mpi_mortars, end function rhs!(du, u, t, - mesh::Union{ParallelTreeMesh{2}, ParallelP4estMesh{2}}, equations, + mesh::Union{ParallelTreeMesh{2}, ParallelP4estMesh{2}, ParallelT8codeMesh{2}}, equations, initial_condition, boundary_conditions, source_terms::Source, dg::DG, cache) where {Source} # Start to receive MPI data diff --git a/test/test_mpi_t8code_2d.jl b/test/test_mpi_t8code_2d.jl new file mode 100644 index 00000000000..b92d51cb44c --- /dev/null +++ b/test/test_mpi_t8code_2d.jl @@ -0,0 +1,88 @@ +module TestExamplesMPIT8codeMesh2D + +using Test +using Trixi + +include("test_trixi.jl") + +const EXAMPLES_DIR = pkgdir(Trixi, "examples", "t8code_2d_dgsem") + +@testset "T8codeMesh MPI 2D" begin +#! format: noindent + +# Run basic tests +@testset "Examples 2D" begin + # Linear scalar advection + @trixi_testset "elixir_advection_basic.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_basic.jl"), + # Expected errors are exactly the same as with TreeMesh! 
+ l2=[8.311947673061856e-6], + linf=[6.627000273229378e-5]) + + @testset "error-based step size control" begin + Trixi.mpi_isroot() && println("-"^100) + Trixi.mpi_isroot() && + println("elixir_advection_basic.jl with error-based step size control") + + sol = solve(ode, RDPK3SpFSAL35(); abstol = 1.0e-4, reltol = 1.0e-4, + ode_default_options()..., callback = callbacks) + summary_callback() + errors = analysis_callback(sol) + if Trixi.mpi_isroot() + @test errors.l2≈[3.3022040342579066e-5] rtol=1.0e-4 + @test errors.linf≈[0.00011787417954578494] rtol=1.0e-4 + end + end + end + + @trixi_testset "elixir_advection_nonconforming_flag.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_advection_nonconforming_flag.jl"), + l2=[3.198940059144588e-5], + linf=[0.00030636069494005547]) + end + + @trixi_testset "elixir_advection_unstructured_flag.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_advection_unstructured_flag.jl"), + l2=[0.0005379687442422346], + linf=[0.007438525029884735]) + end + + @trixi_testset "elixir_advection_amr_solution_independent.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_advection_amr_solution_independent.jl"), + # Expected errors are exactly the same as with TreeMesh! + l2=[4.949660644033807e-5], + linf=[0.0004867846262313763], + coverage_override=(maxiters = 6,)) + end + + @trixi_testset "elixir_advection_amr_unstructured_flag.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_advection_amr_unstructured_flag.jl"), + l2=[0.001993165013217687], + linf=[0.032891018571625796], + coverage_override=(maxiters = 6,)) + end + + @trixi_testset "elixir_euler_source_terms_nonconforming_unstructured_flag.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_euler_source_terms_nonconforming_unstructured_flag.jl"), + l2=[ + 0.0034516244508588046, + 0.0023420334036925493, + 0.0024261923964557187, + 0.004731710454271893, + ], + linf=[ + 0.04155789011775046, + 0.024772109862748914, + 0.03759938693042297, + 0.08039824959535657, + ]) + end +end +end # T8codeMesh MPI + +end # module diff --git a/test/test_mpi_t8code_3d.jl b/test/test_mpi_t8code_3d.jl new file mode 100644 index 00000000000..4d7bc285850 --- /dev/null +++ b/test/test_mpi_t8code_3d.jl @@ -0,0 +1,127 @@ +module TestExamplesMPIT8codeMesh3D + +using Test +using Trixi + +include("test_trixi.jl") + +const EXAMPLES_DIR = pkgdir(Trixi, "examples", "t8code_3d_dgsem") + +@testset "T8codeMesh MPI 3D" begin +#! format: noindent + +# Run basic tests +@testset "Examples 3D" begin + # Linear scalar advection + @trixi_testset "elixir_advection_basic.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_basic.jl"), + # Expected errors are exactly the same as with TreeMesh! + l2=[0.00016263963870641478], + linf=[0.0014537194925779984]) + + @testset "error-based step size control" begin + Trixi.mpi_isroot() && println("-"^100) + Trixi.mpi_isroot() && + println("elixir_advection_basic.jl with error-based step size control") + + sol = solve(ode, RDPK3SpFSAL35(); abstol = 1.0e-4, reltol = 1.0e-4, + ode_default_options()..., callback = callbacks) + summary_callback() + errors = analysis_callback(sol) + if Trixi.mpi_isroot() + @test errors.l2≈[0.00016800412839949264] rtol=1.0e-4 + @test errors.linf≈[0.0014548839020096516] rtol=1.0e-4 + end + end + end + + @trixi_testset "elixir_advection_amr.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_amr.jl"), + # Expected errors are exactly the same as with TreeMesh! 
+ l2=[1.1302812803902801e-5], + linf=[0.0007889950196294793], + # override values are different from the serial tests to ensure each process holds at least + # one element, otherwise OrdinaryDiffEq fails during initialization + coverage_override=(maxiters = 6, + initial_refinement_level = 2, + base_level = 2, med_level = 3, + max_level = 4)) + end + + @trixi_testset "elixir_advection_amr_unstructured_curved.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_advection_amr_unstructured_curved.jl"), + l2=[2.0556575425846923e-5], + linf=[0.00105682693484822], + tspan=(0.0, 1.0), + coverage_override=(maxiters = 6, + initial_refinement_level = 0, + base_level = 0, med_level = 1, + max_level = 2)) + end + + # Compressible Euler + @trixi_testset "elixir_euler_source_terms_nonconforming_unstructured_curved.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_euler_source_terms_nonconforming_unstructured_curved.jl"), + l2=[ + 4.070355207909268e-5, + 4.4993257426833716e-5, + 5.10588457841744e-5, + 5.102840924036687e-5, + 0.00019986264001630542, + ], + linf=[ + 0.0016987332417202072, + 0.003622956808262634, + 0.002029576258317789, + 0.0024206977281964193, + 0.008526972236273522, + ], + tspan=(0.0, 0.01)) + end + + @trixi_testset "elixir_euler_source_terms_nonperiodic.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_euler_source_terms_nonperiodic.jl"), + l2=[ + 0.0015106060984283647, + 0.0014733349038567685, + 0.00147333490385685, + 0.001473334903856929, + 0.0028149479453087093, + ], + linf=[ + 0.008070806335238156, + 0.009007245083113125, + 0.009007245083121784, + 0.009007245083102688, + 0.01562861968368434, + ], + tspan=(0.0, 1.0)) + end + + @trixi_testset "elixir_euler_ec.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_ec.jl"), + l2=[ + 0.010380390326164493, + 0.006192950051354618, + 0.005970674274073704, + 0.005965831290564327, + 0.02628875593094754, + ], + linf=[ + 0.3326911600075694, + 0.2824952141320467, + 0.41401037398065543, + 0.45574161423218573, + 0.8099577682187109, + ], + tspan=(0.0, 0.2), + coverage_override=(polydeg = 3,)) # Prevent long compile time in CI + end + +end +end # T8codeMesh MPI + +end # module
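[Annotation] For completeness: a hypothetical way to exercise the new test files locally (the launcher and rank count are assumptions, not part of the patch; CI drives them through Trixi's regular MPI test harness):

    # Hypothetical invocation with 2 MPI ranks, using MPI.jl's mpiexecjl wrapper:
    #   mpiexecjl -n 2 julia --project=. test/test_mpi_t8code_2d.jl
    #   mpiexecjl -n 2 julia --project=. test/test_mpi_t8code_3d.jl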