From 9b6852d70709b0971b07dd08af0fe2d78d4a068a Mon Sep 17 00:00:00 2001
From: Ahmad Peyvan <115842305+apey236@users.noreply.github.com>
Date: Sat, 23 Dec 2023 10:18:42 -0500
Subject: [PATCH] Parabolic Mortar for AMR `P4estMesh{3}` (#1765)

* Clean branch
* Un-Comment
* un-comment
* test coarsen
* remove redundancy
* Remove support for passive terms
* expand resize
* comments
* format
* Avoid code duplication
* Update src/callbacks_step/amr_dg1d.jl Co-authored-by: Michael Schlottke-Lakemper
* comment
* comment & format
* Try to increase coverage
* Slightly more expressive names
* Apply suggestions from code review
* add specifier for 1d
* Structs for resizing parabolic helpers
* check if mortars are present
* reuse `reinitialize_containers!`
* resize calls for parabolic helpers
* update analysis callbacks
* Velocities for compr euler
* Init container
* correct copy-paste error
* resize each dim
* add dispatch
* Add AMR for shear layer
* Use only amr shear layer
* first steps towards p4est parabolic amr
* Add tests
* remove plots
* Format
* remove redundant line
* platform independent tests
* No need for different flux_viscous comps after adding container_viscous to p4est
* Laplace 3d
* Longer times to allow coverage to hit coarsen!
* Increase testing of Laplace 3D
* Add tests for velocities
* remove comment
* add elixir for amr testing
* adding commented out mortar routines in 2D
* Adding Mortar to 2d dg parabolic term
* remove testing snippet
* fix comments
* add more arguments for dispatch
* add some temporary todo notes
* some updates for AP and KS
* specialize mortar_fluxes_to_elements
* BUGFIX: apply_jacobian_parabolic! was incorrect for P4estMesh
* fixed rhs_parabolic! for mortars
* more changes to elixir
* indexing bug
* comments
* Adding the example for nonperiodic BCs with amr
* hopefully this fixes AMR boundaries for parabolic terms
* add elixir
* Example with non-periodic boundary conditions
* remove cruft
* 3D parabolic amr
* TGV elixir
* Creating test for AMR 3D parabolic
* Formatting
* test formatting
* Update src/Trixi.jl
* Update src/equations/compressible_euler_1d.jl
* Update src/equations/compressible_euler_2d.jl
* Update src/equations/compressible_euler_3d.jl
* Update src/solvers/dgsem_tree/container_viscous_2d.jl
* Update src/solvers/dgsem_tree/container_viscous_2d.jl
* Update src/solvers/dgsem_tree/container_viscous_2d.jl
* Update src/solvers/dgsem_tree/container_viscous_3d.jl
* Update src/solvers/dgsem_tree/container_viscous_3d.jl
* Update src/solvers/dgsem_tree/container_viscous_3d.jl
* Update src/solvers/dgsem_p4est/dg_2d_parabolic.jl
* Update src/solvers/dgsem_p4est/dg_2d_parabolic.jl
* Update src/solvers/dgsem_tree/dg_2d_parabolic.jl
* Update src/solvers/dgsem_tree/dg_2d_parabolic.jl
* Update src/solvers/dgsem_tree/dg_2d_parabolic.jl
* Update src/solvers/dgsem_tree/dg_2d_parabolic.jl
* Update src/solvers/dgsem_tree/dg_2d_parabolic.jl
* Update src/solvers/dgsem_tree/dg_2d_parabolic.jl
* Update src/solvers/dgsem_tree/dg_2d_parabolic.jl
* Update src/solvers/dgsem_tree/dg_2d_parabolic.jl
* Update src/solvers/dgsem_tree/dg_2d_parabolic.jl
* Update test/test_parabolic_3d.jl
* Update src/solvers/dgsem_tree/dg_3d_parabolic.jl
* Update src/solvers/dgsem_tree/dg_3d_parabolic.jl
* Update src/solvers/dgsem_tree/dg_3d_parabolic.jl
* Update src/solvers/dgsem_tree/dg_3d_parabolic.jl
* Update src/solvers/dgsem_tree/dg_3d_parabolic.jl
* Update src/solvers/dgsem_tree/dg_3d_parabolic.jl
* Update src/solvers/dgsem_tree/dg_3d_parabolic.jl
* Update
src/solvers/dgsem_tree/dg_3d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_3d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_3d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_3d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_3d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_2d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_2d.jl * Update src/solvers/dgsem_p4est/dg_2d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_2d_parabolic.jl * Update test/test_parabolic_3d.jl * Update test/test_parabolic_3d.jl * Update test/test_parabolic_3d.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_3d_blast_wave_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_3d_blast_wave_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_3d_blast_wave_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_3d_blast_wave_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_3d_blast_wave_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_3d_blast_wave_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex_amr.jl * Update dg_3d_parabolic.jl * Update test_parabolic_3d.jl * Update elixir_navierstokes_3d_blast_wave_amr.jl * Update elixir_navierstokes_taylor_green_vortex_amr.jl * Update dg_3d_parabolic.jl * Update test_parabolic_3d.jl * Update test_parabolic_3d.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_3d_blast_wave_amr.jl * Update elixir_navierstokes_3d_blast_wave_amr.jl * Update elixir_navierstokes_taylor_green_vortex_amr.jl * Update dg_3d_parabolic.jl * Update test_parabolic_3d.jl * Delete examples/p4est_3d_dgsem/elixir_navierstokes_3d_blast_wave_amr.jl * Create elixir_navierstokes_blast_wave_amr.jl * Update test_parabolic_3d.jl * Update NEWS.md * Update NEWS.md * Update src/solvers/dgsem_p4est/dg_3d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_3d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_3d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_3d_parabolic.jl --------- Co-authored-by: Daniel_Doehring Co-authored-by: Daniel Doehring Co-authored-by: Michael Schlottke-Lakemper Co-authored-by: Jesse Chan <1156048+jlchan@users.noreply.github.com> Co-authored-by: Jesse Chan --- NEWS.md | 5 + .../elixir_navierstokes_blast_wave_amr.jl | 113 ++++++ ...ir_navierstokes_taylor_green_vortex_amr.jl | 106 ++++++ src/callbacks_step/amr_dg2d.jl | 4 +- src/equations/laplace_diffusion_3d.jl | 1 + src/solvers/dgsem_p4est/dg_3d_parabolic.jl | 347 +++++++++++++++++- test/test_parabolic_3d.jl | 67 ++++ 7 files changed, 637 insertions(+), 6 deletions(-) create mode 100644 examples/p4est_3d_dgsem/elixir_navierstokes_blast_wave_amr.jl create mode 100644 examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex_amr.jl diff --git a/NEWS.md b/NEWS.md index abd9fd27882..e7bdd14eab2 100644 --- a/NEWS.md +++ b/NEWS.md @@ -4,6 +4,11 @@ Trixi.jl follows the interpretation of [semantic versioning (semver)](https://ju used in the Julia ecosystem. Notable changes will be documented in this file for human readability. 
+## Changes in the v0.6 lifecycle + +#### Added +- AMR for hyperbolic-parabolic equations on 3D `P4estMesh` + ## Changes when updating to v0.6 from v0.5.x #### Added diff --git a/examples/p4est_3d_dgsem/elixir_navierstokes_blast_wave_amr.jl b/examples/p4est_3d_dgsem/elixir_navierstokes_blast_wave_amr.jl new file mode 100644 index 00000000000..5df89fbcdf2 --- /dev/null +++ b/examples/p4est_3d_dgsem/elixir_navierstokes_blast_wave_amr.jl @@ -0,0 +1,113 @@ + +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the compressible Navier-Stokes equations + +# TODO: parabolic; unify names of these accessor functions +prandtl_number() = 0.72 +mu() = 6.25e-4 # equivalent to Re = 1600 + +equations = CompressibleEulerEquations3D(1.4) +equations_parabolic = CompressibleNavierStokesDiffusion3D(equations, mu = mu(), + Prandtl = prandtl_number()) + +function initial_condition_3d_blast_wave(x, t, equations::CompressibleEulerEquations3D) + rho_c = 1.0 + p_c = 1.0 + u_c = 0.0 + + rho_o = 0.125 + p_o = 0.1 + u_o = 0.0 + + rc = 0.5 + r = sqrt(x[1]^2 + x[2]^2 + x[3]^2) + if r < rc + rho = rho_c + v1 = u_c + v2 = u_c + v3 = u_c + p = p_c + else + rho = rho_o + v1 = u_o + v2 = u_o + v3 = u_o + p = p_o + end + + return prim2cons(SVector(rho, v1, v2, v3, p), equations) +end +initial_condition = initial_condition_3d_blast_wave + +surface_flux = flux_lax_friedrichs +volume_flux = flux_ranocha +polydeg = 3 +basis = LobattoLegendreBasis(polydeg) +indicator_sc = IndicatorHennemannGassner(equations, basis, + alpha_max = 1.0, + alpha_min = 0.001, + alpha_smooth = true, + variable = density_pressure) +volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; + volume_flux_dg = volume_flux, + volume_flux_fv = surface_flux) + +solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux, + volume_integral = volume_integral) + +coordinates_min = (-1.0, -1.0, -1.0) .* pi +coordinates_max = (1.0, 1.0, 1.0) .* pi + +trees_per_dimension = (4, 4, 4) + +mesh = P4estMesh(trees_per_dimension, polydeg = 3, + coordinates_min = coordinates_min, coordinates_max = coordinates_max, + periodicity = (true, true, true), initial_refinement_level = 1) + +semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, equations_parabolic), + initial_condition, solver) + +############################################################################### +# ODE solvers, callbacks etc. 
+ +tspan = (0.0, 0.8) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) +save_solution = SaveSolutionCallback(interval = analysis_interval, + save_initial_solution = true, + save_final_solution = true, + solution_variables = cons2prim) + +amr_indicator = IndicatorLöhner(semi, variable = Trixi.density) + +amr_controller = ControllerThreeLevel(semi, amr_indicator, + base_level = 0, + med_level = 1, med_threshold = 0.05, + max_level = 3, max_threshold = 0.1) +amr_callback = AMRCallback(semi, amr_controller, + interval = 10, + adapt_initial_condition = true, + adapt_initial_condition_only_refine = true) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + alive_callback, + amr_callback, + save_solution) + +############################################################################### +# run the simulation + +time_int_tol = 1e-8 +sol = solve(ode, RDPK3SpFSAL49(); abstol = time_int_tol, reltol = time_int_tol, + ode_default_options()..., callback = callbacks) +summary_callback() # print the timer summary diff --git a/examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex_amr.jl b/examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex_amr.jl new file mode 100644 index 00000000000..c15227a1c29 --- /dev/null +++ b/examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex_amr.jl @@ -0,0 +1,106 @@ + +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the compressible Navier-Stokes equations + +# TODO: parabolic; unify names of these accessor functions +prandtl_number() = 0.72 +mu() = 6.25e-4 # equivalent to Re = 1600 + +equations = CompressibleEulerEquations3D(1.4) +equations_parabolic = CompressibleNavierStokesDiffusion3D(equations, mu = mu(), + Prandtl = prandtl_number()) + +""" + initial_condition_taylor_green_vortex(x, t, equations::CompressibleEulerEquations3D) + +The classical Taylor-Green vortex. 
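+
+For reference, the following restates the initial fields exactly as implemented in
+the function below (editorial restatement, not part of the original docstring):
+```math
+\rho = 1, \quad
+v_1 =  A \sin(x_1) \cos(x_2) \cos(x_3), \quad
+v_2 = -A \cos(x_1) \sin(x_2) \cos(x_3), \quad
+v_3 = 0,
+```
+with the pressure
+```math
+p = \frac{\rho}{\gamma} \left(\frac{A}{\mathrm{Ms}}\right)^2
+  + \frac{\rho A^2}{16} \left(\cos(2 x_1) \cos(2 x_3) + 2 \cos(2 x_2)
+  + 2 \cos(2 x_1) + \cos(2 x_2) \cos(2 x_3)\right),
+```
+where `A` is the magnitude of the speed and `Ms` the maximum Mach number.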
+""" +function initial_condition_taylor_green_vortex(x, t, + equations::CompressibleEulerEquations3D) + A = 1.0 # magnitude of speed + Ms = 0.1 # maximum Mach number + + rho = 1.0 + v1 = A * sin(x[1]) * cos(x[2]) * cos(x[3]) + v2 = -A * cos(x[1]) * sin(x[2]) * cos(x[3]) + v3 = 0.0 + p = (A / Ms)^2 * rho / equations.gamma # scaling to get Ms + p = p + + 1.0 / 16.0 * A^2 * rho * + (cos(2 * x[1]) * cos(2 * x[3]) + 2 * cos(2 * x[2]) + 2 * cos(2 * x[1]) + + cos(2 * x[2]) * cos(2 * x[3])) + + return prim2cons(SVector(rho, v1, v2, v3, p), equations) +end +initial_condition = initial_condition_taylor_green_vortex + +@inline function vel_mag(u, equations::CompressibleEulerEquations3D) + rho, rho_v1, rho_v2, rho_v3, _ = u + return sqrt(rho_v1^2 + rho_v2^2 + rho_v3^2) / rho +end + +volume_flux = flux_ranocha +solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs, + volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) + +coordinates_min = (-1.0, -1.0, -1.0) .* pi +coordinates_max = (1.0, 1.0, 1.0) .* pi + +trees_per_dimension = (2, 2, 2) + +mesh = P4estMesh(trees_per_dimension, polydeg = 3, + coordinates_min = coordinates_min, coordinates_max = coordinates_max, + periodicity = (true, true, true), initial_refinement_level = 0) + +semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, equations_parabolic), + initial_condition, solver) + +############################################################################### +# ODE solvers, callbacks etc. + +tspan = (0.0, 0.5) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 50 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval, + save_analysis = true, + extra_analysis_integrals = (energy_kinetic, + energy_internal, + enstrophy)) +save_solution = SaveSolutionCallback(interval = 100, + save_initial_solution = true, + save_final_solution = true, + solution_variables = cons2prim) + +amr_indicator = IndicatorLöhner(semi, variable = vel_mag) + +amr_controller = ControllerThreeLevel(semi, amr_indicator, + base_level = 0, + med_level = 1, med_threshold = 0.1, + max_level = 3, max_threshold = 0.2) + +amr_callback = AMRCallback(semi, amr_controller, + interval = 5, + adapt_initial_condition = false, + adapt_initial_condition_only_refine = false) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + alive_callback, + amr_callback, + save_solution) + +############################################################################### +# run the simulation + +time_int_tol = 1e-8 +sol = solve(ode, RDPK3SpFSAL49(); abstol = time_int_tol, reltol = time_int_tol, + ode_default_options()..., callback = callbacks) +summary_callback() # print the timer summary diff --git a/src/callbacks_step/amr_dg2d.jl b/src/callbacks_step/amr_dg2d.jl index a6907c698ac..f33d41484f6 100644 --- a/src/callbacks_step/amr_dg2d.jl +++ b/src/callbacks_step/amr_dg2d.jl @@ -137,7 +137,7 @@ function refine!(u_ode::AbstractVector, adaptor, mesh::Union{TreeMesh{2}, P4estM end function refine!(u_ode::AbstractVector, adaptor, - mesh::Union{TreeMesh{2}, P4estMesh{2}, TreeMesh{3}}, + mesh::Union{TreeMesh{2}, P4estMesh{2}, TreeMesh{3}, P4estMesh{3}}, equations, dg::DGSEM, cache, cache_parabolic, elements_to_refine) # Call `refine!` for the hyperbolic part, which does the heavy lifting of @@ -299,7 +299,7 @@ function coarsen!(u_ode::AbstractVector, adaptor, end function coarsen!(u_ode::AbstractVector, adaptor, - mesh::Union{TreeMesh{2}, 
P4estMesh{2}, TreeMesh{3}},
+                  mesh::Union{TreeMesh{2}, P4estMesh{2}, TreeMesh{3}, P4estMesh{3}},
                   equations, dg::DGSEM, cache, cache_parabolic,
                   elements_to_remove)
     # Call `coarsen!` for the hyperbolic part, which does the heavy lifting of
diff --git a/src/equations/laplace_diffusion_3d.jl b/src/equations/laplace_diffusion_3d.jl
index 457e742430b..3988ce7144b 100644
--- a/src/equations/laplace_diffusion_3d.jl
+++ b/src/equations/laplace_diffusion_3d.jl
@@ -18,6 +18,7 @@ function varnames(variable_mapping, equations_parabolic::LaplaceDiffusion3D)
     varnames(variable_mapping, equations_parabolic.equations_hyperbolic)
 end

+# The `orientation` argument selects the component of the vector-valued viscous flux.
 function flux(u, gradients, orientation::Integer, equations_parabolic::LaplaceDiffusion3D)
     dudx, dudy, dudz = gradients
     if orientation == 1
diff --git a/src/solvers/dgsem_p4est/dg_3d_parabolic.jl b/src/solvers/dgsem_p4est/dg_3d_parabolic.jl
index b06cdd42127..0bb97c7af02 100644
--- a/src/solvers/dgsem_p4est/dg_3d_parabolic.jl
+++ b/src/solvers/dgsem_p4est/dg_3d_parabolic.jl
@@ -102,8 +102,20 @@ function rhs_parabolic!(du, u, t, mesh::P4estMesh{3},
                                  dg.surface_integral, dg)
     end

-    # TODO: parabolic; extend to mortars
-    @assert nmortars(dg, cache) == 0
+    # Prolong solution to mortars (specialized for AbstractEquationsParabolic)
+    # !!! NOTE: we reuse the hyperbolic cache here since it contains both `mortars` and `u_threaded`.
+    # !!! Is this OK?
+    @trixi_timeit timer() "prolong2mortars" begin
+        prolong2mortars_divergence!(cache, flux_viscous, mesh, equations_parabolic,
+                                    dg.mortar, dg.surface_integral, dg)
+    end
+
+    # Calculate mortar fluxes (specialized for AbstractEquationsParabolic)
+    @trixi_timeit timer() "mortar flux" begin
+        calc_mortar_flux_divergence!(cache_parabolic.elements.surface_flux_values,
+                                     mesh, equations_parabolic, dg.mortar,
+                                     dg.surface_integral, dg, cache)
+    end

     # Calculate surface integrals
     @trixi_timeit timer() "surface integral" begin
@@ -230,8 +242,23 @@ function calc_gradient!(gradients, u_transformed, t,
                                  mesh, equations_parabolic,
                                  dg.surface_integral, dg)
     end

-    # TODO: parabolic; mortars
-    @assert nmortars(dg, cache) == 0
+    # Prolong solution to mortars. This reuses the hyperbolic version of `prolong2mortars!`.
+    # !!! NOTE: we reuse the hyperbolic cache here, since it contains both `mortars` and `u_threaded`.
+    # !!! should we have a separate mortars/u_threaded in cache_parabolic?
+    @trixi_timeit timer() "prolong2mortars" begin
+        prolong2mortars!(cache, u_transformed, mesh, equations_parabolic,
+                         dg.mortar, dg.surface_integral, dg)
+    end
+
+    # Calculate mortar fluxes. This reuses the hyperbolic version of `calc_mortar_flux!`,
+    # together with the specializations of `calc_mortar_flux!` and
+    # `mortar_fluxes_to_elements!` for AbstractEquationsParabolic below.
+    @trixi_timeit timer() "mortar flux" begin
+        calc_mortar_flux!(cache_parabolic.elements.surface_flux_values,
+                          mesh, False(), # False() = no nonconservative terms
+                          equations_parabolic,
+                          dg.mortar, dg.surface_integral, dg, cache)
+    end

     # Calculate surface integrals
     @trixi_timeit timer() "surface integral" begin
@@ -324,6 +351,93 @@ function calc_gradient!(gradients, u_transformed, t,
     return nothing
 end

+# This version is called during `calc_gradient!` and must be specialized because the flux
+# in the gradient is {u}, which does not depend on the normal direction. Thus, no scaling
+# by 2 and no sign flip are needed when storing the mortar fluxes back into
+# `surface_flux_values`.
+@inline function mortar_fluxes_to_elements!(surface_flux_values,
+                                            mesh::P4estMesh{3},
+                                            equations::AbstractEquationsParabolic,
+                                            mortar_l2::LobattoLegendreMortarL2,
+                                            dg::DGSEM, cache, mortar, fstar, u_buffer,
+                                            fstar_tmp)
+    @unpack neighbor_ids, node_indices = cache.mortars
+    index_range = eachnode(dg)
+
+    # Copy solution small to small
+    small_indices = node_indices[1, mortar]
+    small_direction = indices2direction(small_indices)
+
+    for position in 1:4 # Loop over small elements
+        element = neighbor_ids[position, mortar]
+        for j in eachnode(dg), i in eachnode(dg)
+            for v in eachvariable(equations)
+                surface_flux_values[v, i, j, small_direction, element] = fstar[v, i, j,
+                                                                               position]
+            end
+        end
+    end
+
+    # Project small fluxes to large element.
+    multiply_dimensionwise!(u_buffer,
+                            mortar_l2.reverse_lower, mortar_l2.reverse_lower,
+                            view(fstar, .., 1),
+                            fstar_tmp)
+    add_multiply_dimensionwise!(u_buffer,
+                                mortar_l2.reverse_upper, mortar_l2.reverse_lower,
+                                view(fstar, .., 2),
+                                fstar_tmp)
+    add_multiply_dimensionwise!(u_buffer,
+                                mortar_l2.reverse_lower, mortar_l2.reverse_upper,
+                                view(fstar, .., 3),
+                                fstar_tmp)
+    add_multiply_dimensionwise!(u_buffer,
+                                mortar_l2.reverse_upper, mortar_l2.reverse_upper,
+                                view(fstar, .., 4),
+                                fstar_tmp)
+
+    # In contrast to the hyperbolic version, `u_buffer` needs neither a sign flip nor a
+    # rescaling here: the gradient interface flux {u} is independent of the
+    # (surface-Jacobian-scaled) normal directions of the adjacent elements.
+
+    # Copy interpolated flux values from buffer to large element face in the
+    # correct orientation.
+    # Note that the index of the small sides will always run forward but
+    # the index of the large side might need to run backwards for flipped sides.
+    large_element = neighbor_ids[5, mortar]
+    large_indices = node_indices[2, mortar]
+    large_direction = indices2direction(large_indices)
+    large_surface_indices = surface_indices(large_indices)
+
+    i_large_start, i_large_step_i, i_large_step_j = index_to_start_step_3d(large_surface_indices[1],
+                                                                           index_range)
+    j_large_start, j_large_step_i, j_large_step_j = index_to_start_step_3d(large_surface_indices[2],
+                                                                           index_range)
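+    # !!! NOTE (editorial addition, not upstream code): `fstar` holds the BR1
+    # !!! interface values u* = 1/2 (u_ll + u_rr), computed by the `calc_mortar_flux!`
+    # !!! specialization for AbstractEquationsParabolic below. Since u* is a plain
+    # !!! average per variable carrying no normal information, the loop below copies
+    # !!! `u_buffer` to the large face without any sign flip or scaling.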
+ i_large = i_large_start + j_large = j_large_start + for j in eachnode(dg) + for i in eachnode(dg) + for v in eachvariable(equations) + surface_flux_values[v, i_large, j_large, large_direction, large_element] = u_buffer[v, + i, + j] + end + i_large += i_large_step_i + j_large += j_large_step_i + end + i_large += i_large_step_j + j_large += j_large_step_j + end + + return nothing +end + # This version is used for parabolic gradient computations @inline function calc_interface_flux!(surface_flux_values, mesh::P4estMesh{3}, nonconservative_terms::False, @@ -603,6 +717,231 @@ function calc_interface_flux!(surface_flux_values, return nothing end +function prolong2mortars_divergence!(cache, flux_viscous, + mesh::Union{P4estMesh{3}, T8codeMesh{3}}, equations, + mortar_l2::LobattoLegendreMortarL2, + surface_integral, dg::DGSEM) + @unpack neighbor_ids, node_indices = cache.mortars + @unpack fstar_tmp_threaded = cache + @unpack contravariant_vectors = cache.elements + index_range = eachnode(dg) + + flux_viscous_x, flux_viscous_y, flux_viscous_z = flux_viscous + + @threaded for mortar in eachmortar(dg, cache) + # Copy solution data from the small elements using "delayed indexing" with + # a start value and a step size to get the correct face and orientation. + small_indices = node_indices[1, mortar] + direction_index = indices2direction(small_indices) + + i_small_start, i_small_step_i, i_small_step_j = index_to_start_step_3d(small_indices[1], + index_range) + j_small_start, j_small_step_i, j_small_step_j = index_to_start_step_3d(small_indices[2], + index_range) + k_small_start, k_small_step_i, k_small_step_j = index_to_start_step_3d(small_indices[3], + index_range) + + for position in 1:4 # Loop over small elements + i_small = i_small_start + j_small = j_small_start + k_small = k_small_start + element = neighbor_ids[position, mortar] + for j in eachnode(dg) + for i in eachnode(dg) + normal_direction = get_normal_direction(direction_index, + contravariant_vectors, + i_small, j_small, k_small, + element) + + for v in eachvariable(equations) + flux_viscous = SVector(flux_viscous_x[v, i_small, j_small, k_small, + element], + flux_viscous_y[v, i_small, j_small, k_small, + element], + flux_viscous_z[v, i_small, j_small, k_small, + element]) + + cache.mortars.u[1, v, position, i, j, mortar] = dot(flux_viscous, + normal_direction) + end + i_small += i_small_step_i + j_small += j_small_step_i + k_small += k_small_step_i + end + i_small += i_small_step_j + j_small += j_small_step_j + k_small += k_small_step_j + end + end + + # Buffer to copy solution values of the large element in the correct orientation + # before interpolating + u_buffer = cache.u_threaded[Threads.threadid()] + + # temporary buffer for projections + fstar_tmp = fstar_tmp_threaded[Threads.threadid()] + + # Copy solution of large element face to buffer in the + # correct orientation + large_indices = node_indices[2, mortar] + + i_large_start, i_large_step_i, i_large_step_j = index_to_start_step_3d(large_indices[1], + index_range) + j_large_start, j_large_step_i, j_large_step_j = index_to_start_step_3d(large_indices[2], + index_range) + k_large_start, k_large_step_i, k_large_step_j = index_to_start_step_3d(large_indices[3], + index_range) + + i_large = i_large_start + j_large = j_large_start + k_large = k_large_start + element = neighbor_ids[5, mortar] # Large element + for j in eachnode(dg) + for i in eachnode(dg) + normal_direction = get_normal_direction(direction_index, + contravariant_vectors, + i_large, j_large, k_large, element) + + for 
v in eachvariable(equations)
+                    flux_viscous = SVector(flux_viscous_x[v, i_large, j_large, k_large,
+                                                          element],
+                                           flux_viscous_y[v, i_large, j_large, k_large,
+                                                          element],
+                                           flux_viscous_z[v, i_large, j_large, k_large,
+                                                          element])
+
+                    # We prolong the viscous flux dotted with the outward normal of the
+                    # small element. We scale by -1/2 here because the normal direction
+                    # of the large element is negative 2x that of the small element
+                    # (these normal directions are "scaled" by the surface Jacobian).
+                    u_buffer[v, i, j] = -0.5 * dot(flux_viscous, normal_direction)
+                end
+                i_large += i_large_step_i
+                j_large += j_large_step_i
+                k_large += k_large_step_i
+            end
+            i_large += i_large_step_j
+            j_large += j_large_step_j
+            k_large += k_large_step_j
+        end
+
+        # Interpolate large element face data from buffer to small face locations
+        multiply_dimensionwise!(view(cache.mortars.u, 2, :, 1, :, :, mortar),
+                                mortar_l2.forward_lower,
+                                mortar_l2.forward_lower,
+                                u_buffer,
+                                fstar_tmp)
+        multiply_dimensionwise!(view(cache.mortars.u, 2, :, 2, :, :, mortar),
+                                mortar_l2.forward_upper,
+                                mortar_l2.forward_lower,
+                                u_buffer,
+                                fstar_tmp)
+        multiply_dimensionwise!(view(cache.mortars.u, 2, :, 3, :, :, mortar),
+                                mortar_l2.forward_lower,
+                                mortar_l2.forward_upper,
+                                u_buffer,
+                                fstar_tmp)
+        multiply_dimensionwise!(view(cache.mortars.u, 2, :, 4, :, :, mortar),
+                                mortar_l2.forward_upper,
+                                mortar_l2.forward_upper,
+                                u_buffer,
+                                fstar_tmp)
+    end
+
+    return nothing
+end
+
+# We specialize `calc_mortar_flux!` for the divergence part of
+# the parabolic terms.
+function calc_mortar_flux_divergence!(surface_flux_values,
+                                      mesh::Union{P4estMesh{3}, T8codeMesh{3}},
+                                      equations::AbstractEquationsParabolic,
+                                      mortar_l2::LobattoLegendreMortarL2,
+                                      surface_integral, dg::DG, cache)
+    @unpack neighbor_ids, node_indices = cache.mortars
+    @unpack contravariant_vectors = cache.elements
+    @unpack fstar_threaded, fstar_tmp_threaded = cache
+    index_range = eachnode(dg)
+
+    @threaded for mortar in eachmortar(dg, cache)
+        # Choose thread-specific pre-allocated container
+        fstar = fstar_threaded[Threads.threadid()]
+        fstar_tmp = fstar_tmp_threaded[Threads.threadid()]
+
+        # Get index information on the small elements
+        small_indices = node_indices[1, mortar]
+        small_direction = indices2direction(small_indices)
+
+        i_small_start, i_small_step_i, i_small_step_j = index_to_start_step_3d(small_indices[1],
+                                                                               index_range)
+        j_small_start, j_small_step_i, j_small_step_j = index_to_start_step_3d(small_indices[2],
+                                                                               index_range)
+        k_small_start, k_small_step_i, k_small_step_j = index_to_start_step_3d(small_indices[3],
+                                                                               index_range)
+
+        for position in 1:4 # Loop over small elements
+            i_small = i_small_start
+            j_small = j_small_start
+            k_small = k_small_start
+            element = neighbor_ids[position, mortar]
+            for j in eachnode(dg)
+                for i in eachnode(dg)
+                    for v in eachvariable(equations)
+                        viscous_flux_normal_ll = cache.mortars.u[1, v, position, i, j,
+                                                                 mortar]
+                        viscous_flux_normal_rr = cache.mortars.u[2, v, position, i, j,
+                                                                 mortar]
+
+                        # TODO: parabolic; only BR1 at the moment
+                        fstar[v, i, j, position] = 0.5 * (viscous_flux_normal_ll +
+                                                          viscous_flux_normal_rr)
+                    end
+
+                    i_small += i_small_step_i
+                    j_small += j_small_step_i
+                    k_small += k_small_step_i
+                end
+                i_small += i_small_step_j
+                j_small += j_small_step_j
+                k_small += k_small_step_j
+            end
+        end
+
+        # Buffer into which the flux values of the large element are interpolated
+        # before being copied back in the correct orientation
+        u_buffer = cache.u_threaded[Threads.threadid()]
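+        # !!! NOTE (editorial sketch, not upstream code): given the -1/2 scaling
+        # !!! applied in `prolong2mortars_divergence!` above, both mortar traces are
+        # !!! expressed with respect to the outward normals of the small elements, so
+        # !!! the average computed above is the standard BR1 divergence flux
+        # !!!   f* = 1/2 (f^- + f^+) . n_small.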
+        # This call dispatches to the `mortar_fluxes_to_elements!` specialization for
+        # AbstractEquationsParabolic defined above, which applies no additional sign
+        # flip or scaling.
+        mortar_fluxes_to_elements!(surface_flux_values,
+                                   mesh, equations, mortar_l2, dg, cache,
+                                   mortar, fstar, u_buffer, fstar_tmp)
+    end
+
+    return nothing
+end
+
+# NOTE: This mirrors `calc_mortar_flux!` for hyperbolic equations with no nonconservative terms.
+# Reasoning: `calc_interface_flux!` for the parabolic part is likewise implemented as the
+# version for hyperbolic terms with conservative terms only, i.e., no nonconservative terms.
+@inline function calc_mortar_flux!(fstar,
+                                   mesh::P4estMesh{3},
+                                   nonconservative_terms::False,
+                                   equations::AbstractEquationsParabolic,
+                                   surface_integral, dg::DG, cache,
+                                   mortar_index, position_index, normal_direction,
+                                   i_node_index, j_node_index)
+    @unpack u = cache.mortars
+    @unpack surface_flux = surface_integral
+
+    u_ll, u_rr = get_surface_node_vars(u, equations, dg, position_index, i_node_index,
+                                       j_node_index, mortar_index)
+
+    # TODO: parabolic; only BR1 at the moment
+    flux_ = 0.5 * (u_ll + u_rr)
+
+    # Copy flux to buffer
+    set_node_vars!(fstar, flux_, equations, dg, i_node_index, j_node_index,
+                   position_index)
+end
+
 # TODO: parabolic, finish implementing `calc_boundary_flux_gradients!` and
 # `calc_boundary_flux_divergence!`
 function prolong2boundaries!(cache_parabolic, flux_viscous,
                              mesh::P4estMesh{3},
diff --git a/test/test_parabolic_3d.jl b/test/test_parabolic_3d.jl
index d6c720cf0d9..6fbfb8259d4 100644
--- a/test/test_parabolic_3d.jl
+++ b/test/test_parabolic_3d.jl
@@ -429,6 +429,14 @@ end
                         "elixir_advection_diffusion_amr.jl"),
                         l2=[0.000355780485397024],
                         linf=[0.0010810770271614256])
+    # Ensure that we do not have excessive memory allocations
+    # (e.g., from type instabilities)
+    let
+        t = sol.t[end]
+        u_ode = sol.u[end]
+        du_ode = similar(u_ode)
+        @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000
+    end
 end

 @trixi_testset "TreeMesh3D: elixir_advection_diffusion_nonperiodic.jl" begin
     @test_trixi_include(joinpath(examples_dir(),
                                  "elixir_advection_diffusion_nonperiodic.jl"),
                         l2=[0.0009808996243280868],
                         linf=[0.01732621559135459])
+    # Ensure that we do not have excessive memory allocations
+    # (e.g., from type instabilities)
+    let
+        t = sol.t[end]
+        u_ode = sol.u[end]
+        du_ode = similar(u_ode)
+        @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000
+    end
+end
+
+@trixi_testset "P4estMesh3D: elixir_navierstokes_taylor_green_vortex_amr.jl" begin
+    @test_trixi_include(joinpath(examples_dir(), "p4est_3d_dgsem",
+                                 "elixir_navierstokes_taylor_green_vortex_amr.jl"),
+                        initial_refinement_level=0, tspan=(0.0, 0.5),
+                        l2=[
+                            0.0016588740573444188,
+                            0.03437058632045721,
+                            0.03437058632045671,
+                            0.041038898400430075,
+                            0.30978593009044153,
+                        ],
+                        linf=[
+                            0.004173569912012121,
+                            0.09168674832979556,
+                            0.09168674832975021,
+                            0.12129218723807476,
+                            0.8433893297612087,
+                        ])
+    # Ensure that we do not have excessive memory allocations
+    # (e.g., from type instabilities)
+    let
+        t = sol.t[end]
+        u_ode = sol.u[end]
+        du_ode = similar(u_ode)
+        @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000
+    end
+end
+
+@trixi_testset "P4estMesh3D: elixir_navierstokes_blast_wave_amr.jl" begin
+    @test_trixi_include(joinpath(examples_dir(), "p4est_3d_dgsem",
+                                 "elixir_navierstokes_blast_wave_amr.jl"),
+                        tspan=(0.0, 0.01),
+                        l2=[
+                            0.009472104410520866, 0.0017883742549557149,
+                            0.0017883742549557147, 0.0017883742549557196,
+                            0.024388540048562748,
+                        ],
+                        linf=[
+                            0.6782397526873181, 0.17663702154066238,
+                            0.17663702154066266, 0.17663702154066238, 1.7327849844825238,
+                        ])
+    # Ensure that we do not have excessive memory allocations
+    # (e.g.,
from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end end