From 3ca93afed4ab4efbd6022f65aa88b9a3a7608906 Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Thu, 10 Aug 2023 22:25:36 +0200 Subject: [PATCH] L2 Mortars for Parabolic Terms on TreeMeshes (#1571) * First try mortars for parabolic terms * Use correct interface values in calc_fstar! * Format parabolic 2d dgsem * Remove unused function parameters * L2 Mortars for 3D DGSEM TreeMesh * Format * Back to original example * Dispatch 2D DGSEm rhs_parabolic for p4est and classic tree * Re-use standard prolong2mortars in gradient comp * Back to original version * Add tests for L2 mortars for hyp-para * remove whitespaces * Use original analysis callback * Test Taylor-Green with different integrator * Remove whitespace * check coverage status * Stick to CK2N54 for 3D test * Add more explicit dispatch * Less invasive treatment for mortars and p4est * Revert "Add more explicit dispatch" This reverts commit 491c923d09ba335c03524894d9f59acf1d6ee699. * More explicit dispatch * Remove additional end * Remove doubled implementations * kepp main updated with true main * Add comment * comment parabolic 3d * whitespace * Avoid allocations in parabolic boundary fluxes * Update src/solvers/dgsem_tree/dg_2d_parabolic.jl Co-authored-by: Andrew Winters * Update src/solvers/dgsem_tree/dg_3d_parabolic.jl Co-authored-by: Andrew Winters * Update src/solvers/dgsem_tree/dg_3d_parabolic.jl Co-authored-by: Andrew Winters * revert alloc BC (other PR) * Revert alloc BC (other PR) * Name & News * Update NEWS.md Co-authored-by: Andrew Winters * Update src/solvers/dgsem_p4est/dg_2d_parabolic.jl Co-authored-by: Hendrik Ranocha * Update src/solvers/dgsem_p4est/dg_3d_parabolic.jl Co-authored-by: Hendrik Ranocha * Check allocations --------- Co-authored-by: Hendrik Ranocha Co-authored-by: Andrew Winters --- AUTHORS.md | 1 + NEWS.md | 1 + src/solvers/dgsem_p4est/dg_2d_parabolic.jl | 91 ++++++ src/solvers/dgsem_p4est/dg_3d_parabolic.jl | 99 +++++++ src/solvers/dgsem_tree/dg_2d_parabolic.jl | 252 +++++++++++++++- src/solvers/dgsem_tree/dg_3d_parabolic.jl | 323 ++++++++++++++++++++- test/test_parabolic_2d.jl | 54 ++++ test/test_parabolic_3d.jl | 58 +++- 8 files changed, 870 insertions(+), 9 deletions(-) diff --git a/AUTHORS.md b/AUTHORS.md index abaa3e7e037..74bfaa9c852 100644 --- a/AUTHORS.md +++ b/AUTHORS.md @@ -28,6 +28,7 @@ are listed in alphabetical order: * Jesse Chan * Lars Christmann * Christof Czernik +* Daniel Doehring * Patrick Ersing * Erik Faulhaber * Gregor Gassner diff --git a/NEWS.md b/NEWS.md index 8e374d9ce99..10125c40d17 100644 --- a/NEWS.md +++ b/NEWS.md @@ -9,6 +9,7 @@ for human readability. #### Added - Experimental support for 3D parabolic diffusion terms has been added. +- Non-uniform `TreeMesh` available for hyperbolic-parabolic equations. - Capability to set truly discontinuous initial conditions in 1D. 
- Wetting and drying feature and examples for 1D and 2D shallow water equations diff --git a/src/solvers/dgsem_p4est/dg_2d_parabolic.jl b/src/solvers/dgsem_p4est/dg_2d_parabolic.jl index 7e90a83a9ca..a04523d2fb4 100644 --- a/src/solvers/dgsem_p4est/dg_2d_parabolic.jl +++ b/src/solvers/dgsem_p4est/dg_2d_parabolic.jl @@ -22,6 +22,97 @@ function create_cache_parabolic(mesh::P4estMesh{2}, equations_hyperbolic::Abstra return cache end +# TODO: Remove in favor of the implementation for the TreeMesh +# once the P4estMesh can handle mortars as well +function rhs_parabolic!(du, u, t, mesh::P4estMesh{2}, + equations_parabolic::AbstractEquationsParabolic, + initial_condition, boundary_conditions_parabolic, source_terms, + dg::DG, parabolic_scheme, cache, cache_parabolic) + (; u_transformed, gradients, flux_viscous) = cache_parabolic + + # Convert conservative variables to a form more suitable for viscous flux calculations + @trixi_timeit timer() "transform variables" begin + transform_variables!(u_transformed, u, mesh, equations_parabolic, + dg, parabolic_scheme, cache, cache_parabolic) + end + + # Compute the gradients of the transformed variables + @trixi_timeit timer() "calculate gradient" begin + calc_gradient!(gradients, u_transformed, t, mesh, equations_parabolic, + boundary_conditions_parabolic, dg, cache, cache_parabolic) + end + + # Compute and store the viscous fluxes + @trixi_timeit timer() "calculate viscous fluxes" begin + calc_viscous_fluxes!(flux_viscous, gradients, u_transformed, mesh, + equations_parabolic, dg, cache, cache_parabolic) + end + + # The remainder of this function is essentially a regular rhs! for parabolic + # equations (i.e., it computes the divergence of the viscous fluxes) + # + # OBS! In `calc_viscous_fluxes!`, the viscous flux values at the volume nodes of each element have + # been computed and stored in `fluxes_viscous`. In the following, we *reuse* (abuse) the + # `interfaces` and `boundaries` containers in `cache_parabolic` to interpolate and store the + # *fluxes* at the element surfaces, as opposed to interpolating and storing the *solution* (as it + # is done in the hyperbolic operator). That is, `interfaces.u`/`boundaries.u` store *viscous flux values* + # and *not the solution*. The advantage is that a) we do not need to allocate more storage, b) we + # do not need to recreate the existing data structure only with a different name, and c) we do not + # need to interpolate solutions *and* gradients to the surfaces. 
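+    #
+    # A rough sketch of what this reuse implies for the divergence step (not literal code,
+    # and only valid for the BR1 scheme currently implemented): the stored one-sided
+    # viscous fluxes are simply averaged at each face node,
+    #
+    #   f_ll, f_rr = get_surface_node_vars(cache_parabolic.interfaces.u,
+    #                                      equations_parabolic, dg, i, interface)
+    #   flux = 0.5 * (f_ll + f_rr)
+    #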
+ + # TODO: parabolic; reconsider current data structure reuse strategy + + # Reset du + @trixi_timeit timer() "reset ∂u/∂t" reset_du!(du, dg, cache) + + # Calculate volume integral + @trixi_timeit timer() "volume integral" begin + calc_volume_integral!(du, flux_viscous, mesh, equations_parabolic, dg, cache) + end + + # Prolong solution to interfaces + @trixi_timeit timer() "prolong2interfaces" begin + prolong2interfaces!(cache_parabolic, flux_viscous, mesh, equations_parabolic, + dg.surface_integral, dg, cache) + end + + # Calculate interface fluxes + @trixi_timeit timer() "interface flux" begin + calc_interface_flux!(cache_parabolic.elements.surface_flux_values, mesh, + equations_parabolic, dg, cache_parabolic) + end + + # Prolong solution to boundaries + @trixi_timeit timer() "prolong2boundaries" begin + prolong2boundaries!(cache_parabolic, flux_viscous, mesh, equations_parabolic, + dg.surface_integral, dg, cache) + end + + # Calculate boundary fluxes + @trixi_timeit timer() "boundary flux" begin + calc_boundary_flux_divergence!(cache_parabolic, t, + boundary_conditions_parabolic, mesh, + equations_parabolic, + dg.surface_integral, dg) + end + + # TODO: parabolic; extend to mortars + @assert nmortars(dg, cache) == 0 + + # Calculate surface integrals + @trixi_timeit timer() "surface integral" begin + calc_surface_integral!(du, u, mesh, equations_parabolic, + dg.surface_integral, dg, cache_parabolic) + end + + # Apply Jacobian from mapping to reference element + @trixi_timeit timer() "Jacobian" begin + apply_jacobian_parabolic!(du, mesh, equations_parabolic, dg, cache_parabolic) + end + + return nothing +end + function calc_gradient!(gradients, u_transformed, t, mesh::P4estMesh{2}, equations_parabolic, boundary_conditions_parabolic, dg::DG, diff --git a/src/solvers/dgsem_p4est/dg_3d_parabolic.jl b/src/solvers/dgsem_p4est/dg_3d_parabolic.jl index 6439cad69bb..2d26c1aff50 100644 --- a/src/solvers/dgsem_p4est/dg_3d_parabolic.jl +++ b/src/solvers/dgsem_p4est/dg_3d_parabolic.jl @@ -22,6 +22,105 @@ function create_cache_parabolic(mesh::P4estMesh{3}, equations_hyperbolic::Abstra return cache end +# This file collects all methods that have been updated to work with parabolic systems of equations +# +# assumptions: parabolic terms are of the form div(f(u, grad(u))) and +# will be discretized first order form as follows: +# 1. compute grad(u) +# 2. compute f(u, grad(u)) +# 3. compute div(f(u, grad(u))) (i.e., the "regular" rhs! call) +# boundary conditions will be applied to both grad(u) and div(f(u, grad(u))). 
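+#
+# Schematically, and only as a rough sketch of the routines called below (names as used
+# in this file), the three steps read:
+#
+#   calc_gradient!(gradients, u_transformed, ...)                      # 1. grad(u)
+#   calc_viscous_fluxes!(flux_viscous, gradients, u_transformed, ...)  # 2. f(u, grad(u))
+#   # volume + surface integrals of flux_viscous in rhs_parabolic!     # 3. div(f(u, grad(u)))
+#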
+# TODO: Remove in favor of the implementation for the TreeMesh +# once the P4estMesh can handle mortars as well +function rhs_parabolic!(du, u, t, mesh::P4estMesh{3}, + equations_parabolic::AbstractEquationsParabolic, + initial_condition, boundary_conditions_parabolic, source_terms, + dg::DG, parabolic_scheme, cache, cache_parabolic) + @unpack u_transformed, gradients, flux_viscous = cache_parabolic + + # Convert conservative variables to a form more suitable for viscous flux calculations + @trixi_timeit timer() "transform variables" begin + transform_variables!(u_transformed, u, mesh, equations_parabolic, + dg, parabolic_scheme, cache, cache_parabolic) + end + + # Compute the gradients of the transformed variables + @trixi_timeit timer() "calculate gradient" begin + calc_gradient!(gradients, u_transformed, t, mesh, equations_parabolic, + boundary_conditions_parabolic, dg, cache, cache_parabolic) + end + + # Compute and store the viscous fluxes + @trixi_timeit timer() "calculate viscous fluxes" begin + calc_viscous_fluxes!(flux_viscous, gradients, u_transformed, mesh, + equations_parabolic, dg, cache, cache_parabolic) + end + + # The remainder of this function is essentially a regular rhs! for parabolic + # equations (i.e., it computes the divergence of the viscous fluxes) + # + # OBS! In `calc_viscous_fluxes!`, the viscous flux values at the volume nodes of each element have + # been computed and stored in `fluxes_viscous`. In the following, we *reuse* (abuse) the + # `interfaces` and `boundaries` containers in `cache_parabolic` to interpolate and store the + # *fluxes* at the element surfaces, as opposed to interpolating and storing the *solution* (as it + # is done in the hyperbolic operator). That is, `interfaces.u`/`boundaries.u` store *viscous flux values* + # and *not the solution*. The advantage is that a) we do not need to allocate more storage, b) we + # do not need to recreate the existing data structure only with a different name, and c) we do not + # need to interpolate solutions *and* gradients to the surfaces. 
+ + # TODO: parabolic; reconsider current data structure reuse strategy + + # Reset du + @trixi_timeit timer() "reset ∂u/∂t" reset_du!(du, dg, cache) + + # Calculate volume integral + @trixi_timeit timer() "volume integral" begin + calc_volume_integral!(du, flux_viscous, mesh, equations_parabolic, dg, cache) + end + + # Prolong solution to interfaces + @trixi_timeit timer() "prolong2interfaces" begin + prolong2interfaces!(cache_parabolic, flux_viscous, mesh, equations_parabolic, + dg.surface_integral, dg, cache) + end + + # Calculate interface fluxes + @trixi_timeit timer() "interface flux" begin + calc_interface_flux!(cache_parabolic.elements.surface_flux_values, mesh, + equations_parabolic, dg, cache_parabolic) + end + + # Prolong solution to boundaries + @trixi_timeit timer() "prolong2boundaries" begin + prolong2boundaries!(cache_parabolic, flux_viscous, mesh, equations_parabolic, + dg.surface_integral, dg, cache) + end + + # Calculate boundary fluxes + @trixi_timeit timer() "boundary flux" begin + calc_boundary_flux_divergence!(cache_parabolic, t, + boundary_conditions_parabolic, + mesh, equations_parabolic, + dg.surface_integral, dg) + end + + # TODO: parabolic; extend to mortars + @assert nmortars(dg, cache) == 0 + + # Calculate surface integrals + @trixi_timeit timer() "surface integral" begin + calc_surface_integral!(du, u, mesh, equations_parabolic, + dg.surface_integral, dg, cache_parabolic) + end + + # Apply Jacobian from mapping to reference element + @trixi_timeit timer() "Jacobian" begin + apply_jacobian_parabolic!(du, mesh, equations_parabolic, dg, cache_parabolic) + end + + return nothing +end + function calc_gradient!(gradients, u_transformed, t, mesh::P4estMesh{3}, equations_parabolic, boundary_conditions_parabolic, dg::DG, diff --git a/src/solvers/dgsem_tree/dg_2d_parabolic.jl b/src/solvers/dgsem_tree/dg_2d_parabolic.jl index c5862579992..0da25230380 100644 --- a/src/solvers/dgsem_tree/dg_2d_parabolic.jl +++ b/src/solvers/dgsem_tree/dg_2d_parabolic.jl @@ -13,7 +13,7 @@ # 2. compute f(u, grad(u)) # 3. compute div(f(u, grad(u))) (i.e., the "regular" rhs! call) # boundary conditions will be applied to both grad(u) and div(f(u, grad(u))). 
-function rhs_parabolic!(du, u, t, mesh::Union{TreeMesh{2}, P4estMesh{2}}, +function rhs_parabolic!(du, u, t, mesh::TreeMesh{2}, equations_parabolic::AbstractEquationsParabolic, initial_condition, boundary_conditions_parabolic, source_terms, dg::DG, parabolic_scheme, cache, cache_parabolic) @@ -85,8 +85,18 @@ function rhs_parabolic!(du, u, t, mesh::Union{TreeMesh{2}, P4estMesh{2}}, dg.surface_integral, dg) end - # TODO: parabolic; extend to mortars - @assert nmortars(dg, cache) == 0 + # Prolong solution to mortars + @trixi_timeit timer() "prolong2mortars" begin + prolong2mortars!(cache, flux_viscous, mesh, equations_parabolic, + dg.mortar, dg.surface_integral, dg) + end + + # Calculate mortar fluxes + @trixi_timeit timer() "mortar flux" begin + calc_mortar_flux!(cache_parabolic.elements.surface_flux_values, mesh, + equations_parabolic, + dg.mortar, dg.surface_integral, dg, cache) + end # Calculate surface integrals @trixi_timeit timer() "surface integral" begin @@ -500,6 +510,227 @@ function calc_boundary_flux_by_direction_divergence!(surface_flux_values::Abstra return nothing end +function prolong2mortars!(cache, flux_viscous::Tuple{AbstractArray, AbstractArray}, + mesh::TreeMesh{2}, + equations_parabolic::AbstractEquationsParabolic, + mortar_l2::LobattoLegendreMortarL2, surface_integral, + dg::DGSEM) + flux_viscous_x, flux_viscous_y = flux_viscous + @threaded for mortar in eachmortar(dg, cache) + large_element = cache.mortars.neighbor_ids[3, mortar] + upper_element = cache.mortars.neighbor_ids[2, mortar] + lower_element = cache.mortars.neighbor_ids[1, mortar] + + # Copy solution small to small + if cache.mortars.large_sides[mortar] == 1 # -> small elements on right side + if cache.mortars.orientations[mortar] == 1 + # L2 mortars in x-direction + for l in eachnode(dg) + for v in eachvariable(equations_parabolic) + cache.mortars.u_upper[2, v, l, mortar] = flux_viscous_x[v, 1, l, + upper_element] + cache.mortars.u_lower[2, v, l, mortar] = flux_viscous_x[v, 1, l, + lower_element] + end + end + else + # L2 mortars in y-direction + for l in eachnode(dg) + for v in eachvariable(equations_parabolic) + cache.mortars.u_upper[2, v, l, mortar] = flux_viscous_y[v, l, 1, + upper_element] + cache.mortars.u_lower[2, v, l, mortar] = flux_viscous_y[v, l, 1, + lower_element] + end + end + end + else # large_sides[mortar] == 2 -> small elements on left side + if cache.mortars.orientations[mortar] == 1 + # L2 mortars in x-direction + for l in eachnode(dg) + for v in eachvariable(equations_parabolic) + cache.mortars.u_upper[1, v, l, mortar] = flux_viscous_x[v, + nnodes(dg), + l, + upper_element] + cache.mortars.u_lower[1, v, l, mortar] = flux_viscous_x[v, + nnodes(dg), + l, + lower_element] + end + end + else + # L2 mortars in y-direction + for l in eachnode(dg) + for v in eachvariable(equations_parabolic) + cache.mortars.u_upper[1, v, l, mortar] = flux_viscous_y[v, l, + nnodes(dg), + upper_element] + cache.mortars.u_lower[1, v, l, mortar] = flux_viscous_y[v, l, + nnodes(dg), + lower_element] + end + end + end + end + + # Interpolate large element face data to small interface locations + if cache.mortars.large_sides[mortar] == 1 # -> large element on left side + leftright = 1 + if cache.mortars.orientations[mortar] == 1 + # L2 mortars in x-direction + u_large = view(flux_viscous_x, :, nnodes(dg), :, large_element) + element_solutions_to_mortars!(cache.mortars, mortar_l2, leftright, + mortar, u_large) + else + # L2 mortars in y-direction + u_large = view(flux_viscous_y, :, :, nnodes(dg), large_element) + 
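+                # As in the hyperbolic mortar treatment, `element_solutions_to_mortars!`
+                # interpolates the large-element face data (here: viscous fluxes) to the
+                # upper/lower small-element positions, presumably via the `forward_upper`/
+                # `forward_lower` operators of `mortar_l2`.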
element_solutions_to_mortars!(cache.mortars, mortar_l2, leftright, + mortar, u_large) + end + else # large_sides[mortar] == 2 -> large element on right side + leftright = 2 + if cache.mortars.orientations[mortar] == 1 + # L2 mortars in x-direction + u_large = view(flux_viscous_x, :, 1, :, large_element) + element_solutions_to_mortars!(cache.mortars, mortar_l2, leftright, + mortar, u_large) + else + # L2 mortars in y-direction + u_large = view(flux_viscous_y, :, :, 1, large_element) + element_solutions_to_mortars!(cache.mortars, mortar_l2, leftright, + mortar, u_large) + end + end + end + + return nothing +end + +# NOTE: Use analogy to "calc_mortar_flux!" for hyperbolic eqs with no nonconservative terms. +# Reasoning: "calc_interface_flux!" for parabolic part is implemented as the version for +# hyperbolic terms with conserved terms only, i.e., no nonconservative terms. +function calc_mortar_flux!(surface_flux_values, + mesh::TreeMesh{2}, + equations_parabolic::AbstractEquationsParabolic, + mortar_l2::LobattoLegendreMortarL2, + surface_integral, dg::DG, cache) + @unpack surface_flux = surface_integral + @unpack u_lower, u_upper, orientations = cache.mortars + @unpack fstar_upper_threaded, fstar_lower_threaded = cache + + @threaded for mortar in eachmortar(dg, cache) + # Choose thread-specific pre-allocated container + fstar_upper = fstar_upper_threaded[Threads.threadid()] + fstar_lower = fstar_lower_threaded[Threads.threadid()] + + # Calculate fluxes + orientation = orientations[mortar] + calc_fstar!(fstar_upper, equations_parabolic, surface_flux, dg, u_upper, mortar, + orientation) + calc_fstar!(fstar_lower, equations_parabolic, surface_flux, dg, u_lower, mortar, + orientation) + + mortar_fluxes_to_elements!(surface_flux_values, + mesh, equations_parabolic, mortar_l2, dg, cache, + mortar, fstar_upper, fstar_lower) + end + + return nothing +end + +@inline function calc_fstar!(destination::AbstractArray{<:Any, 2}, + equations_parabolic::AbstractEquationsParabolic, + surface_flux, dg::DGSEM, + u_interfaces, interface, orientation) + for i in eachnode(dg) + # Call pointwise two-point numerical flux function + u_ll, u_rr = get_surface_node_vars(u_interfaces, equations_parabolic, dg, i, + interface) + # TODO: parabolic; only BR1 at the moment + flux = 0.5 * (u_ll + u_rr) + + # Copy flux to left and right element storage + set_node_vars!(destination, flux, equations_parabolic, dg, i) + end + + return nothing +end + +@inline function mortar_fluxes_to_elements!(surface_flux_values, + mesh::TreeMesh{2}, + equations_parabolic::AbstractEquationsParabolic, + mortar_l2::LobattoLegendreMortarL2, + dg::DGSEM, cache, + mortar, fstar_upper, fstar_lower) + large_element = cache.mortars.neighbor_ids[3, mortar] + upper_element = cache.mortars.neighbor_ids[2, mortar] + lower_element = cache.mortars.neighbor_ids[1, mortar] + + # Copy flux small to small + if cache.mortars.large_sides[mortar] == 1 # -> small elements on right side + if cache.mortars.orientations[mortar] == 1 + # L2 mortars in x-direction + direction = 1 + else + # L2 mortars in y-direction + direction = 3 + end + else # large_sides[mortar] == 2 -> small elements on left side + if cache.mortars.orientations[mortar] == 1 + # L2 mortars in x-direction + direction = 2 + else + # L2 mortars in y-direction + direction = 4 + end + end + surface_flux_values[:, :, direction, upper_element] .= fstar_upper + surface_flux_values[:, :, direction, lower_element] .= fstar_lower + + # Project small fluxes to large element + if cache.mortars.large_sides[mortar] 
== 1 # -> large element on left side + if cache.mortars.orientations[mortar] == 1 + # L2 mortars in x-direction + direction = 2 + else + # L2 mortars in y-direction + direction = 4 + end + else # large_sides[mortar] == 2 -> large element on right side + if cache.mortars.orientations[mortar] == 1 + # L2 mortars in x-direction + direction = 1 + else + # L2 mortars in y-direction + direction = 3 + end + end + + # TODO: Taal performance + # for v in eachvariable(equations) + # # The code below is semantically equivalent to + # # surface_flux_values[v, :, direction, large_element] .= + # # (mortar_l2.reverse_upper * fstar_upper[v, :] + mortar_l2.reverse_lower * fstar_lower[v, :]) + # # but faster and does not allocate. + # # Note that `true * some_float == some_float` in Julia, i.e. `true` acts as + # # a universal `one`. Hence, the second `mul!` means "add the matrix-vector + # # product to the current value of the destination". + # @views mul!(surface_flux_values[v, :, direction, large_element], + # mortar_l2.reverse_upper, fstar_upper[v, :]) + # @views mul!(surface_flux_values[v, :, direction, large_element], + # mortar_l2.reverse_lower, fstar_lower[v, :], true, true) + # end + # The code above could be replaced by the following code. However, the relative efficiency + # depends on the types of fstar_upper/fstar_lower and dg.l2mortar_reverse_upper. + # Using StaticArrays for both makes the code above faster for common test cases. + multiply_dimensionwise!(view(surface_flux_values, :, :, direction, large_element), + mortar_l2.reverse_upper, fstar_upper, + mortar_l2.reverse_lower, fstar_lower) + + return nothing +end + # Calculate the gradient of the transformed variables function calc_gradient!(gradients, u_transformed, t, mesh::TreeMesh{2}, equations_parabolic, @@ -589,7 +820,20 @@ function calc_gradient!(gradients, u_transformed, t, dg.surface_integral, dg) end - # TODO: parabolic; mortars + # Prolong solution to mortars + # NOTE: This re-uses the implementation for hyperbolic terms in "dg_2d.jl" + @trixi_timeit timer() "prolong2mortars" begin + prolong2mortars!(cache, u_transformed, mesh, equations_parabolic, + dg.mortar, dg.surface_integral, dg) + end + + # Calculate mortar fluxes + @trixi_timeit timer() "mortar flux" begin + calc_mortar_flux!(surface_flux_values, + mesh, + equations_parabolic, + dg.mortar, dg.surface_integral, dg, cache) + end # Calculate surface integrals @trixi_timeit timer() "surface integral" begin diff --git a/src/solvers/dgsem_tree/dg_3d_parabolic.jl b/src/solvers/dgsem_tree/dg_3d_parabolic.jl index 5b63b971cd8..2745d312b37 100644 --- a/src/solvers/dgsem_tree/dg_3d_parabolic.jl +++ b/src/solvers/dgsem_tree/dg_3d_parabolic.jl @@ -13,7 +13,7 @@ # 2. compute f(u, grad(u)) # 3. compute div(f(u, grad(u))) (i.e., the "regular" rhs! call) # boundary conditions will be applied to both grad(u) and div(f(u, grad(u))). 
-function rhs_parabolic!(du, u, t, mesh::Union{TreeMesh{3}, P4estMesh{3}}, +function rhs_parabolic!(du, u, t, mesh::TreeMesh{3}, equations_parabolic::AbstractEquationsParabolic, initial_condition, boundary_conditions_parabolic, source_terms, dg::DG, parabolic_scheme, cache, cache_parabolic) @@ -85,8 +85,18 @@ function rhs_parabolic!(du, u, t, mesh::Union{TreeMesh{3}, P4estMesh{3}}, dg.surface_integral, dg) end - # TODO: parabolic; extend to mortars - @assert nmortars(dg, cache) == 0 + # Prolong solution to mortars + @trixi_timeit timer() "prolong2mortars" begin + prolong2mortars!(cache, flux_viscous, mesh, equations_parabolic, + dg.mortar, dg.surface_integral, dg) + end + + # Calculate mortar fluxes + @trixi_timeit timer() "mortar flux" begin + calc_mortar_flux!(cache_parabolic.elements.surface_flux_values, mesh, + equations_parabolic, + dg.mortar, dg.surface_integral, dg, cache) + end # Calculate surface integrals @trixi_timeit timer() "surface integral" begin @@ -583,6 +593,298 @@ function calc_boundary_flux_by_direction_divergence!(surface_flux_values::Abstra return nothing end +function prolong2mortars!(cache, + flux_viscous::Tuple{AbstractArray, AbstractArray, + AbstractArray}, + mesh::TreeMesh{3}, + equations_parabolic::AbstractEquationsParabolic, + mortar_l2::LobattoLegendreMortarL2, + surface_integral, dg::DGSEM) + # temporary buffer for projections + @unpack fstar_tmp1_threaded = cache + + flux_viscous_x, flux_viscous_y, flux_viscous_z = flux_viscous + @threaded for mortar in eachmortar(dg, cache) + fstar_tmp1 = fstar_tmp1_threaded[Threads.threadid()] + + lower_left_element = cache.mortars.neighbor_ids[1, mortar] + lower_right_element = cache.mortars.neighbor_ids[2, mortar] + upper_left_element = cache.mortars.neighbor_ids[3, mortar] + upper_right_element = cache.mortars.neighbor_ids[4, mortar] + large_element = cache.mortars.neighbor_ids[5, mortar] + + # Copy solution small to small + if cache.mortars.large_sides[mortar] == 1 # -> small elements on right side + if cache.mortars.orientations[mortar] == 1 + # L2 mortars in x-direction + for k in eachnode(dg), j in eachnode(dg) + for v in eachvariable(equations_parabolic) + cache.mortars.u_upper_left[2, v, j, k, mortar] = flux_viscous_x[v, + 1, + j, + k, + upper_left_element] + cache.mortars.u_upper_right[2, v, j, k, mortar] = flux_viscous_x[v, + 1, + j, + k, + upper_right_element] + cache.mortars.u_lower_left[2, v, j, k, mortar] = flux_viscous_x[v, + 1, + j, + k, + lower_left_element] + cache.mortars.u_lower_right[2, v, j, k, mortar] = flux_viscous_x[v, + 1, + j, + k, + lower_right_element] + end + end + elseif cache.mortars.orientations[mortar] == 2 + # L2 mortars in y-direction + for k in eachnode(dg), i in eachnode(dg) + for v in eachvariable(equations_parabolic) + cache.mortars.u_upper_left[2, v, i, k, mortar] = flux_viscous_y[v, + i, + 1, + k, + upper_left_element] + cache.mortars.u_upper_right[2, v, i, k, mortar] = flux_viscous_y[v, + i, + 1, + k, + upper_right_element] + cache.mortars.u_lower_left[2, v, i, k, mortar] = flux_viscous_y[v, + i, + 1, + k, + lower_left_element] + cache.mortars.u_lower_right[2, v, i, k, mortar] = flux_viscous_y[v, + i, + 1, + k, + lower_right_element] + end + end + else # orientations[mortar] == 3 + # L2 mortars in z-direction + for j in eachnode(dg), i in eachnode(dg) + for v in eachvariable(equations_parabolic) + cache.mortars.u_upper_left[2, v, i, j, mortar] = flux_viscous_z[v, + i, + j, + 1, + upper_left_element] + cache.mortars.u_upper_right[2, v, i, j, mortar] = flux_viscous_z[v, + i, + j, + 
1, + upper_right_element] + cache.mortars.u_lower_left[2, v, i, j, mortar] = flux_viscous_z[v, + i, + j, + 1, + lower_left_element] + cache.mortars.u_lower_right[2, v, i, j, mortar] = flux_viscous_z[v, + i, + j, + 1, + lower_right_element] + end + end + end + else # large_sides[mortar] == 2 -> small elements on left side + if cache.mortars.orientations[mortar] == 1 + # L2 mortars in x-direction + for k in eachnode(dg), j in eachnode(dg) + for v in eachvariable(equations_parabolic) + cache.mortars.u_upper_left[1, v, j, k, mortar] = flux_viscous_x[v, + nnodes(dg), + j, + k, + upper_left_element] + cache.mortars.u_upper_right[1, v, j, k, mortar] = flux_viscous_x[v, + nnodes(dg), + j, + k, + upper_right_element] + cache.mortars.u_lower_left[1, v, j, k, mortar] = flux_viscous_x[v, + nnodes(dg), + j, + k, + lower_left_element] + cache.mortars.u_lower_right[1, v, j, k, mortar] = flux_viscous_x[v, + nnodes(dg), + j, + k, + lower_right_element] + end + end + elseif cache.mortars.orientations[mortar] == 2 + # L2 mortars in y-direction + for k in eachnode(dg), i in eachnode(dg) + for v in eachvariable(equations_parabolic) + cache.mortars.u_upper_left[1, v, i, k, mortar] = flux_viscous_y[v, + i, + nnodes(dg), + k, + upper_left_element] + cache.mortars.u_upper_right[1, v, i, k, mortar] = flux_viscous_y[v, + i, + nnodes(dg), + k, + upper_right_element] + cache.mortars.u_lower_left[1, v, i, k, mortar] = flux_viscous_y[v, + i, + nnodes(dg), + k, + lower_left_element] + cache.mortars.u_lower_right[1, v, i, k, mortar] = flux_viscous_y[v, + i, + nnodes(dg), + k, + lower_right_element] + end + end + else # if cache.mortars.orientations[mortar] == 3 + # L2 mortars in z-direction + for j in eachnode(dg), i in eachnode(dg) + for v in eachvariable(equations_parabolic) + cache.mortars.u_upper_left[1, v, i, j, mortar] = flux_viscous_z[v, + i, + j, + nnodes(dg), + upper_left_element] + cache.mortars.u_upper_right[1, v, i, j, mortar] = flux_viscous_z[v, + i, + j, + nnodes(dg), + upper_right_element] + cache.mortars.u_lower_left[1, v, i, j, mortar] = flux_viscous_z[v, + i, + j, + nnodes(dg), + lower_left_element] + cache.mortars.u_lower_right[1, v, i, j, mortar] = flux_viscous_z[v, + i, + j, + nnodes(dg), + lower_right_element] + end + end + end + end + + # Interpolate large element face data to small interface locations + if cache.mortars.large_sides[mortar] == 1 # -> large element on left side + leftright = 1 + if cache.mortars.orientations[mortar] == 1 + # L2 mortars in x-direction + u_large = view(flux_viscous_x, :, nnodes(dg), :, :, large_element) + element_solutions_to_mortars!(cache.mortars, mortar_l2, leftright, + mortar, u_large, fstar_tmp1) + elseif cache.mortars.orientations[mortar] == 2 + # L2 mortars in y-direction + u_large = view(flux_viscous_y, :, :, nnodes(dg), :, large_element) + element_solutions_to_mortars!(cache.mortars, mortar_l2, leftright, + mortar, u_large, fstar_tmp1) + else # cache.mortars.orientations[mortar] == 3 + # L2 mortars in z-direction + u_large = view(flux_viscous_z, :, :, :, nnodes(dg), large_element) + element_solutions_to_mortars!(cache.mortars, mortar_l2, leftright, + mortar, u_large, fstar_tmp1) + end + else # large_sides[mortar] == 2 -> large element on right side + leftright = 2 + if cache.mortars.orientations[mortar] == 1 + # L2 mortars in x-direction + u_large = view(flux_viscous_x, :, 1, :, :, large_element) + element_solutions_to_mortars!(cache.mortars, mortar_l2, leftright, + mortar, u_large, fstar_tmp1) + elseif cache.mortars.orientations[mortar] == 2 + # L2 mortars in 
y-direction + u_large = view(flux_viscous_y, :, :, 1, :, large_element) + element_solutions_to_mortars!(cache.mortars, mortar_l2, leftright, + mortar, u_large, fstar_tmp1) + else # cache.mortars.orientations[mortar] == 3 + # L2 mortars in z-direction + u_large = view(flux_viscous_z, :, :, :, 1, large_element) + element_solutions_to_mortars!(cache.mortars, mortar_l2, leftright, + mortar, u_large, fstar_tmp1) + end + end + end + + return nothing +end + +# NOTE: Use analogy to "calc_mortar_flux!" for hyperbolic eqs with no nonconservative terms. +# Reasoning: "calc_interface_flux!" for parabolic part is implemented as the version for +# hyperbolic terms with conserved terms only, i.e., no nonconservative terms. +function calc_mortar_flux!(surface_flux_values, + mesh::TreeMesh{3}, + equations_parabolic::AbstractEquationsParabolic, + mortar_l2::LobattoLegendreMortarL2, + surface_integral, dg::DG, cache) + @unpack surface_flux = surface_integral + @unpack u_lower_left, u_lower_right, u_upper_left, u_upper_right, orientations = cache.mortars + @unpack (fstar_upper_left_threaded, fstar_upper_right_threaded, + fstar_lower_left_threaded, fstar_lower_right_threaded, + fstar_tmp1_threaded) = cache + + @threaded for mortar in eachmortar(dg, cache) + # Choose thread-specific pre-allocated container + fstar_upper_left = fstar_upper_left_threaded[Threads.threadid()] + fstar_upper_right = fstar_upper_right_threaded[Threads.threadid()] + fstar_lower_left = fstar_lower_left_threaded[Threads.threadid()] + fstar_lower_right = fstar_lower_right_threaded[Threads.threadid()] + fstar_tmp1 = fstar_tmp1_threaded[Threads.threadid()] + + # Calculate fluxes + orientation = orientations[mortar] + calc_fstar!(fstar_upper_left, equations_parabolic, surface_flux, dg, + u_upper_left, mortar, + orientation) + calc_fstar!(fstar_upper_right, equations_parabolic, surface_flux, dg, + u_upper_right, + mortar, orientation) + calc_fstar!(fstar_lower_left, equations_parabolic, surface_flux, dg, + u_lower_left, mortar, + orientation) + calc_fstar!(fstar_lower_right, equations_parabolic, surface_flux, dg, + u_lower_right, + mortar, orientation) + + mortar_fluxes_to_elements!(surface_flux_values, + mesh, equations_parabolic, mortar_l2, dg, cache, + mortar, + fstar_upper_left, fstar_upper_right, + fstar_lower_left, fstar_lower_right, + fstar_tmp1) + end + + return nothing +end + +@inline function calc_fstar!(destination::AbstractArray{<:Any, 3}, + equations_parabolic::AbstractEquationsParabolic, + surface_flux, dg::DGSEM, + u_interfaces, interface, orientation) + for j in eachnode(dg), i in eachnode(dg) + # Call pointwise two-point numerical flux function + u_ll, u_rr = get_surface_node_vars(u_interfaces, equations_parabolic, dg, i, j, + interface) + # TODO: parabolic; only BR1 at the moment + flux = 0.5 * (u_ll + u_rr) + + # Copy flux to left and right element storage + set_node_vars!(destination, flux, equations_parabolic, dg, i, j) + end + + return nothing +end + # Calculate the gradient of the transformed variables function calc_gradient!(gradients, u_transformed, t, mesh::TreeMesh{3}, equations_parabolic, @@ -679,7 +981,20 @@ function calc_gradient!(gradients, u_transformed, t, dg.surface_integral, dg) end - # TODO: parabolic; mortars + # Prolong solution to mortars + # NOTE: This re-uses the implementation for hyperbolic terms in "dg_3d.jl" + @trixi_timeit timer() "prolong2mortars" begin + prolong2mortars!(cache, u_transformed, mesh, equations_parabolic, + dg.mortar, dg.surface_integral, dg) + end + + # Calculate mortar fluxes + 
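+    # NOTE: Dispatch on `equations_parabolic::AbstractEquationsParabolic` selects the
+    # `calc_mortar_flux!` defined above, whose `calc_fstar!` uses the BR1-type average
+    # 0.5 * (u_ll + u_rr) of the (transformed) solution values at the mortar.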
@trixi_timeit timer() "mortar flux" begin + calc_mortar_flux!(surface_flux_values, + mesh, + equations_parabolic, + dg.mortar, dg.surface_integral, dg, cache) + end # Calculate surface integrals @trixi_timeit timer() "surface integral" begin diff --git a/test/test_parabolic_2d.jl b/test/test_parabolic_2d.jl index e3bb1ed9fb1..1564a33dc41 100644 --- a/test/test_parabolic_2d.jl +++ b/test/test_parabolic_2d.jl @@ -125,6 +125,39 @@ isdir(outdir) && rm(outdir, recursive=true) ) end + @trixi_testset "TreeMesh2D: elixir_advection_diffusion.jl (Refined mesh)" begin + @test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", "elixir_advection_diffusion.jl"), + tspan=(0.0, 0.0)) + LLID = Trixi.local_leaf_cells(mesh.tree) + num_leafs = length(LLID) + @assert num_leafs % 8 == 0 + Trixi.refine!(mesh.tree, LLID[1:Int(num_leafs/8)]) + tspan=(0.0, 1.5) + semi = SemidiscretizationHyperbolicParabolic(mesh, + (equations, equations_parabolic), + initial_condition, solver; + boundary_conditions=(boundary_conditions, + boundary_conditions_parabolic)) + ode = semidiscretize(semi, tspan) + analysis_callback = AnalysisCallback(semi, interval=analysis_interval) + callbacks = CallbackSet(summary_callback, alive_callback, analysis_callback) + sol = solve(ode, RDPK3SpFSAL49(); abstol=time_int_tol, reltol=time_int_tol, + ode_default_options()..., callback=callbacks) + ac_sol = analysis_callback(sol) + @test ac_sol.l2[1] ≈ 1.67452550744728e-6 + @test ac_sol.linf[1] ≈ 7.905059166368744e-6 + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 100 + @test (@allocated Trixi.rhs_parabolic!(du_ode, u_ode, semi, t)) < 100 + end + end + @trixi_testset "TreeMesh2D: elixir_advection_diffusion_nonperiodic.jl" begin @test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", "elixir_advection_diffusion_nonperiodic.jl"), initial_refinement_level = 2, tspan=(0.0, 0.1), @@ -180,6 +213,27 @@ isdir(outdir) && rm(outdir, recursive=true) ) end + @trixi_testset "TreeMesh2D: elixir_navierstokes_convergence.jl (Refined mesh)" begin + @test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", "elixir_navierstokes_convergence.jl"), + tspan=(0.0, 0.0), initial_refinement_level=3) + LLID = Trixi.local_leaf_cells(mesh.tree) + num_leafs = length(LLID) + @assert num_leafs % 4 == 0 + Trixi.refine!(mesh.tree, LLID[1:Int(num_leafs/4)]) + tspan=(0.0, 0.5) + semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, equations_parabolic), initial_condition, solver; + boundary_conditions=(boundary_conditions, boundary_conditions_parabolic), + source_terms=source_terms_navier_stokes_convergence_test) + ode = semidiscretize(semi, tspan) + analysis_callback = AnalysisCallback(semi, interval=analysis_interval) + callbacks = CallbackSet(summary_callback, alive_callback, analysis_callback) + sol = solve(ode, RDPK3SpFSAL49(); abstol=time_int_tol, reltol=time_int_tol, dt = 1e-5, + ode_default_options()..., callback=callbacks) + ac_sol = analysis_callback(sol) + @test ac_sol.l2 ≈ [0.00024296959173852447; 0.0002093263158670915; 0.0005390572390977262; 0.00026753561392341537] + @test ac_sol.linf ≈ [0.0016210102053424436; 0.002593287648655501; 0.002953907343823712; 0.002077119120180271] + end + @trixi_testset "TreeMesh2D: elixir_navierstokes_lid_driven_cavity.jl" begin @test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", "elixir_navierstokes_lid_driven_cavity.jl"), 
initial_refinement_level = 2, tspan=(0.0, 0.5), diff --git a/test/test_parabolic_3d.jl b/test/test_parabolic_3d.jl index 67a27238969..d607962afa0 100644 --- a/test/test_parabolic_3d.jl +++ b/test/test_parabolic_3d.jl @@ -78,6 +78,27 @@ isdir(outdir) && rm(outdir, recursive=true) ) end + @trixi_testset "TreeMesh3D: elixir_navierstokes_convergence.jl (Refined mesh)" begin + @test_trixi_include(joinpath(examples_dir(), "tree_3d_dgsem", "elixir_navierstokes_convergence.jl"), + tspan=(0.0, 0.0)) + LLID = Trixi.local_leaf_cells(mesh.tree) + num_leafs = length(LLID) + @assert num_leafs % 16 == 0 + Trixi.refine!(mesh.tree, LLID[1:Int(num_leafs/16)]) + tspan=(0.0, 1.0) + semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, equations_parabolic), initial_condition, solver; + boundary_conditions=(boundary_conditions, boundary_conditions_parabolic), + source_terms=source_terms_navier_stokes_convergence_test) + ode = semidiscretize(semi, tspan) + analysis_callback = AnalysisCallback(semi, interval=analysis_interval) + callbacks = CallbackSet(summary_callback, alive_callback, analysis_callback) + sol = solve(ode, RDPK3SpFSAL49(); abstol=time_int_tol, reltol=time_int_tol, dt = 1e-5, + ode_default_options()..., callback=callbacks) + ac_sol = analysis_callback(sol) + @test ac_sol.l2 ≈ [0.0003991794175622818; 0.0008853745163670504; 0.0010658655552066817; 0.0008785559918324284; 0.001403163458422815] + @test ac_sol.linf ≈ [0.0035306410538458177; 0.01505692306169911; 0.008862444161110705; 0.015065647972869856; 0.030402714743065218] + end + @trixi_testset "TreeMesh3D: elixir_navierstokes_taylor_green_vortex.jl" begin @test_trixi_include(joinpath(examples_dir(), "tree_3d_dgsem", "elixir_navierstokes_taylor_green_vortex.jl"), initial_refinement_level = 2, tspan=(0.0, 0.25), @@ -86,6 +107,41 @@ isdir(outdir) && rm(outdir, recursive=true) ) end + @trixi_testset "TreeMesh3D: elixir_navierstokes_taylor_green_vortex.jl (Refined mesh)" begin + @test_trixi_include(joinpath(examples_dir(), "tree_3d_dgsem", "elixir_navierstokes_taylor_green_vortex.jl"), + tspan=(0.0, 0.0)) + LLID = Trixi.local_leaf_cells(mesh.tree) + num_leafs = length(LLID) + @assert num_leafs % 32 == 0 + Trixi.refine!(mesh.tree, LLID[1:Int(num_leafs/32)]) + tspan=(0.0, 10.0) + semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, equations_parabolic), + initial_condition, solver) + ode = semidiscretize(semi, tspan) + analysis_callback = AnalysisCallback(semi, interval=analysis_interval, save_analysis=true, + extra_analysis_integrals=(energy_kinetic, + energy_internal, + enstrophy)) + callbacks = CallbackSet(summary_callback, alive_callback, analysis_callback) + # Use CarpenterKennedy2N54 since `RDPK3SpFSAL49` gives slightly different results on different machines + sol = solve(ode, CarpenterKennedy2N54(williamson_condition=false), + dt=5e-3, + save_everystep=false, callback=callbacks); + ac_sol = analysis_callback(sol) + @test ac_sol.l2 ≈ [0.0013666103707729502; 0.2313581629543744; 0.2308164306264533; 0.17460246787819503; 0.28121914446544005] + @test ac_sol.linf ≈ [0.006938093883741336; 1.028235074139312; 1.0345438209717241; 1.0821111605203542; 1.2669636522564645] + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 100 + @test (@allocated Trixi.rhs_parabolic!(du_ode, u_ode, semi, t)) < 100 + end + end + @trixi_testset "P4estMesh3D: 
elixir_navierstokes_convergence.jl" begin @test_trixi_include(joinpath(examples_dir(), "p4est_3d_dgsem", "elixir_navierstokes_convergence.jl"), initial_refinement_level = 2, tspan=(0.0, 0.1), @@ -101,8 +157,8 @@ isdir(outdir) && rm(outdir, recursive=true) linf = [0.0006696415247340326, 0.03442565722527785, 0.03442565722577423, 0.06295407168705314, 0.032857472756916195] ) end - end + # Clean up afterwards: delete Trixi.jl output directory @test_nowarn isdir(outdir) && rm(outdir, recursive=true)