Commit

Merge branch 'main' into hr/semidiscretization

ranocha committed Sep 12, 2023
2 parents bcc0301 + 3523c49, commit a509179

Showing 9 changed files with 523 additions and 20 deletions.
172 changes: 172 additions & 0 deletions examples/tree_1d_dgsem/elixir_navierstokes_convergence_walls_amr.jl
@@ -0,0 +1,172 @@
using OrdinaryDiffEq
using Trixi

###############################################################################
# semidiscretization of the ideal compressible Navier-Stokes equations

prandtl_number() = 0.72
mu() = 0.01

equations = CompressibleEulerEquations1D(1.4)
equations_parabolic = CompressibleNavierStokesDiffusion1D(equations, mu=mu(), Prandtl=prandtl_number(),
                                                          gradient_variables=GradientVariablesEntropy())

# Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux
solver = DGSEM(polydeg=3, surface_flux=flux_lax_friedrichs,
               volume_integral=VolumeIntegralWeakForm())

coordinates_min = -1.0
coordinates_max = 1.0

# Create a uniformly refined mesh with non-periodic boundaries
mesh = TreeMesh(coordinates_min, coordinates_max,
                initial_refinement_level=3,
                periodicity=false,
                n_cells_max=30_000) # set maximum capacity of tree data structure

# Note: the initial condition cannot be specialized to `CompressibleNavierStokesDiffusion1D`
# since it is called by both the parabolic solver (which passes in `CompressibleNavierStokesDiffusion1D`)
# and the hyperbolic solver (which passes in `CompressibleEulerEquations1D`).
# This convergence test setup was originally derived by Andrew Winters (@andrewwinters5000)
function initial_condition_navier_stokes_convergence_test(x, t, equations)
    # Amplitude and shift
    A = 0.5
    c = 2.0

    # convenience values for trig. functions
    pi_x = pi * x[1]
    pi_t = pi * t

    rho = c + A * cos(pi_x) * cos(pi_t)
    v1 = log(x[1] + 2.0) * (1.0 - exp(-A * (x[1] - 1.0))) * cos(pi_t)
    p = rho^2

    return prim2cons(SVector(rho, v1, p), equations)
end

@inline function source_terms_navier_stokes_convergence_test(u, x, t, equations)
    x = x[1]

    # TODO: parabolic
    # we currently need to hardcode these parameters until we fix the "combined equation" issue
    # see also https://github.com/trixi-framework/Trixi.jl/pull/1160
    inv_gamma_minus_one = inv(equations.gamma - 1)
    Pr = prandtl_number()
    mu_ = mu()

    # Same settings as in `initial_condition`
    # Amplitude and shift
    A = 0.5
    c = 2.0

    # convenience values for trig. functions
    pi_x = pi * x
    pi_t = pi * t

    # compute the manufactured solution and all necessary derivatives
    rho = c + A * cos(pi_x) * cos(pi_t)
    rho_t = -pi * A * cos(pi_x) * sin(pi_t)
    rho_x = -pi * A * sin(pi_x) * cos(pi_t)
    rho_xx = -pi * pi * A * cos(pi_x) * cos(pi_t)

    v1 = log(x + 2.0) * (1.0 - exp(-A * (x - 1.0))) * cos(pi_t)
    v1_t = -pi * log(x + 2.0) * (1.0 - exp(-A * (x - 1.0))) * sin(pi_t)
    v1_x = (A * log(x + 2.0) * exp(-A * (x - 1.0)) + (1.0 - exp(-A * (x - 1.0))) / (x + 2.0)) * cos(pi_t)
    v1_xx = ((2.0 * A * exp(-A * (x - 1.0)) / (x + 2.0)
              - A * A * log(x + 2.0) * exp(-A * (x - 1.0))
              - (1.0 - exp(-A * (x - 1.0))) / ((x + 2.0) * (x + 2.0))) * cos(pi_t))

    p = rho * rho
    p_t = 2.0 * rho * rho_t
    p_x = 2.0 * rho * rho_x
    p_xx = 2.0 * rho * rho_xx + 2.0 * rho_x * rho_x

    # Note: in 1D the ansatz has only a single velocity component, so the total energy simplifies accordingly
    E = p * inv_gamma_minus_one + 0.5 * rho * v1^2
    E_t = p_t * inv_gamma_minus_one + 0.5 * rho_t * v1^2 + rho * v1 * v1_t
    E_x = p_x * inv_gamma_minus_one + 0.5 * rho_x * v1^2 + rho * v1 * v1_x

    # Some convenience constants
    T_const = equations.gamma * inv_gamma_minus_one / Pr
    inv_rho_cubed = 1.0 / (rho^3)

    # compute the source terms
    # density equation
    du1 = rho_t + rho_x * v1 + rho * v1_x

    # x-momentum equation
    du2 = ( rho_t * v1 + rho * v1_t
           + p_x + rho_x * v1^2 + 2.0 * rho * v1 * v1_x
           # stress tensor from x-direction
           - v1_xx * mu_)

    # total energy equation
    du3 = ( E_t + v1_x * (E + p) + v1 * (E_x + p_x)
           # stress tensor and temperature gradient terms from x-direction
           - v1_xx * v1 * mu_
           - v1_x * v1_x * mu_
           - T_const * inv_rho_cubed * ( p_xx * rho * rho
                                        - 2.0 * p_x * rho * rho_x
                                        + 2.0 * p * rho_x * rho_x
                                        - p * rho * rho_xx ) * mu_ )

    return SVector(du1, du2, du3)
end
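
# Editorial sketch, not part of the committed elixir: the source terms above rely on
# hand-derived spatial derivatives of the manufactured solution. A central
# finite-difference check along the following lines can be used to sanity-check them;
# all names (`rho_ref`, `v1_ref`, `central_diff`, ...) are hypothetical helpers.
let A = 0.5, c = 2.0
    rho_ref(x, t) = c + A * cos(pi * x) * cos(pi * t)
    v1_ref(x, t) = log(x + 2.0) * (1.0 - exp(-A * (x - 1.0))) * cos(pi * t)

    # hand-coded derivatives as used in `source_terms_navier_stokes_convergence_test`
    rho_x_ref(x, t) = -pi * A * sin(pi * x) * cos(pi * t)
    v1_x_ref(x, t) = (A * log(x + 2.0) * exp(-A * (x - 1.0)) +
                      (1.0 - exp(-A * (x - 1.0))) / (x + 2.0)) * cos(pi * t)

    central_diff(f, x, t; h = 1.0e-6) = (f(x + h, t) - f(x - h, t)) / (2 * h)

    x0, t0 = 0.3, 0.4
    @assert isapprox(central_diff(rho_ref, x0, t0), rho_x_ref(x0, t0), atol = 1.0e-6)
    @assert isapprox(central_diff(v1_ref, x0, t0), v1_x_ref(x0, t0), atol = 1.0e-6)
end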

initial_condition = initial_condition_navier_stokes_convergence_test

# BC types
velocity_bc_left_right = NoSlip((x, t, equations) -> initial_condition_navier_stokes_convergence_test(x, t, equations)[2])

heat_bc_left = Isothermal((x, t, equations) ->
                              Trixi.temperature(initial_condition_navier_stokes_convergence_test(x, t, equations),
                                                equations_parabolic))
heat_bc_right = Adiabatic((x, t, equations) -> 0.0)

boundary_condition_left = BoundaryConditionNavierStokesWall(velocity_bc_left_right, heat_bc_left)
boundary_condition_right = BoundaryConditionNavierStokesWall(velocity_bc_left_right, heat_bc_right)

# define inviscid boundary conditions
boundary_conditions = (; x_neg = boundary_condition_slip_wall,
                         x_pos = boundary_condition_slip_wall)

# define viscous boundary conditions
boundary_conditions_parabolic = (; x_neg = boundary_condition_left,
                                   x_pos = boundary_condition_right)
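
# Editorial summary, not part of the committed elixir: the hyperbolic part sees slip
# walls at both ends, while the parabolic part uses `BoundaryConditionNavierStokesWall`
# with a no-slip velocity condition at both walls, an isothermal condition (built from the
# manufactured temperature) at the left wall, and an adiabatic (zero heat flux) condition
# at the right wall.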

semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, equations_parabolic), initial_condition, solver;
                                             boundary_conditions=(boundary_conditions, boundary_conditions_parabolic),
                                             source_terms=source_terms_navier_stokes_convergence_test)

###############################################################################
# ODE solvers, callbacks etc.

# Create ODE problem with time span `tspan`
tspan = (0.0, 1.0)
ode = semidiscretize(semi, tspan)

summary_callback = SummaryCallback()
alive_callback = AliveCallback(alive_interval=10)
analysis_interval = 100
analysis_callback = AnalysisCallback(semi, interval=analysis_interval)

amr_controller = ControllerThreeLevel(semi,
                                      IndicatorLöhner(semi, variable=Trixi.density),
                                      base_level=3,
                                      med_level=4, med_threshold=0.005,
                                      max_level=5, max_threshold=0.01)
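
# Editorial note, not part of the committed elixir: `ControllerThreeLevel` targets
# `max_level` where the Löhner indicator (evaluated on the density) exceeds
# `max_threshold`, `med_level` where it exceeds `med_threshold`, and `base_level`
# elsewhere.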

amr_callback = AMRCallback(semi, amr_controller,
                           interval=5,
                           adapt_initial_condition=true)

# Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver
callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, amr_callback)

###############################################################################
# run the simulation

time_int_tol = 1e-8
sol = solve(ode, RDPK3SpFSAL49(); abstol=time_int_tol, reltol=time_int_tol, dt=1e-5,
            ode_default_options()..., callback=callbacks)
summary_callback() # print the timer summary
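
For reference (not part of the diff): an elixir like this one is typically run from the Julia REPL with `trixi_include`, which evaluates the file and lets keyword arguments override its top-level variables. The call below assumes Trixi.jl and OrdinaryDiffEq.jl are installed; `examples_dir()` is part of Trixi.jl's public API, and the shortened `tspan` is just an illustrative override.

using Trixi
trixi_include(joinpath(examples_dir(), "tree_1d_dgsem",
                       "elixir_navierstokes_convergence_walls_amr.jl"),
              tspan = (0.0, 0.5))
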
158 changes: 158 additions & 0 deletions src/callbacks_step/amr.jl
@@ -192,6 +192,16 @@ end
    amr_callback(u_ode, mesh_equations_solver_cache(semi)..., semi, t, iter; kwargs...)
end

@inline function (amr_callback::AMRCallback)(u_ode::AbstractVector,
                                             semi::SemidiscretizationHyperbolicParabolic,
                                             t, iter;
                                             kwargs...)
    # Note that we don't `wrap_array` the vector `u_ode` to be able to `resize!`
    # it when doing AMR while still dispatching on the `mesh` etc.
    amr_callback(u_ode, mesh_equations_solver_cache(semi)..., semi.cache_parabolic,
                 semi, t, iter; kwargs...)
end
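
# Editorial note, not part of the diff: compared to the generic method above, this new
# method additionally splices `semi.cache_parabolic` into the argument list, so that
# multiple dispatch selects the `TreeMesh`-specific method added further below, which
# takes both `cache` and `cache_parabolic` and keeps the parabolic cache in sync during AMR.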

# `passive_args` is currently used for Euler with self-gravity to adapt the gravity solver
# passively without querying its indicator, based on the assumption that both solvers use
# the same mesh. That's a hack and should be improved in the future once we have more examples
@@ -346,6 +356,154 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::TreeMesh,
    return has_changed
end

function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::TreeMesh,
                                     equations, dg::DG,
                                     cache, cache_parabolic,
                                     semi::SemidiscretizationHyperbolicParabolic,
                                     t, iter;
                                     only_refine = false, only_coarsen = false)
    @unpack controller, adaptor = amr_callback

    u = wrap_array(u_ode, mesh, equations, dg, cache)
    # Indicator kept based on hyperbolic variables
    lambda = @trixi_timeit timer() "indicator" controller(u, mesh, equations, dg, cache,
                                                          t = t, iter = iter)

    if mpi_isparallel()
        error("MPI has not been verified yet for parabolic AMR")

        # Collect lambda for all elements
        lambda_global = Vector{eltype(lambda)}(undef, nelementsglobal(dg, cache))
        # Use parent because n_elements_by_rank is an OffsetArray
        recvbuf = MPI.VBuffer(lambda_global, parent(cache.mpi_cache.n_elements_by_rank))
        MPI.Allgatherv!(lambda, recvbuf, mpi_comm())
        lambda = lambda_global
    end

    leaf_cell_ids = leaf_cells(mesh.tree)
    @boundscheck begin
        @assert axes(lambda)==axes(leaf_cell_ids) ("Indicator (axes = $(axes(lambda))) and leaf cell (axes = $(axes(leaf_cell_ids))) arrays have different axes")
    end

    @unpack to_refine, to_coarsen = amr_callback.amr_cache
    empty!(to_refine)
    empty!(to_coarsen)
    for element in 1:length(lambda)
        controller_value = lambda[element]
        if controller_value > 0
            push!(to_refine, leaf_cell_ids[element])
        elseif controller_value < 0
            push!(to_coarsen, leaf_cell_ids[element])
        end
    end

    @trixi_timeit timer() "refine" if !only_coarsen && !isempty(to_refine)
        # refine mesh
        refined_original_cells = @trixi_timeit timer() "mesh" refine!(mesh.tree,
                                                                      to_refine)

        # Find all indices of elements whose cell ids are in refined_original_cells
        # Note: This assumes same indices for hyperbolic and parabolic part.
        elements_to_refine = findall(in(refined_original_cells),
                                     cache.elements.cell_ids)

        # refine solver
        @trixi_timeit timer() "solver" refine!(u_ode, adaptor, mesh, equations, dg,
                                               cache, cache_parabolic,
                                               elements_to_refine)
    else
        # If there is nothing to refine, create empty array for later use
        refined_original_cells = Int[]
    end

@trixi_timeit timer() "coarsen" if !only_refine && !isempty(to_coarsen)
# Since the cells may have been shifted due to refinement, first we need to
# translate the old cell ids to the new cell ids
if !isempty(to_coarsen)
to_coarsen = original2refined(to_coarsen, refined_original_cells, mesh)
end

# Next, determine the parent cells from which the fine cells are to be
# removed, since these are needed for the coarsen! function. However, since
# we only want to coarsen if *all* child cells are marked for coarsening,
# we count the coarsening indicators for each parent cell and only coarsen
# if all children are marked as such (i.e., where the count is 2^ndims). At
# the same time, check if a cell is marked for coarsening even though it is
# *not* a leaf cell -> this can only happen if it was refined due to 2:1
# smoothing during the preceding refinement operation.
parents_to_coarsen = zeros(Int, length(mesh.tree))
for cell_id in to_coarsen
# If cell has no parent, it cannot be coarsened
if !has_parent(mesh.tree, cell_id)
continue
end

# If cell is not leaf (anymore), it cannot be coarsened
if !is_leaf(mesh.tree, cell_id)
continue
end

# Increase count for parent cell
parent_id = mesh.tree.parent_ids[cell_id]
parents_to_coarsen[parent_id] += 1
end

# Extract only those parent cells for which all children should be coarsened
to_coarsen = collect(1:length(parents_to_coarsen))[parents_to_coarsen .== 2^ndims(mesh)]

# Finally, coarsen mesh
coarsened_original_cells = @trixi_timeit timer() "mesh" coarsen!(mesh.tree,
to_coarsen)

# Convert coarsened parent cell ids to the list of child cell ids that have
# been removed, since this is the information that is expected by the solver
removed_child_cells = zeros(Int,
n_children_per_cell(mesh.tree) *
length(coarsened_original_cells))
for (index, coarse_cell_id) in enumerate(coarsened_original_cells)
for child in 1:n_children_per_cell(mesh.tree)
removed_child_cells[n_children_per_cell(mesh.tree) * (index - 1) + child] = coarse_cell_id +
child
end
end

# Find all indices of elements whose cell ids are in removed_child_cells
# Note: This assumes same indices for hyperbolic and parabolic part.
elements_to_remove = findall(in(removed_child_cells), cache.elements.cell_ids)

# coarsen solver
@trixi_timeit timer() "solver" coarsen!(u_ode, adaptor, mesh, equations, dg,
cache, cache_parabolic,
elements_to_remove)
else
# If there is nothing to coarsen, create empty array for later use
coarsened_original_cells = Int[]
end

    # Store whether there were any cells coarsened or refined
    has_changed = !isempty(refined_original_cells) || !isempty(coarsened_original_cells)
    if has_changed # TODO: Taal decide, where shall we set this?
        # don't set it to has_changed since there can be changes from earlier calls
        mesh.unsaved_changes = true
    end

    # Dynamically balance computational load by first repartitioning the mesh and then redistributing the cells/elements
    if has_changed && mpi_isparallel() && amr_callback.dynamic_load_balancing
        error("MPI has not been verified yet for parabolic AMR")

        @trixi_timeit timer() "dynamic load balancing" begin
            old_mpi_ranks_per_cell = copy(mesh.tree.mpi_ranks)

            partition!(mesh)

            rebalance_solver!(u_ode, mesh, equations, dg, cache, old_mpi_ranks_per_cell)
        end
    end

    # Return true if there were any cells coarsened or refined, otherwise false
    return has_changed
end

# Copy controller values to quad user data storage, will be called below
function copy_to_quad_iter_volume(info, user_data)
    info_pw = PointerWrapper(info)